/*	$NetBSD: if_wg.c,v 1.74 2023/01/05 20:32:18 christos Exp $	*/

/*
 * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This network interface aims to implement the WireGuard protocol.
 * The implementation is based on the WireGuard paper as of 2018-06-30
 * [1], which is referred to in the source code with the label [W].
 * The specification of the Noise protocol framework as of 2018-07-11
 * [2] is likewise referred to with the label [N].
 *
 * [1] https://www.wireguard.com/papers/wireguard.pdf
 * [2] http://noiseprotocol.org/noise.pdf
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.74 2023/01/05 20:32:18 christos Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq_enabled.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/domain.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pserialize.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/thmap.h>
#include <sys/threadpool.h>
#include <sys/time.h>
#include <sys/timespec.h>
#include <sys/workqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_wg.h>
#include <net/pktqueue.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/udp6_var.h>
#endif /* INET6 */

#include <prop/proplib.h>

#include <crypto/blake2/blake2s.h>
#include <crypto/sodium/crypto_aead_chacha20poly1305.h>
#include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
#include <crypto/sodium/crypto_scalarmult.h>

#include "ioconf.h"

#ifdef WG_RUMPKERNEL
#include "wg_user.h"
#endif

/*
 * Data structures
 * - struct wg_softc is an instance of a wg interface
 *   - It has a list of peers (struct wg_peer)
 *   - It has a threadpool job that sends/receives handshake messages and
 *     runs event handlers
 *   - It has its own two routing tables: one for IPv4 and the other for IPv6
 * - struct wg_peer represents a peer
 *   - It has a struct work to handle handshakes and timer tasks
 *   - It has a pair of session instances (struct wg_session)
 *   - It has a pair of endpoint instances (struct wg_sockaddr)
 *     - Normally one endpoint is used; the second one is used only on
 *       peer migration (a change of the peer's IP address)
 *   - It has a list of IP addresses and subnetworks called allowedips
 *     (struct wg_allowedip)
 *     - A packet sent over a session is allowed only if its destination
 *       matches one of the IP addresses or subnetworks on the list
 * - struct wg_session represents a session of a secure tunnel with a peer
 *   - Two session instances belong to a peer: a stable session and an
 *     unstable session
 *   - The handshake process of a session always starts with the unstable
 *     instance
 *   - Once a session is established, its instance becomes stable and the
 *     other becomes unstable instead
 *   - Data messages are always sent via the stable session
 *
 * Locking notes:
 * - Each wg has a mutex(9) wg_lock, and a
rwlock(9) wg_rwlock 149 * - Changes to the peer list are serialized by wg_lock 150 * - The peer list may be read with pserialize(9) and psref(9) 151 * - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46]) 152 * => XXX replace by pserialize when routing table is psz-safe 153 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken 154 * only in thread context and serializes: 155 * - the stable and unstable session pointers 156 * - all unstable session state 157 * - Packet processing may be done in softint context: 158 * - The stable session can be read under pserialize(9) or psref(9) 159 * - The stable session is always ESTABLISHED 160 * - On a session swap, we must wait for all readers to release a 161 * reference to a stable session before changing wgs_state and 162 * session states 163 * - Lock order: wg_lock -> wgp_lock 164 */ 165 166 167 #define WGLOG(level, fmt, args...) \ 168 log(level, "%s: " fmt, __func__, ##args) 169 170 /* Debug options */ 171 #ifdef WG_DEBUG 172 /* Output debug logs */ 173 #ifndef WG_DEBUG_LOG 174 #define WG_DEBUG_LOG 175 #endif 176 /* Output trace logs */ 177 #ifndef WG_DEBUG_TRACE 178 #define WG_DEBUG_TRACE 179 #endif 180 /* Output hash values, etc. */ 181 #ifndef WG_DEBUG_DUMP 182 #define WG_DEBUG_DUMP 183 #endif 184 /* Make some internal parameters configurable for testing and debugging */ 185 #ifndef WG_DEBUG_PARAMS 186 #define WG_DEBUG_PARAMS 187 #endif 188 #endif 189 190 #ifdef WG_DEBUG_TRACE 191 #define WG_TRACE(msg) \ 192 log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg)) 193 #else 194 #define WG_TRACE(msg) __nothing 195 #endif 196 197 #ifdef WG_DEBUG_LOG 198 #define WG_DLOG(fmt, args...) log(LOG_DEBUG, "%s: " fmt, __func__, ##args) 199 #else 200 #define WG_DLOG(fmt, args...) __nothing 201 #endif 202 203 #define WG_LOG_RATECHECK(wgprc, level, fmt, args...) do { \ 204 if (ppsratecheck(&(wgprc)->wgprc_lasttime, \ 205 &(wgprc)->wgprc_curpps, 1)) { \ 206 log(level, fmt, ##args); \ 207 } \ 208 } while (0) 209 210 #ifdef WG_DEBUG_PARAMS 211 static bool wg_force_underload = false; 212 #endif 213 214 #ifdef WG_DEBUG_DUMP 215 216 static char * 217 gethexdump(const char *p, size_t n) 218 { 219 char *buf; 220 size_t i; 221 222 if (n > SIZE_MAX/3 - 1) 223 return NULL; 224 buf = kmem_alloc(3*n + 1, KM_NOSLEEP); 225 if (buf == NULL) 226 return NULL; 227 for (i = 0; i < n; i++) 228 snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]); 229 return buf; 230 } 231 232 static void 233 puthexdump(char *buf, const void *p, size_t n) 234 { 235 236 if (buf == NULL) 237 return; 238 kmem_free(buf, 3*n + 1); 239 } 240 241 #ifdef WG_RUMPKERNEL 242 static void 243 wg_dump_buf(const char *func, const char *buf, const size_t size) 244 { 245 char *hex = gethexdump(buf, size); 246 247 log(LOG_DEBUG, "%s: %s\n", func, hex ? hex : "(enomem)"); 248 puthexdump(hex, buf, size); 249 } 250 #endif 251 252 static void 253 wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash, 254 const size_t size) 255 { 256 char *hex = gethexdump(hash, size); 257 258 log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex ? 
	    hex : "(enomem)");
	puthexdump(hex, hash, size);
}

#define WG_DUMP_HASH(name, hash) \
	wg_dump_hash(__func__, name, hash, WG_HASH_LEN)
#define WG_DUMP_HASH48(name, hash) \
	wg_dump_hash(__func__, name, hash, 48)
#define WG_DUMP_BUF(buf, size) \
	wg_dump_buf(__func__, buf, size)
#else
#define WG_DUMP_HASH(name, hash) __nothing
#define WG_DUMP_HASH48(name, hash) __nothing
#define WG_DUMP_BUF(buf, size) __nothing
#endif /* WG_DEBUG_DUMP */

/* chosen somewhat arbitrarily -- fits in signed 16 bits NUL-terminated */
#define WG_MAX_PROPLEN		32766

#define WG_MTU			1420
#define WG_ALLOWEDIPS		16

#define CURVE25519_KEY_LEN	32
#define TAI64N_LEN		sizeof(uint32_t) * 3
#define POLY1305_AUTHTAG_LEN	16
#define HMAC_BLOCK_LEN		64

/* [N] 4.1: "DHLEN must be 32 or greater."  WireGuard chooses 32. */
/* [N] 4.3: Hash functions */
#define NOISE_DHLEN		32
/* [N] 4.3: "Must be 32 or 64."  WireGuard chooses 32. */
#define NOISE_HASHLEN		32
#define NOISE_BLOCKLEN		64
#define NOISE_HKDF_OUTPUT_LEN	NOISE_HASHLEN
/* [N] 5.1: "k" */
#define NOISE_CIPHER_KEY_LEN	32
/*
 * [N] 9.2: "psk"
 *          "... psk is a 32-byte secret value provided by the application."
 */
#define NOISE_PRESHARED_KEY_LEN	32

#define WG_STATIC_KEY_LEN	CURVE25519_KEY_LEN
#define WG_TIMESTAMP_LEN	TAI64N_LEN

#define WG_PRESHARED_KEY_LEN	NOISE_PRESHARED_KEY_LEN

#define WG_COOKIE_LEN		16
#define WG_MAC_LEN		16
#define WG_RANDVAL_LEN		24

#define WG_EPHEMERAL_KEY_LEN	CURVE25519_KEY_LEN
/* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */
#define WG_CHAINING_KEY_LEN	NOISE_HASHLEN
/* [N] 5.2: "h: A hash output of HASHLEN bytes" */
#define WG_HASH_LEN		NOISE_HASHLEN
#define WG_CIPHER_KEY_LEN	NOISE_CIPHER_KEY_LEN
#define WG_DH_OUTPUT_LEN	NOISE_DHLEN
#define WG_KDF_OUTPUT_LEN	NOISE_HKDF_OUTPUT_LEN
#define WG_AUTHTAG_LEN		POLY1305_AUTHTAG_LEN
#define WG_DATA_KEY_LEN		32
#define WG_SALT_LEN		24

/*
 * The protocol messages
 */
struct wg_msg {
	uint32_t	wgm_type;
} __packed;

/* [W] 5.4.2 First Message: Initiator to Responder */
struct wg_msg_init {
	uint32_t	wgmi_type;
	uint32_t	wgmi_sender;
	uint8_t		wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN];
	uint8_t		wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN];
	uint8_t		wgmi_mac1[WG_MAC_LEN];
	uint8_t		wgmi_mac2[WG_MAC_LEN];
} __packed;

/* [W] 5.4.3 Second Message: Responder to Initiator */
struct wg_msg_resp {
	uint32_t	wgmr_type;
	uint32_t	wgmr_sender;
	uint32_t	wgmr_receiver;
	uint8_t		wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN];
	uint8_t		wgmr_empty[0 + WG_AUTHTAG_LEN];
	uint8_t		wgmr_mac1[WG_MAC_LEN];
	uint8_t		wgmr_mac2[WG_MAC_LEN];
} __packed;

/* [W] 5.4.6 Subsequent Messages: Transport Data Messages */
struct wg_msg_data {
	uint32_t	wgmd_type;
	uint32_t	wgmd_receiver;
	uint64_t	wgmd_counter;
	uint32_t	wgmd_packet[0];
} __packed;

/* [W] 5.4.7 Under Load: Cookie Reply Message */
struct wg_msg_cookie {
	uint32_t	wgmc_type;
	uint32_t	wgmc_receiver;
	uint8_t		wgmc_salt[WG_SALT_LEN];
	uint8_t		wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN];
} __packed;

#define WG_MSG_TYPE_INIT		1
#define WG_MSG_TYPE_RESP		2
#define WG_MSG_TYPE_COOKIE		3
#define WG_MSG_TYPE_DATA		4
#define WG_MSG_TYPE_MAX			WG_MSG_TYPE_DATA
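
#ifdef notdef
/*
 * Illustrative sketch only -- not part of this driver and never
 * compiled.  Every WireGuard message defined above begins with a
 * 4-byte little-endian type field (the structures are filled with
 * htole32() elsewhere in this file), so a receiver can check the
 * length of an incoming message against its type before touching any
 * other field.  The helper name wg_msg_len_by_type is hypothetical
 * and does not appear elsewhere in this file; it only refers to the
 * message structures and WG_MSG_TYPE_* constants defined above.
 */
static size_t
wg_msg_len_by_type(uint32_t type_le)
{

	switch (le32toh(type_le)) {
	case WG_MSG_TYPE_INIT:
		return sizeof(struct wg_msg_init);
	case WG_MSG_TYPE_RESP:
		return sizeof(struct wg_msg_resp);
	case WG_MSG_TYPE_COOKIE:
		return sizeof(struct wg_msg_cookie);
	case WG_MSG_TYPE_DATA:
		/* Transport data messages are at least this long. */
		return sizeof(struct wg_msg_data);
	default:
		/* Unknown type: caller should drop the message. */
		return 0;
	}
}
#endif	/* notdef */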
371 372 /* Sliding windows */ 373 374 #define SLIWIN_BITS 2048u 375 #define SLIWIN_TYPE uint32_t 376 #define SLIWIN_BPW NBBY*sizeof(SLIWIN_TYPE) 377 #define SLIWIN_WORDS howmany(SLIWIN_BITS, SLIWIN_BPW) 378 #define SLIWIN_NPKT (SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE)) 379 380 struct sliwin { 381 SLIWIN_TYPE B[SLIWIN_WORDS]; 382 uint64_t T; 383 }; 384 385 static void 386 sliwin_reset(struct sliwin *W) 387 { 388 389 memset(W, 0, sizeof(*W)); 390 } 391 392 static int 393 sliwin_check_fast(const volatile struct sliwin *W, uint64_t S) 394 { 395 396 /* 397 * If it's more than one window older than the highest sequence 398 * number we've seen, reject. 399 */ 400 #ifdef __HAVE_ATOMIC64_LOADSTORE 401 if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T)) 402 return EAUTH; 403 #endif 404 405 /* 406 * Otherwise, we need to take the lock to decide, so don't 407 * reject just yet. Caller must serialize a call to 408 * sliwin_update in this case. 409 */ 410 return 0; 411 } 412 413 static int 414 sliwin_update(struct sliwin *W, uint64_t S) 415 { 416 unsigned word, bit; 417 418 /* 419 * If it's more than one window older than the highest sequence 420 * number we've seen, reject. 421 */ 422 if (S + SLIWIN_NPKT < W->T) 423 return EAUTH; 424 425 /* 426 * If it's higher than the highest sequence number we've seen, 427 * advance the window. 428 */ 429 if (S > W->T) { 430 uint64_t i = W->T / SLIWIN_BPW; 431 uint64_t j = S / SLIWIN_BPW; 432 unsigned k; 433 434 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++) 435 W->B[(i + k + 1) % SLIWIN_WORDS] = 0; 436 #ifdef __HAVE_ATOMIC64_LOADSTORE 437 atomic_store_relaxed(&W->T, S); 438 #else 439 W->T = S; 440 #endif 441 } 442 443 /* Test and set the bit -- if already set, reject. */ 444 word = (S / SLIWIN_BPW) % SLIWIN_WORDS; 445 bit = S % SLIWIN_BPW; 446 if (W->B[word] & (1UL << bit)) 447 return EAUTH; 448 W->B[word] |= 1U << bit; 449 450 /* Accept! 
*/ 451 return 0; 452 } 453 454 struct wg_session { 455 struct wg_peer *wgs_peer; 456 struct psref_target 457 wgs_psref; 458 459 int wgs_state; 460 #define WGS_STATE_UNKNOWN 0 461 #define WGS_STATE_INIT_ACTIVE 1 462 #define WGS_STATE_INIT_PASSIVE 2 463 #define WGS_STATE_ESTABLISHED 3 464 #define WGS_STATE_DESTROYING 4 465 466 time_t wgs_time_established; 467 time_t wgs_time_last_data_sent; 468 bool wgs_is_initiator; 469 470 uint32_t wgs_local_index; 471 uint32_t wgs_remote_index; 472 #ifdef __HAVE_ATOMIC64_LOADSTORE 473 volatile uint64_t 474 wgs_send_counter; 475 #else 476 kmutex_t wgs_send_counter_lock; 477 uint64_t wgs_send_counter; 478 #endif 479 480 struct { 481 kmutex_t lock; 482 struct sliwin window; 483 } *wgs_recvwin; 484 485 uint8_t wgs_handshake_hash[WG_HASH_LEN]; 486 uint8_t wgs_chaining_key[WG_CHAINING_KEY_LEN]; 487 uint8_t wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN]; 488 uint8_t wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN]; 489 uint8_t wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN]; 490 uint8_t wgs_tkey_send[WG_DATA_KEY_LEN]; 491 uint8_t wgs_tkey_recv[WG_DATA_KEY_LEN]; 492 }; 493 494 struct wg_sockaddr { 495 union { 496 struct sockaddr_storage _ss; 497 struct sockaddr _sa; 498 struct sockaddr_in _sin; 499 struct sockaddr_in6 _sin6; 500 }; 501 struct psref_target wgsa_psref; 502 }; 503 504 #define wgsatoss(wgsa) (&(wgsa)->_ss) 505 #define wgsatosa(wgsa) (&(wgsa)->_sa) 506 #define wgsatosin(wgsa) (&(wgsa)->_sin) 507 #define wgsatosin6(wgsa) (&(wgsa)->_sin6) 508 509 #define wgsa_family(wgsa) (wgsatosa(wgsa)->sa_family) 510 511 struct wg_peer; 512 struct wg_allowedip { 513 struct radix_node wga_nodes[2]; 514 struct wg_sockaddr _wga_sa_addr; 515 struct wg_sockaddr _wga_sa_mask; 516 #define wga_sa_addr _wga_sa_addr._sa 517 #define wga_sa_mask _wga_sa_mask._sa 518 519 int wga_family; 520 uint8_t wga_cidr; 521 union { 522 struct in_addr _ip4; 523 struct in6_addr _ip6; 524 } wga_addr; 525 #define wga_addr4 wga_addr._ip4 526 #define wga_addr6 wga_addr._ip6 527 528 struct wg_peer *wga_peer; 529 }; 530 531 typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN]; 532 533 struct wg_ppsratecheck { 534 struct timeval wgprc_lasttime; 535 int wgprc_curpps; 536 }; 537 538 struct wg_softc; 539 struct wg_peer { 540 struct wg_softc *wgp_sc; 541 char wgp_name[WG_PEER_NAME_MAXLEN + 1]; 542 struct pslist_entry wgp_peerlist_entry; 543 pserialize_t wgp_psz; 544 struct psref_target wgp_psref; 545 kmutex_t *wgp_lock; 546 kmutex_t *wgp_intr_lock; 547 548 uint8_t wgp_pubkey[WG_STATIC_KEY_LEN]; 549 struct wg_sockaddr *wgp_endpoint; 550 struct wg_sockaddr *wgp_endpoint0; 551 volatile unsigned wgp_endpoint_changing; 552 bool wgp_endpoint_available; 553 554 /* The preshared key (optional) */ 555 uint8_t wgp_psk[WG_PRESHARED_KEY_LEN]; 556 557 struct wg_session *wgp_session_stable; 558 struct wg_session *wgp_session_unstable; 559 560 /* first outgoing packet awaiting session initiation */ 561 struct mbuf *wgp_pending; 562 563 /* timestamp in big-endian */ 564 wg_timestamp_t wgp_timestamp_latest_init; 565 566 struct timespec wgp_last_handshake_time; 567 568 callout_t wgp_rekey_timer; 569 callout_t wgp_handshake_timeout_timer; 570 callout_t wgp_session_dtor_timer; 571 572 time_t wgp_handshake_start_time; 573 574 int wgp_n_allowedips; 575 struct wg_allowedip wgp_allowedips[WG_ALLOWEDIPS]; 576 577 time_t wgp_latest_cookie_time; 578 uint8_t wgp_latest_cookie[WG_COOKIE_LEN]; 579 uint8_t wgp_last_sent_mac1[WG_MAC_LEN]; 580 bool wgp_last_sent_mac1_valid; 581 uint8_t wgp_last_sent_cookie[WG_COOKIE_LEN]; 582 bool 
wgp_last_sent_cookie_valid; 583 584 time_t wgp_last_msg_received_time[WG_MSG_TYPE_MAX]; 585 586 time_t wgp_last_genrandval_time; 587 uint32_t wgp_randval; 588 589 struct wg_ppsratecheck wgp_ppsratecheck; 590 591 struct work wgp_work; 592 unsigned int wgp_tasks; 593 #define WGP_TASK_SEND_INIT_MESSAGE __BIT(0) 594 #define WGP_TASK_RETRY_HANDSHAKE __BIT(1) 595 #define WGP_TASK_ESTABLISH_SESSION __BIT(2) 596 #define WGP_TASK_ENDPOINT_CHANGED __BIT(3) 597 #define WGP_TASK_SEND_KEEPALIVE_MESSAGE __BIT(4) 598 #define WGP_TASK_DESTROY_PREV_SESSION __BIT(5) 599 }; 600 601 struct wg_ops; 602 603 struct wg_softc { 604 struct ifnet wg_if; 605 LIST_ENTRY(wg_softc) wg_list; 606 kmutex_t *wg_lock; 607 kmutex_t *wg_intr_lock; 608 krwlock_t *wg_rwlock; 609 610 uint8_t wg_privkey[WG_STATIC_KEY_LEN]; 611 uint8_t wg_pubkey[WG_STATIC_KEY_LEN]; 612 613 int wg_npeers; 614 struct pslist_head wg_peers; 615 struct thmap *wg_peers_bypubkey; 616 struct thmap *wg_peers_byname; 617 struct thmap *wg_sessions_byindex; 618 uint16_t wg_listen_port; 619 620 struct threadpool *wg_threadpool; 621 622 struct threadpool_job wg_job; 623 int wg_upcalls; 624 #define WG_UPCALL_INET __BIT(0) 625 #define WG_UPCALL_INET6 __BIT(1) 626 627 #ifdef INET 628 struct socket *wg_so4; 629 struct radix_node_head *wg_rtable_ipv4; 630 #endif 631 #ifdef INET6 632 struct socket *wg_so6; 633 struct radix_node_head *wg_rtable_ipv6; 634 #endif 635 636 struct wg_ppsratecheck wg_ppsratecheck; 637 638 struct wg_ops *wg_ops; 639 640 #ifdef WG_RUMPKERNEL 641 struct wg_user *wg_user; 642 #endif 643 }; 644 645 /* [W] 6.1 Preliminaries */ 646 #define WG_REKEY_AFTER_MESSAGES (1ULL << 60) 647 #define WG_REJECT_AFTER_MESSAGES (UINT64_MAX - (1 << 13)) 648 #define WG_REKEY_AFTER_TIME 120 649 #define WG_REJECT_AFTER_TIME 180 650 #define WG_REKEY_ATTEMPT_TIME 90 651 #define WG_REKEY_TIMEOUT 5 652 #define WG_KEEPALIVE_TIMEOUT 10 653 654 #define WG_COOKIE_TIME 120 655 #define WG_RANDVAL_TIME (2 * 60) 656 657 static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES; 658 static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES; 659 static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME; 660 static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME; 661 static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME; 662 static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT; 663 static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT; 664 665 static struct mbuf * 666 wg_get_mbuf(size_t, size_t); 667 668 static int wg_send_data_msg(struct wg_peer *, struct wg_session *, 669 struct mbuf *); 670 static int wg_send_cookie_msg(struct wg_softc *, struct wg_peer *, 671 const uint32_t, const uint8_t [], const struct sockaddr *); 672 static int wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *, 673 struct wg_session *, const struct wg_msg_init *); 674 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *); 675 676 static struct wg_peer * 677 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *, 678 struct psref *); 679 static struct wg_peer * 680 wg_lookup_peer_by_pubkey(struct wg_softc *, 681 const uint8_t [], struct psref *); 682 683 static struct wg_session * 684 wg_lookup_session_by_index(struct wg_softc *, 685 const uint32_t, struct psref *); 686 687 static void wg_update_endpoint_if_necessary(struct wg_peer *, 688 const struct sockaddr *); 689 690 static void wg_schedule_rekey_timer(struct wg_peer *); 691 static void wg_schedule_session_dtor_timer(struct wg_peer *); 692 693 static bool wg_is_underload(struct 
wg_softc *, struct wg_peer *, int); 694 static void wg_calculate_keys(struct wg_session *, const bool); 695 696 static void wg_clear_states(struct wg_session *); 697 698 static void wg_get_peer(struct wg_peer *, struct psref *); 699 static void wg_put_peer(struct wg_peer *, struct psref *); 700 701 static int wg_send_so(struct wg_peer *, struct mbuf *); 702 static int wg_send_udp(struct wg_peer *, struct mbuf *); 703 static int wg_output(struct ifnet *, struct mbuf *, 704 const struct sockaddr *, const struct rtentry *); 705 static void wg_input(struct ifnet *, struct mbuf *, const int); 706 static int wg_ioctl(struct ifnet *, u_long, void *); 707 static int wg_bind_port(struct wg_softc *, const uint16_t); 708 static int wg_init(struct ifnet *); 709 #ifdef ALTQ 710 static void wg_start(struct ifnet *); 711 #endif 712 static void wg_stop(struct ifnet *, int); 713 714 static void wg_peer_work(struct work *, void *); 715 static void wg_job(struct threadpool_job *); 716 static void wgintr(void *); 717 static void wg_purge_pending_packets(struct wg_peer *); 718 719 static int wg_clone_create(struct if_clone *, int); 720 static int wg_clone_destroy(struct ifnet *); 721 722 struct wg_ops { 723 int (*send_hs_msg)(struct wg_peer *, struct mbuf *); 724 int (*send_data_msg)(struct wg_peer *, struct mbuf *); 725 void (*input)(struct ifnet *, struct mbuf *, const int); 726 int (*bind_port)(struct wg_softc *, const uint16_t); 727 }; 728 729 struct wg_ops wg_ops_rumpkernel = { 730 .send_hs_msg = wg_send_so, 731 .send_data_msg = wg_send_udp, 732 .input = wg_input, 733 .bind_port = wg_bind_port, 734 }; 735 736 #ifdef WG_RUMPKERNEL 737 static bool wg_user_mode(struct wg_softc *); 738 static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *); 739 740 static int wg_send_user(struct wg_peer *, struct mbuf *); 741 static void wg_input_user(struct ifnet *, struct mbuf *, const int); 742 static int wg_bind_port_user(struct wg_softc *, const uint16_t); 743 744 struct wg_ops wg_ops_rumpuser = { 745 .send_hs_msg = wg_send_user, 746 .send_data_msg = wg_send_user, 747 .input = wg_input_user, 748 .bind_port = wg_bind_port_user, 749 }; 750 #endif 751 752 #define WG_PEER_READER_FOREACH(wgp, wg) \ 753 PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \ 754 wgp_peerlist_entry) 755 #define WG_PEER_WRITER_FOREACH(wgp, wg) \ 756 PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \ 757 wgp_peerlist_entry) 758 #define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \ 759 PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry) 760 #define WG_PEER_WRITER_REMOVE(wgp) \ 761 PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry) 762 763 struct wg_route { 764 struct radix_node wgr_nodes[2]; 765 struct wg_peer *wgr_peer; 766 }; 767 768 static struct radix_node_head * 769 wg_rnh(struct wg_softc *wg, const int family) 770 { 771 772 switch (family) { 773 case AF_INET: 774 return wg->wg_rtable_ipv4; 775 #ifdef INET6 776 case AF_INET6: 777 return wg->wg_rtable_ipv6; 778 #endif 779 default: 780 return NULL; 781 } 782 } 783 784 785 /* 786 * Global variables 787 */ 788 static volatile unsigned wg_count __cacheline_aligned; 789 790 struct psref_class *wg_psref_class __read_mostly; 791 792 static struct if_clone wg_cloner = 793 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy); 794 795 static struct pktqueue *wg_pktq __read_mostly; 796 static struct workqueue *wg_wq __read_mostly; 797 798 void wgattach(int); 799 /* ARGSUSED */ 800 void 801 wgattach(int count) 802 { 803 /* 804 * Nothing to do here, 
initialization is handled by the 805 * module initialization code in wginit() below). 806 */ 807 } 808 809 static void 810 wginit(void) 811 { 812 813 wg_psref_class = psref_class_create("wg", IPL_SOFTNET); 814 815 if_clone_attach(&wg_cloner); 816 } 817 818 /* 819 * XXX Kludge: This should just happen in wginit, but workqueue_create 820 * cannot be run until after CPUs have been detected, and wginit runs 821 * before configure. 822 */ 823 static int 824 wginitqueues(void) 825 { 826 int error __diagused; 827 828 wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL); 829 KASSERT(wg_pktq != NULL); 830 831 error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL, 832 PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU); 833 KASSERT(error == 0); 834 835 return 0; 836 } 837 838 static void 839 wg_guarantee_initialized(void) 840 { 841 static ONCE_DECL(init); 842 int error __diagused; 843 844 error = RUN_ONCE(&init, wginitqueues); 845 KASSERT(error == 0); 846 } 847 848 static int 849 wg_count_inc(void) 850 { 851 unsigned o, n; 852 853 do { 854 o = atomic_load_relaxed(&wg_count); 855 if (o == UINT_MAX) 856 return ENFILE; 857 n = o + 1; 858 } while (atomic_cas_uint(&wg_count, o, n) != o); 859 860 return 0; 861 } 862 863 static void 864 wg_count_dec(void) 865 { 866 unsigned c __diagused; 867 868 c = atomic_dec_uint_nv(&wg_count); 869 KASSERT(c != UINT_MAX); 870 } 871 872 static int 873 wgdetach(void) 874 { 875 876 /* Prevent new interface creation. */ 877 if_clone_detach(&wg_cloner); 878 879 /* Check whether there are any existing interfaces. */ 880 if (atomic_load_relaxed(&wg_count)) { 881 /* Back out -- reattach the cloner. */ 882 if_clone_attach(&wg_cloner); 883 return EBUSY; 884 } 885 886 /* No interfaces left. Nuke it. */ 887 workqueue_destroy(wg_wq); 888 pktq_destroy(wg_pktq); 889 psref_class_destroy(wg_psref_class); 890 891 return 0; 892 } 893 894 static void 895 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN], 896 uint8_t hash[WG_HASH_LEN]) 897 { 898 /* [W] 5.4: CONSTRUCTION */ 899 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; 900 /* [W] 5.4: IDENTIFIER */ 901 const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com"; 902 struct blake2s state; 903 904 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0, 905 signature, strlen(signature)); 906 907 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN); 908 memcpy(hash, ckey, WG_CHAINING_KEY_LEN); 909 910 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 911 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN); 912 blake2s_update(&state, id, strlen(id)); 913 blake2s_final(&state, hash); 914 915 WG_DUMP_HASH("ckey", ckey); 916 WG_DUMP_HASH("hash", hash); 917 } 918 919 static void 920 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[], 921 const size_t inputsize) 922 { 923 struct blake2s state; 924 925 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 926 blake2s_update(&state, hash, WG_HASH_LEN); 927 blake2s_update(&state, input, inputsize); 928 blake2s_final(&state, hash); 929 } 930 931 static void 932 wg_algo_mac(uint8_t out[], const size_t outsize, 933 const uint8_t key[], const size_t keylen, 934 const uint8_t input1[], const size_t input1len, 935 const uint8_t input2[], const size_t input2len) 936 { 937 struct blake2s state; 938 939 blake2s_init(&state, outsize, key, keylen); 940 941 blake2s_update(&state, input1, input1len); 942 if (input2 != NULL) 943 blake2s_update(&state, input2, input2len); 944 blake2s_final(&state, out); 945 } 946 947 static void 948 wg_algo_mac_mac1(uint8_t out[], const size_t outsize, 949 const uint8_t input1[], const size_t 
input1len, 950 const uint8_t input2[], const size_t input2len) 951 { 952 struct blake2s state; 953 /* [W] 5.4: LABEL-MAC1 */ 954 const char *label = "mac1----"; 955 uint8_t key[WG_HASH_LEN]; 956 957 blake2s_init(&state, sizeof(key), NULL, 0); 958 blake2s_update(&state, label, strlen(label)); 959 blake2s_update(&state, input1, input1len); 960 blake2s_final(&state, key); 961 962 blake2s_init(&state, outsize, key, sizeof(key)); 963 if (input2 != NULL) 964 blake2s_update(&state, input2, input2len); 965 blake2s_final(&state, out); 966 } 967 968 static void 969 wg_algo_mac_cookie(uint8_t out[], const size_t outsize, 970 const uint8_t input1[], const size_t input1len) 971 { 972 struct blake2s state; 973 /* [W] 5.4: LABEL-COOKIE */ 974 const char *label = "cookie--"; 975 976 blake2s_init(&state, outsize, NULL, 0); 977 blake2s_update(&state, label, strlen(label)); 978 blake2s_update(&state, input1, input1len); 979 blake2s_final(&state, out); 980 } 981 982 static void 983 wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN], 984 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]) 985 { 986 987 CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES); 988 989 cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0); 990 crypto_scalarmult_base(pubkey, privkey); 991 } 992 993 static void 994 wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN], 995 const uint8_t privkey[WG_STATIC_KEY_LEN], 996 const uint8_t pubkey[WG_STATIC_KEY_LEN]) 997 { 998 999 CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES); 1000 1001 int ret __diagused = crypto_scalarmult(out, privkey, pubkey); 1002 KASSERT(ret == 0); 1003 } 1004 1005 static void 1006 wg_algo_hmac(uint8_t out[], const size_t outlen, 1007 const uint8_t key[], const size_t keylen, 1008 const uint8_t in[], const size_t inlen) 1009 { 1010 #define IPAD 0x36 1011 #define OPAD 0x5c 1012 uint8_t hmackey[HMAC_BLOCK_LEN] = {0}; 1013 uint8_t ipad[HMAC_BLOCK_LEN]; 1014 uint8_t opad[HMAC_BLOCK_LEN]; 1015 size_t i; 1016 struct blake2s state; 1017 1018 KASSERT(outlen == WG_HASH_LEN); 1019 KASSERT(keylen <= HMAC_BLOCK_LEN); 1020 1021 memcpy(hmackey, key, keylen); 1022 1023 for (i = 0; i < sizeof(hmackey); i++) { 1024 ipad[i] = hmackey[i] ^ IPAD; 1025 opad[i] = hmackey[i] ^ OPAD; 1026 } 1027 1028 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 1029 blake2s_update(&state, ipad, sizeof(ipad)); 1030 blake2s_update(&state, in, inlen); 1031 blake2s_final(&state, out); 1032 1033 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 1034 blake2s_update(&state, opad, sizeof(opad)); 1035 blake2s_update(&state, out, WG_HASH_LEN); 1036 blake2s_final(&state, out); 1037 #undef IPAD 1038 #undef OPAD 1039 } 1040 1041 static void 1042 wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN], 1043 uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN], 1044 const uint8_t input[], const size_t inputlen) 1045 { 1046 uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1]; 1047 uint8_t one[1]; 1048 1049 /* 1050 * [N] 4.3: "an input_key_material byte sequence with length 1051 * either zero bytes, 32 bytes, or DHLEN bytes." 
1052 */ 1053 KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN); 1054 1055 WG_DUMP_HASH("ckey", ckey); 1056 if (input != NULL) 1057 WG_DUMP_HASH("input", input); 1058 wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN, 1059 input, inputlen); 1060 WG_DUMP_HASH("tmp1", tmp1); 1061 one[0] = 1; 1062 wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1063 one, sizeof(one)); 1064 WG_DUMP_HASH("out1", out1); 1065 if (out2 == NULL) 1066 return; 1067 memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN); 1068 tmp2[WG_KDF_OUTPUT_LEN] = 2; 1069 wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1070 tmp2, sizeof(tmp2)); 1071 WG_DUMP_HASH("out2", out2); 1072 if (out3 == NULL) 1073 return; 1074 memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN); 1075 tmp2[WG_KDF_OUTPUT_LEN] = 3; 1076 wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1077 tmp2, sizeof(tmp2)); 1078 WG_DUMP_HASH("out3", out3); 1079 } 1080 1081 static void __noinline 1082 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN], 1083 uint8_t cipher_key[WG_CIPHER_KEY_LEN], 1084 const uint8_t local_key[WG_STATIC_KEY_LEN], 1085 const uint8_t remote_key[WG_STATIC_KEY_LEN]) 1086 { 1087 uint8_t dhout[WG_DH_OUTPUT_LEN]; 1088 1089 wg_algo_dh(dhout, local_key, remote_key); 1090 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout)); 1091 1092 WG_DUMP_HASH("dhout", dhout); 1093 WG_DUMP_HASH("ckey", ckey); 1094 if (cipher_key != NULL) 1095 WG_DUMP_HASH("cipher_key", cipher_key); 1096 } 1097 1098 static void 1099 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[], 1100 const uint64_t counter, const uint8_t plain[], const size_t plainsize, 1101 const uint8_t auth[], size_t authlen) 1102 { 1103 uint8_t nonce[(32 + 64) / 8] = {0}; 1104 long long unsigned int outsize; 1105 int error __diagused; 1106 1107 le64enc(&nonce[4], counter); 1108 1109 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain, 1110 plainsize, auth, authlen, NULL, nonce, key); 1111 KASSERT(error == 0); 1112 KASSERT(outsize == expected_outsize); 1113 } 1114 1115 static int 1116 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[], 1117 const uint64_t counter, const uint8_t encrypted[], 1118 const size_t encryptedsize, const uint8_t auth[], size_t authlen) 1119 { 1120 uint8_t nonce[(32 + 64) / 8] = {0}; 1121 long long unsigned int outsize; 1122 int error; 1123 1124 le64enc(&nonce[4], counter); 1125 1126 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL, 1127 encrypted, encryptedsize, auth, authlen, nonce, key); 1128 if (error == 0) 1129 KASSERT(outsize == expected_outsize); 1130 return error; 1131 } 1132 1133 static void 1134 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize, 1135 const uint8_t key[], const uint8_t plain[], const size_t plainsize, 1136 const uint8_t auth[], size_t authlen, 1137 const uint8_t nonce[WG_SALT_LEN]) 1138 { 1139 long long unsigned int outsize; 1140 int error __diagused; 1141 1142 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES); 1143 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize, 1144 plain, plainsize, auth, authlen, NULL, nonce, key); 1145 KASSERT(error == 0); 1146 KASSERT(outsize == expected_outsize); 1147 } 1148 1149 static int 1150 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize, 1151 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize, 1152 const uint8_t auth[], size_t authlen, 1153 const uint8_t nonce[WG_SALT_LEN]) 1154 { 1155 long long unsigned int outsize; 
1156 int error; 1157 1158 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL, 1159 encrypted, encryptedsize, auth, authlen, nonce, key); 1160 if (error == 0) 1161 KASSERT(outsize == expected_outsize); 1162 return error; 1163 } 1164 1165 static void 1166 wg_algo_tai64n(wg_timestamp_t timestamp) 1167 { 1168 struct timespec ts; 1169 1170 /* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */ 1171 getnanotime(&ts); 1172 /* TAI64 label in external TAI64 format */ 1173 be32enc(timestamp, 0x40000000U + (uint32_t)(ts.tv_sec >> 32)); 1174 /* second beginning from 1970 TAI */ 1175 be32enc(timestamp + 4, (uint32_t)(ts.tv_sec & 0xffffffffU)); 1176 /* nanosecond in big-endian format */ 1177 be32enc(timestamp + 8, (uint32_t)ts.tv_nsec); 1178 } 1179 1180 /* 1181 * wg_get_stable_session(wgp, psref) 1182 * 1183 * Get a passive reference to the current stable session, or 1184 * return NULL if there is no current stable session. 1185 * 1186 * The pointer is always there but the session is not necessarily 1187 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However, 1188 * the session may transition from ESTABLISHED to DESTROYING while 1189 * holding the passive reference. 1190 */ 1191 static struct wg_session * 1192 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref) 1193 { 1194 int s; 1195 struct wg_session *wgs; 1196 1197 s = pserialize_read_enter(); 1198 wgs = atomic_load_consume(&wgp->wgp_session_stable); 1199 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED)) 1200 wgs = NULL; 1201 else 1202 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class); 1203 pserialize_read_exit(s); 1204 1205 return wgs; 1206 } 1207 1208 static void 1209 wg_put_session(struct wg_session *wgs, struct psref *psref) 1210 { 1211 1212 psref_release(psref, &wgs->wgs_psref, wg_psref_class); 1213 } 1214 1215 static void 1216 wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs) 1217 { 1218 struct wg_peer *wgp = wgs->wgs_peer; 1219 struct wg_session *wgs0 __diagused; 1220 void *garbage; 1221 1222 KASSERT(mutex_owned(wgp->wgp_lock)); 1223 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN); 1224 1225 /* Remove the session from the table. */ 1226 wgs0 = thmap_del(wg->wg_sessions_byindex, 1227 &wgs->wgs_local_index, sizeof(wgs->wgs_local_index)); 1228 KASSERT(wgs0 == wgs); 1229 garbage = thmap_stage_gc(wg->wg_sessions_byindex); 1230 1231 /* Wait for passive references to drain. */ 1232 pserialize_perform(wgp->wgp_psz); 1233 psref_target_destroy(&wgs->wgs_psref, wg_psref_class); 1234 1235 /* Free memory, zero state, and transition to UNKNOWN. */ 1236 thmap_gc(wg->wg_sessions_byindex, garbage); 1237 wg_clear_states(wgs); 1238 wgs->wgs_state = WGS_STATE_UNKNOWN; 1239 } 1240 1241 /* 1242 * wg_get_session_index(wg, wgs) 1243 * 1244 * Choose a session index for wgs->wgs_local_index, and store it 1245 * in wg's table of sessions by index. 1246 * 1247 * wgs must be the unstable session of its peer, and must be 1248 * transitioning out of the UNKNOWN state. 1249 */ 1250 static void 1251 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs) 1252 { 1253 struct wg_peer *wgp __diagused = wgs->wgs_peer; 1254 struct wg_session *wgs0; 1255 uint32_t index; 1256 1257 KASSERT(mutex_owned(wgp->wgp_lock)); 1258 KASSERT(wgs == wgp->wgp_session_unstable); 1259 KASSERT(wgs->wgs_state == WGS_STATE_UNKNOWN); 1260 1261 do { 1262 /* Pick a uniform random index. */ 1263 index = cprng_strong32(); 1264 1265 /* Try to take it. 
*/ 1266 wgs->wgs_local_index = index; 1267 wgs0 = thmap_put(wg->wg_sessions_byindex, 1268 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs); 1269 1270 /* If someone else beat us, start over. */ 1271 } while (__predict_false(wgs0 != wgs)); 1272 } 1273 1274 /* 1275 * wg_put_session_index(wg, wgs) 1276 * 1277 * Remove wgs from the table of sessions by index, wait for any 1278 * passive references to drain, and transition the session to the 1279 * UNKNOWN state. 1280 * 1281 * wgs must be the unstable session of its peer, and must not be 1282 * UNKNOWN or ESTABLISHED. 1283 */ 1284 static void 1285 wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs) 1286 { 1287 struct wg_peer *wgp __diagused = wgs->wgs_peer; 1288 1289 KASSERT(mutex_owned(wgp->wgp_lock)); 1290 KASSERT(wgs == wgp->wgp_session_unstable); 1291 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN); 1292 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED); 1293 1294 wg_destroy_session(wg, wgs); 1295 psref_target_init(&wgs->wgs_psref, wg_psref_class); 1296 } 1297 1298 /* 1299 * Handshake patterns 1300 * 1301 * [W] 5: "These messages use the "IK" pattern from Noise" 1302 * [N] 7.5. Interactive handshake patterns (fundamental) 1303 * "The first character refers to the initiator’s static key:" 1304 * "I = Static key for initiator Immediately transmitted to responder, 1305 * despite reduced or absent identity hiding" 1306 * "The second character refers to the responder’s static key:" 1307 * "K = Static key for responder Known to initiator" 1308 * "IK: 1309 * <- s 1310 * ... 1311 * -> e, es, s, ss 1312 * <- e, ee, se" 1313 * [N] 9.4. Pattern modifiers 1314 * "IKpsk2: 1315 * <- s 1316 * ... 1317 * -> e, es, s, ss 1318 * <- e, ee, se, psk" 1319 */ 1320 static void 1321 wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp, 1322 struct wg_session *wgs, struct wg_msg_init *wgmi) 1323 { 1324 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */ 1325 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */ 1326 uint8_t cipher_key[WG_CIPHER_KEY_LEN]; 1327 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN]; 1328 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]; 1329 1330 KASSERT(mutex_owned(wgp->wgp_lock)); 1331 KASSERT(wgs == wgp->wgp_session_unstable); 1332 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE); 1333 1334 wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT); 1335 wgmi->wgmi_sender = wgs->wgs_local_index; 1336 1337 /* [W] 5.4.2: First Message: Initiator to Responder */ 1338 1339 /* Ci := HASH(CONSTRUCTION) */ 1340 /* Hi := HASH(Ci || IDENTIFIER) */ 1341 wg_init_key_and_hash(ckey, hash); 1342 /* Hi := HASH(Hi || Sr^pub) */ 1343 wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)); 1344 1345 WG_DUMP_HASH("hash", hash); 1346 1347 /* [N] 2.2: "e" */ 1348 /* Ei^priv, Ei^pub := DH-GENERATE() */ 1349 wg_algo_generate_keypair(pubkey, privkey); 1350 /* Ci := KDF1(Ci, Ei^pub) */ 1351 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey)); 1352 /* msg.ephemeral := Ei^pub */ 1353 memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral)); 1354 /* Hi := HASH(Hi || msg.ephemeral) */ 1355 wg_algo_hash(hash, pubkey, sizeof(pubkey)); 1356 1357 WG_DUMP_HASH("ckey", ckey); 1358 WG_DUMP_HASH("hash", hash); 1359 1360 /* [N] 2.2: "es" */ 1361 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */ 1362 wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey); 1363 1364 /* [N] 2.2: "s" */ 1365 /* msg.static := AEAD(k, 0, Si^pub, Hi) */ 1366 wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static), 1367 cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey), 1368 hash, 
sizeof(hash)); 1369 /* Hi := HASH(Hi || msg.static) */ 1370 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static)); 1371 1372 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static); 1373 1374 /* [N] 2.2: "ss" */ 1375 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */ 1376 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey); 1377 1378 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */ 1379 wg_timestamp_t timestamp; 1380 wg_algo_tai64n(timestamp); 1381 wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp), 1382 cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash)); 1383 /* Hi := HASH(Hi || msg.timestamp) */ 1384 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp)); 1385 1386 /* [W] 5.4.4 Cookie MACs */ 1387 wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1), 1388 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey), 1389 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1)); 1390 /* Need mac1 to decrypt a cookie from a cookie message */ 1391 memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1, 1392 sizeof(wgp->wgp_last_sent_mac1)); 1393 wgp->wgp_last_sent_mac1_valid = true; 1394 1395 if (wgp->wgp_latest_cookie_time == 0 || 1396 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) 1397 memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2)); 1398 else { 1399 wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2), 1400 wgp->wgp_latest_cookie, WG_COOKIE_LEN, 1401 (const uint8_t *)wgmi, 1402 offsetof(struct wg_msg_init, wgmi_mac2), 1403 NULL, 0); 1404 } 1405 1406 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey)); 1407 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey)); 1408 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1409 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1410 WG_DLOG("%s: sender=%x\n", __func__, wgs->wgs_local_index); 1411 } 1412 1413 static void __noinline 1414 wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi, 1415 const struct sockaddr *src) 1416 { 1417 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */ 1418 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */ 1419 uint8_t cipher_key[WG_CIPHER_KEY_LEN]; 1420 uint8_t peer_pubkey[WG_STATIC_KEY_LEN]; 1421 struct wg_peer *wgp; 1422 struct wg_session *wgs; 1423 int error, ret; 1424 struct psref psref_peer; 1425 uint8_t mac1[WG_MAC_LEN]; 1426 1427 WG_TRACE("init msg received"); 1428 1429 wg_algo_mac_mac1(mac1, sizeof(mac1), 1430 wg->wg_pubkey, sizeof(wg->wg_pubkey), 1431 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1)); 1432 1433 /* 1434 * [W] 5.3: Denial of Service Mitigation & Cookies 1435 * "the responder, ..., must always reject messages with an invalid 1436 * msg.mac1" 1437 */ 1438 if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) { 1439 WG_DLOG("mac1 is invalid\n"); 1440 return; 1441 } 1442 1443 /* 1444 * [W] 5.4.2: First Message: Initiator to Responder 1445 * "When the responder receives this message, it does the same 1446 * operations so that its final state variables are identical, 1447 * replacing the operands of the DH function to produce equivalent 1448 * values." 1449 * Note that the following comments of operations are just copies of 1450 * the initiator's ones. 
1451 */ 1452 1453 /* Ci := HASH(CONSTRUCTION) */ 1454 /* Hi := HASH(Ci || IDENTIFIER) */ 1455 wg_init_key_and_hash(ckey, hash); 1456 /* Hi := HASH(Hi || Sr^pub) */ 1457 wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey)); 1458 1459 /* [N] 2.2: "e" */ 1460 /* Ci := KDF1(Ci, Ei^pub) */ 1461 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral, 1462 sizeof(wgmi->wgmi_ephemeral)); 1463 /* Hi := HASH(Hi || msg.ephemeral) */ 1464 wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral)); 1465 1466 WG_DUMP_HASH("ckey", ckey); 1467 1468 /* [N] 2.2: "es" */ 1469 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */ 1470 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral); 1471 1472 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static); 1473 1474 /* [N] 2.2: "s" */ 1475 /* msg.static := AEAD(k, 0, Si^pub, Hi) */ 1476 error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0, 1477 wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash)); 1478 if (error != 0) { 1479 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG, 1480 "wg_algo_aead_dec for secret key failed\n"); 1481 return; 1482 } 1483 /* Hi := HASH(Hi || msg.static) */ 1484 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static)); 1485 1486 wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer); 1487 if (wgp == NULL) { 1488 WG_DLOG("peer not found\n"); 1489 return; 1490 } 1491 1492 /* 1493 * Lock the peer to serialize access to cookie state. 1494 * 1495 * XXX Can we safely avoid holding the lock across DH? Take it 1496 * just to verify mac2 and then unlock/DH/lock? 1497 */ 1498 mutex_enter(wgp->wgp_lock); 1499 1500 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) { 1501 WG_TRACE("under load"); 1502 /* 1503 * [W] 5.3: Denial of Service Mitigation & Cookies 1504 * "the responder, ..., and when under load may reject messages 1505 * with an invalid msg.mac2. 
If the responder receives a 1506 * message with a valid msg.mac1 yet with an invalid msg.mac2, 1507 * and is under load, it may respond with a cookie reply 1508 * message" 1509 */ 1510 uint8_t zero[WG_MAC_LEN] = {0}; 1511 if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) { 1512 WG_TRACE("sending a cookie message: no cookie included"); 1513 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender, 1514 wgmi->wgmi_mac1, src); 1515 goto out; 1516 } 1517 if (!wgp->wgp_last_sent_cookie_valid) { 1518 WG_TRACE("sending a cookie message: no cookie sent ever"); 1519 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender, 1520 wgmi->wgmi_mac1, src); 1521 goto out; 1522 } 1523 uint8_t mac2[WG_MAC_LEN]; 1524 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie, 1525 WG_COOKIE_LEN, (const uint8_t *)wgmi, 1526 offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0); 1527 if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) { 1528 WG_DLOG("mac2 is invalid\n"); 1529 goto out; 1530 } 1531 WG_TRACE("under load, but continue to sending"); 1532 } 1533 1534 /* [N] 2.2: "ss" */ 1535 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */ 1536 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey); 1537 1538 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */ 1539 wg_timestamp_t timestamp; 1540 error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0, 1541 wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp), 1542 hash, sizeof(hash)); 1543 if (error != 0) { 1544 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1545 "wg_algo_aead_dec for timestamp failed\n"); 1546 goto out; 1547 } 1548 /* Hi := HASH(Hi || msg.timestamp) */ 1549 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp)); 1550 1551 /* 1552 * [W] 5.1 "The responder keeps track of the greatest timestamp 1553 * received per peer and discards packets containing 1554 * timestamps less than or equal to it." 1555 */ 1556 ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init, 1557 sizeof(timestamp)); 1558 if (ret <= 0) { 1559 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1560 "invalid init msg: timestamp is old\n"); 1561 goto out; 1562 } 1563 memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp)); 1564 1565 /* 1566 * Message is good -- we're committing to handle it now, unless 1567 * we were already initiating a session. 
1568 */ 1569 wgs = wgp->wgp_session_unstable; 1570 switch (wgs->wgs_state) { 1571 case WGS_STATE_UNKNOWN: /* new session initiated by peer */ 1572 wg_get_session_index(wg, wgs); 1573 break; 1574 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, drop */ 1575 WG_TRACE("Session already initializing, ignoring the message"); 1576 goto out; 1577 case WGS_STATE_INIT_PASSIVE: /* peer is retrying, start over */ 1578 WG_TRACE("Session already initializing, destroying old states"); 1579 wg_clear_states(wgs); 1580 /* keep session index */ 1581 break; 1582 case WGS_STATE_ESTABLISHED: /* can't happen */ 1583 panic("unstable session can't be established"); 1584 break; 1585 case WGS_STATE_DESTROYING: /* rekey initiated by peer */ 1586 WG_TRACE("Session destroying, but force to clear"); 1587 callout_stop(&wgp->wgp_session_dtor_timer); 1588 wg_clear_states(wgs); 1589 /* keep session index */ 1590 break; 1591 default: 1592 panic("invalid session state: %d", wgs->wgs_state); 1593 } 1594 wgs->wgs_state = WGS_STATE_INIT_PASSIVE; 1595 1596 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1597 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1598 memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral, 1599 sizeof(wgmi->wgmi_ephemeral)); 1600 1601 wg_update_endpoint_if_necessary(wgp, src); 1602 1603 (void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi); 1604 1605 wg_calculate_keys(wgs, false); 1606 wg_clear_states(wgs); 1607 1608 out: 1609 mutex_exit(wgp->wgp_lock); 1610 wg_put_peer(wgp, &psref_peer); 1611 } 1612 1613 static struct socket * 1614 wg_get_so_by_af(struct wg_softc *wg, const int af) 1615 { 1616 1617 switch (af) { 1618 #ifdef INET 1619 case AF_INET: 1620 return wg->wg_so4; 1621 #endif 1622 #ifdef INET6 1623 case AF_INET6: 1624 return wg->wg_so6; 1625 #endif 1626 default: 1627 panic("wg: no such af: %d", af); 1628 } 1629 } 1630 1631 static struct socket * 1632 wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa) 1633 { 1634 1635 return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa)); 1636 } 1637 1638 static struct wg_sockaddr * 1639 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref) 1640 { 1641 struct wg_sockaddr *wgsa; 1642 int s; 1643 1644 s = pserialize_read_enter(); 1645 wgsa = atomic_load_consume(&wgp->wgp_endpoint); 1646 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class); 1647 pserialize_read_exit(s); 1648 1649 return wgsa; 1650 } 1651 1652 static void 1653 wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref) 1654 { 1655 1656 psref_release(psref, &wgsa->wgsa_psref, wg_psref_class); 1657 } 1658 1659 static int 1660 wg_send_so(struct wg_peer *wgp, struct mbuf *m) 1661 { 1662 int error; 1663 struct socket *so; 1664 struct psref psref; 1665 struct wg_sockaddr *wgsa; 1666 1667 wgsa = wg_get_endpoint_sa(wgp, &psref); 1668 so = wg_get_so_by_peer(wgp, wgsa); 1669 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp); 1670 wg_put_sa(wgp, wgsa, &psref); 1671 1672 return error; 1673 } 1674 1675 static int 1676 wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp) 1677 { 1678 int error; 1679 struct mbuf *m; 1680 struct wg_msg_init *wgmi; 1681 struct wg_session *wgs; 1682 1683 KASSERT(mutex_owned(wgp->wgp_lock)); 1684 1685 wgs = wgp->wgp_session_unstable; 1686 /* XXX pull dispatch out into wg_task_send_init_message */ 1687 switch (wgs->wgs_state) { 1688 case WGS_STATE_UNKNOWN: /* new session initiated by us */ 1689 wg_get_session_index(wg, wgs); 1690 break; 1691 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, stop 
*/ 1692 WG_TRACE("Session already initializing, skip starting new one"); 1693 return EBUSY; 1694 case WGS_STATE_INIT_PASSIVE: /* peer was trying -- XXX what now? */ 1695 WG_TRACE("Session already initializing, destroying old states"); 1696 wg_clear_states(wgs); 1697 /* keep session index */ 1698 break; 1699 case WGS_STATE_ESTABLISHED: /* can't happen */ 1700 panic("unstable session can't be established"); 1701 break; 1702 case WGS_STATE_DESTROYING: /* rekey initiated by us too early */ 1703 WG_TRACE("Session destroying"); 1704 /* XXX should wait? */ 1705 return EBUSY; 1706 } 1707 wgs->wgs_state = WGS_STATE_INIT_ACTIVE; 1708 1709 m = m_gethdr(M_WAIT, MT_DATA); 1710 if (sizeof(*wgmi) > MHLEN) { 1711 m_clget(m, M_WAIT); 1712 CTASSERT(sizeof(*wgmi) <= MCLBYTES); 1713 } 1714 m->m_pkthdr.len = m->m_len = sizeof(*wgmi); 1715 wgmi = mtod(m, struct wg_msg_init *); 1716 wg_fill_msg_init(wg, wgp, wgs, wgmi); 1717 1718 error = wg->wg_ops->send_hs_msg(wgp, m); 1719 if (error == 0) { 1720 WG_TRACE("init msg sent"); 1721 1722 if (wgp->wgp_handshake_start_time == 0) 1723 wgp->wgp_handshake_start_time = time_uptime; 1724 callout_schedule(&wgp->wgp_handshake_timeout_timer, 1725 MIN(wg_rekey_timeout, (unsigned)(INT_MAX / hz)) * hz); 1726 } else { 1727 wg_put_session_index(wg, wgs); 1728 /* Initiation failed; toss packet waiting for it if any. */ 1729 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) 1730 m_freem(m); 1731 } 1732 1733 return error; 1734 } 1735 1736 static void 1737 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp, 1738 struct wg_session *wgs, struct wg_msg_resp *wgmr, 1739 const struct wg_msg_init *wgmi) 1740 { 1741 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */ 1742 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */ 1743 uint8_t cipher_key[WG_KDF_OUTPUT_LEN]; 1744 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN]; 1745 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]; 1746 1747 KASSERT(mutex_owned(wgp->wgp_lock)); 1748 KASSERT(wgs == wgp->wgp_session_unstable); 1749 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE); 1750 1751 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash)); 1752 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey)); 1753 1754 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP); 1755 wgmr->wgmr_sender = wgs->wgs_local_index; 1756 wgmr->wgmr_receiver = wgmi->wgmi_sender; 1757 1758 /* [W] 5.4.3 Second Message: Responder to Initiator */ 1759 1760 /* [N] 2.2: "e" */ 1761 /* Er^priv, Er^pub := DH-GENERATE() */ 1762 wg_algo_generate_keypair(pubkey, privkey); 1763 /* Cr := KDF1(Cr, Er^pub) */ 1764 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey)); 1765 /* msg.ephemeral := Er^pub */ 1766 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral)); 1767 /* Hr := HASH(Hr || msg.ephemeral) */ 1768 wg_algo_hash(hash, pubkey, sizeof(pubkey)); 1769 1770 WG_DUMP_HASH("ckey", ckey); 1771 WG_DUMP_HASH("hash", hash); 1772 1773 /* [N] 2.2: "ee" */ 1774 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */ 1775 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer); 1776 1777 /* [N] 2.2: "se" */ 1778 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */ 1779 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey); 1780 1781 /* [N] 9.2: "psk" */ 1782 { 1783 uint8_t kdfout[WG_KDF_OUTPUT_LEN]; 1784 /* Cr, r, k := KDF3(Cr, Q) */ 1785 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk, 1786 sizeof(wgp->wgp_psk)); 1787 /* Hr := HASH(Hr || r) */ 1788 wg_algo_hash(hash, kdfout, sizeof(kdfout)); 1789 } 1790 1791 /* msg.empty := AEAD(k, 0, e, Hr) */ 1792 wg_algo_aead_enc(wgmr->wgmr_empty, 
sizeof(wgmr->wgmr_empty), 1793 cipher_key, 0, NULL, 0, hash, sizeof(hash)); 1794 /* Hr := HASH(Hr || msg.empty) */ 1795 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty)); 1796 1797 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty); 1798 1799 /* [W] 5.4.4: Cookie MACs */ 1800 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */ 1801 wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmi->wgmi_mac1), 1802 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey), 1803 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1)); 1804 /* Need mac1 to decrypt a cookie from a cookie message */ 1805 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1, 1806 sizeof(wgp->wgp_last_sent_mac1)); 1807 wgp->wgp_last_sent_mac1_valid = true; 1808 1809 if (wgp->wgp_latest_cookie_time == 0 || 1810 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) 1811 /* msg.mac2 := 0^16 */ 1812 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2)); 1813 else { 1814 /* msg.mac2 := MAC(Lm, msg_b) */ 1815 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmi->wgmi_mac2), 1816 wgp->wgp_latest_cookie, WG_COOKIE_LEN, 1817 (const uint8_t *)wgmr, 1818 offsetof(struct wg_msg_resp, wgmr_mac2), 1819 NULL, 0); 1820 } 1821 1822 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1823 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1824 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey)); 1825 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey)); 1826 wgs->wgs_remote_index = wgmi->wgmi_sender; 1827 WG_DLOG("sender=%x\n", wgs->wgs_local_index); 1828 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index); 1829 } 1830 1831 static void 1832 wg_swap_sessions(struct wg_peer *wgp) 1833 { 1834 struct wg_session *wgs, *wgs_prev; 1835 1836 KASSERT(mutex_owned(wgp->wgp_lock)); 1837 1838 wgs = wgp->wgp_session_unstable; 1839 KASSERT(wgs->wgs_state == WGS_STATE_ESTABLISHED); 1840 1841 wgs_prev = wgp->wgp_session_stable; 1842 KASSERT(wgs_prev->wgs_state == WGS_STATE_ESTABLISHED || 1843 wgs_prev->wgs_state == WGS_STATE_UNKNOWN); 1844 atomic_store_release(&wgp->wgp_session_stable, wgs); 1845 wgp->wgp_session_unstable = wgs_prev; 1846 } 1847 1848 static void __noinline 1849 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr, 1850 const struct sockaddr *src) 1851 { 1852 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */ 1853 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Kr */ 1854 uint8_t cipher_key[WG_KDF_OUTPUT_LEN]; 1855 struct wg_peer *wgp; 1856 struct wg_session *wgs; 1857 struct psref psref; 1858 int error; 1859 uint8_t mac1[WG_MAC_LEN]; 1860 struct wg_session *wgs_prev; 1861 struct mbuf *m; 1862 1863 wg_algo_mac_mac1(mac1, sizeof(mac1), 1864 wg->wg_pubkey, sizeof(wg->wg_pubkey), 1865 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1)); 1866 1867 /* 1868 * [W] 5.3: Denial of Service Mitigation & Cookies 1869 * "the responder, ..., must always reject messages with an invalid 1870 * msg.mac1" 1871 */ 1872 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) { 1873 WG_DLOG("mac1 is invalid\n"); 1874 return; 1875 } 1876 1877 WG_TRACE("resp msg received"); 1878 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref); 1879 if (wgs == NULL) { 1880 WG_TRACE("No session found"); 1881 return; 1882 } 1883 1884 wgp = wgs->wgs_peer; 1885 1886 mutex_enter(wgp->wgp_lock); 1887 1888 /* If we weren't waiting for a handshake response, drop it. 
*/ 1889 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) { 1890 WG_TRACE("peer sent spurious handshake response, ignoring"); 1891 goto out; 1892 } 1893 1894 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) { 1895 WG_TRACE("under load"); 1896 /* 1897 * [W] 5.3: Denial of Service Mitigation & Cookies 1898 * "the responder, ..., and when under load may reject messages 1899 * with an invalid msg.mac2. If the responder receives a 1900 * message with a valid msg.mac1 yet with an invalid msg.mac2, 1901 * and is under load, it may respond with a cookie reply 1902 * message" 1903 */ 1904 uint8_t zero[WG_MAC_LEN] = {0}; 1905 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) { 1906 WG_TRACE("sending a cookie message: no cookie included"); 1907 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender, 1908 wgmr->wgmr_mac1, src); 1909 goto out; 1910 } 1911 if (!wgp->wgp_last_sent_cookie_valid) { 1912 WG_TRACE("sending a cookie message: no cookie sent ever"); 1913 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender, 1914 wgmr->wgmr_mac1, src); 1915 goto out; 1916 } 1917 uint8_t mac2[WG_MAC_LEN]; 1918 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie, 1919 WG_COOKIE_LEN, (const uint8_t *)wgmr, 1920 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0); 1921 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) { 1922 WG_DLOG("mac2 is invalid\n"); 1923 goto out; 1924 } 1925 WG_TRACE("under load, but continue to sending"); 1926 } 1927 1928 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash)); 1929 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey)); 1930 1931 /* 1932 * [W] 5.4.3 Second Message: Responder to Initiator 1933 * "When the initiator receives this message, it does the same 1934 * operations so that its final state variables are identical, 1935 * replacing the operands of the DH function to produce equivalent 1936 * values." 1937 * Note that the following comments of operations are just copies of 1938 * the initiator's ones. 
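	 * For example, where the responder derived the "ee" term as
	 * DH(Er^priv, Ei^pub), the code below derives the same shared
	 * secret as DH(Ei^priv, Er^pub), using our stored ephemeral
	 * private key and the responder's msg.ephemeral.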
1939 */ 1940 1941 /* [N] 2.2: "e" */ 1942 /* Cr := KDF1(Cr, Er^pub) */ 1943 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral, 1944 sizeof(wgmr->wgmr_ephemeral)); 1945 /* Hr := HASH(Hr || msg.ephemeral) */ 1946 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral)); 1947 1948 WG_DUMP_HASH("ckey", ckey); 1949 WG_DUMP_HASH("hash", hash); 1950 1951 /* [N] 2.2: "ee" */ 1952 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */ 1953 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv, 1954 wgmr->wgmr_ephemeral); 1955 1956 /* [N] 2.2: "se" */ 1957 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */ 1958 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral); 1959 1960 /* [N] 9.2: "psk" */ 1961 { 1962 uint8_t kdfout[WG_KDF_OUTPUT_LEN]; 1963 /* Cr, r, k := KDF3(Cr, Q) */ 1964 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk, 1965 sizeof(wgp->wgp_psk)); 1966 /* Hr := HASH(Hr || r) */ 1967 wg_algo_hash(hash, kdfout, sizeof(kdfout)); 1968 } 1969 1970 { 1971 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */ 1972 /* msg.empty := AEAD(k, 0, e, Hr) */ 1973 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty, 1974 sizeof(wgmr->wgmr_empty), hash, sizeof(hash)); 1975 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty); 1976 if (error != 0) { 1977 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1978 "wg_algo_aead_dec for empty message failed\n"); 1979 goto out; 1980 } 1981 /* Hr := HASH(Hr || msg.empty) */ 1982 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty)); 1983 } 1984 1985 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash)); 1986 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key)); 1987 wgs->wgs_remote_index = wgmr->wgmr_sender; 1988 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index); 1989 1990 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE); 1991 wgs->wgs_state = WGS_STATE_ESTABLISHED; 1992 wgs->wgs_time_established = time_uptime; 1993 wgs->wgs_time_last_data_sent = 0; 1994 wgs->wgs_is_initiator = true; 1995 wg_calculate_keys(wgs, true); 1996 wg_clear_states(wgs); 1997 WG_TRACE("WGS_STATE_ESTABLISHED"); 1998 1999 callout_stop(&wgp->wgp_handshake_timeout_timer); 2000 2001 wg_swap_sessions(wgp); 2002 KASSERT(wgs == wgp->wgp_session_stable); 2003 wgs_prev = wgp->wgp_session_unstable; 2004 getnanotime(&wgp->wgp_last_handshake_time); 2005 wgp->wgp_handshake_start_time = 0; 2006 wgp->wgp_last_sent_mac1_valid = false; 2007 wgp->wgp_last_sent_cookie_valid = false; 2008 2009 wg_schedule_rekey_timer(wgp); 2010 2011 wg_update_endpoint_if_necessary(wgp, src); 2012 2013 /* 2014 * If we had a data packet queued up, send it; otherwise send a 2015 * keepalive message -- either way we have to send something 2016 * immediately or else the responder will never answer. 2017 */ 2018 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) { 2019 kpreempt_disable(); 2020 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 2021 M_SETCTX(m, wgp); 2022 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 2023 WGLOG(LOG_ERR, "pktq full, dropping\n"); 2024 m_freem(m); 2025 } 2026 kpreempt_enable(); 2027 } else { 2028 wg_send_keepalive_msg(wgp, wgs); 2029 } 2030 2031 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) { 2032 /* Wait for wg_get_stable_session to drain. */ 2033 pserialize_perform(wgp->wgp_psz); 2034 2035 /* Transition ESTABLISHED->DESTROYING. 
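	 * The old session keeps its index for a short grace period so
	 * in-flight data packets for it can still be handled in the
	 * DESTROYING state; wg_schedule_session_dtor_timer below arranges
	 * the actual teardown.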
*/ 2036 wgs_prev->wgs_state = WGS_STATE_DESTROYING; 2037 2038 /* We can't destroy the old session immediately */ 2039 wg_schedule_session_dtor_timer(wgp); 2040 } else { 2041 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN, 2042 "state=%d", wgs_prev->wgs_state); 2043 } 2044 2045 out: 2046 mutex_exit(wgp->wgp_lock); 2047 wg_put_session(wgs, &psref); 2048 } 2049 2050 static int 2051 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp, 2052 struct wg_session *wgs, const struct wg_msg_init *wgmi) 2053 { 2054 int error; 2055 struct mbuf *m; 2056 struct wg_msg_resp *wgmr; 2057 2058 KASSERT(mutex_owned(wgp->wgp_lock)); 2059 KASSERT(wgs == wgp->wgp_session_unstable); 2060 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE); 2061 2062 m = m_gethdr(M_WAIT, MT_DATA); 2063 if (sizeof(*wgmr) > MHLEN) { 2064 m_clget(m, M_WAIT); 2065 CTASSERT(sizeof(*wgmr) <= MCLBYTES); 2066 } 2067 m->m_pkthdr.len = m->m_len = sizeof(*wgmr); 2068 wgmr = mtod(m, struct wg_msg_resp *); 2069 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi); 2070 2071 error = wg->wg_ops->send_hs_msg(wgp, m); 2072 if (error == 0) 2073 WG_TRACE("resp msg sent"); 2074 return error; 2075 } 2076 2077 static struct wg_peer * 2078 wg_lookup_peer_by_pubkey(struct wg_softc *wg, 2079 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref) 2080 { 2081 struct wg_peer *wgp; 2082 2083 int s = pserialize_read_enter(); 2084 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN); 2085 if (wgp != NULL) 2086 wg_get_peer(wgp, psref); 2087 pserialize_read_exit(s); 2088 2089 return wgp; 2090 } 2091 2092 static void 2093 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp, 2094 struct wg_msg_cookie *wgmc, const uint32_t sender, 2095 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src) 2096 { 2097 uint8_t cookie[WG_COOKIE_LEN]; 2098 uint8_t key[WG_HASH_LEN]; 2099 uint8_t addr[sizeof(struct in6_addr)]; 2100 size_t addrlen; 2101 uint16_t uh_sport; /* be */ 2102 2103 KASSERT(mutex_owned(wgp->wgp_lock)); 2104 2105 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE); 2106 wgmc->wgmc_receiver = sender; 2107 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt)); 2108 2109 /* 2110 * [W] 5.4.7: Under Load: Cookie Reply Message 2111 * "The secret variable, Rm, changes every two minutes to a 2112 * random value" 2113 */ 2114 if ((time_uptime - wgp->wgp_last_genrandval_time) > WG_RANDVAL_TIME) { 2115 wgp->wgp_randval = cprng_strong32(); 2116 wgp->wgp_last_genrandval_time = time_uptime; 2117 } 2118 2119 switch (src->sa_family) { 2120 case AF_INET: { 2121 const struct sockaddr_in *sin = satocsin(src); 2122 addrlen = sizeof(sin->sin_addr); 2123 memcpy(addr, &sin->sin_addr, addrlen); 2124 uh_sport = sin->sin_port; 2125 break; 2126 } 2127 #ifdef INET6 2128 case AF_INET6: { 2129 const struct sockaddr_in6 *sin6 = satocsin6(src); 2130 addrlen = sizeof(sin6->sin6_addr); 2131 memcpy(addr, &sin6->sin6_addr, addrlen); 2132 uh_sport = sin6->sin6_port; 2133 break; 2134 } 2135 #endif 2136 default: 2137 panic("invalid af=%d", src->sa_family); 2138 } 2139 2140 wg_algo_mac(cookie, sizeof(cookie), 2141 (const uint8_t *)&wgp->wgp_randval, sizeof(wgp->wgp_randval), 2142 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport)); 2143 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey, 2144 sizeof(wg->wg_pubkey)); 2145 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key, 2146 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt); 2147 2148 /* Need to store to calculate mac2 */ 2149 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie)); 
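	/*
	 * If the peer echoes this cookie, its retried handshake message
	 * will carry msg.mac2 = MAC(Lm, msg_b) keyed by this value, which
	 * the under-load checks in the handshake receive paths recompute
	 * from wgp_last_sent_cookie.
	 */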
2150 wgp->wgp_last_sent_cookie_valid = true; 2151 } 2152 2153 static int 2154 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp, 2155 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN], 2156 const struct sockaddr *src) 2157 { 2158 int error; 2159 struct mbuf *m; 2160 struct wg_msg_cookie *wgmc; 2161 2162 KASSERT(mutex_owned(wgp->wgp_lock)); 2163 2164 m = m_gethdr(M_WAIT, MT_DATA); 2165 if (sizeof(*wgmc) > MHLEN) { 2166 m_clget(m, M_WAIT); 2167 CTASSERT(sizeof(*wgmc) <= MCLBYTES); 2168 } 2169 m->m_pkthdr.len = m->m_len = sizeof(*wgmc); 2170 wgmc = mtod(m, struct wg_msg_cookie *); 2171 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src); 2172 2173 error = wg->wg_ops->send_hs_msg(wgp, m); 2174 if (error == 0) 2175 WG_TRACE("cookie msg sent"); 2176 return error; 2177 } 2178 2179 static bool 2180 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype) 2181 { 2182 #ifdef WG_DEBUG_PARAMS 2183 if (wg_force_underload) 2184 return true; 2185 #endif 2186 2187 /* 2188 * XXX we don't have a means of a load estimation. The purpose of 2189 * the mechanism is a DoS mitigation, so we consider frequent handshake 2190 * messages as (a kind of) load; if a message of the same type comes 2191 * to a peer within 1 second, we consider we are under load. 2192 */ 2193 time_t last = wgp->wgp_last_msg_received_time[msgtype]; 2194 wgp->wgp_last_msg_received_time[msgtype] = time_uptime; 2195 return (time_uptime - last) == 0; 2196 } 2197 2198 static void 2199 wg_calculate_keys(struct wg_session *wgs, const bool initiator) 2200 { 2201 2202 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock)); 2203 2204 /* 2205 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e) 2206 */ 2207 if (initiator) { 2208 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL, 2209 wgs->wgs_chaining_key, NULL, 0); 2210 } else { 2211 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL, 2212 wgs->wgs_chaining_key, NULL, 0); 2213 } 2214 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send); 2215 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv); 2216 } 2217 2218 static uint64_t 2219 wg_session_get_send_counter(struct wg_session *wgs) 2220 { 2221 #ifdef __HAVE_ATOMIC64_LOADSTORE 2222 return atomic_load_relaxed(&wgs->wgs_send_counter); 2223 #else 2224 uint64_t send_counter; 2225 2226 mutex_enter(&wgs->wgs_send_counter_lock); 2227 send_counter = wgs->wgs_send_counter; 2228 mutex_exit(&wgs->wgs_send_counter_lock); 2229 2230 return send_counter; 2231 #endif 2232 } 2233 2234 static uint64_t 2235 wg_session_inc_send_counter(struct wg_session *wgs) 2236 { 2237 #ifdef __HAVE_ATOMIC64_LOADSTORE 2238 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1; 2239 #else 2240 uint64_t send_counter; 2241 2242 mutex_enter(&wgs->wgs_send_counter_lock); 2243 send_counter = wgs->wgs_send_counter++; 2244 mutex_exit(&wgs->wgs_send_counter_lock); 2245 2246 return send_counter; 2247 #endif 2248 } 2249 2250 static void 2251 wg_clear_states(struct wg_session *wgs) 2252 { 2253 2254 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock)); 2255 2256 wgs->wgs_send_counter = 0; 2257 sliwin_reset(&wgs->wgs_recvwin->window); 2258 2259 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v)) 2260 wgs_clear(handshake_hash); 2261 wgs_clear(chaining_key); 2262 wgs_clear(ephemeral_key_pub); 2263 wgs_clear(ephemeral_key_priv); 2264 wgs_clear(ephemeral_key_peer); 2265 #undef wgs_clear 2266 } 2267 2268 static struct wg_session * 2269 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index, 2270 struct psref *psref) 2271 { 2272 
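	/*
	 * Lock-free lookup: the thmap read sits in a pserialize read
	 * section and a psref is acquired before that section ends, so
	 * the session stays valid for the caller until wg_put_session().
	 * Typical caller pattern (cf. wg_handle_msg_data):
	 *
	 *	wgs = wg_lookup_session_by_index(wg, index, &psref);
	 *	if (wgs == NULL)
	 *		return;
	 *	...
	 *	wg_put_session(wgs, &psref);
	 */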
struct wg_session *wgs; 2273 2274 int s = pserialize_read_enter(); 2275 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index); 2276 if (wgs != NULL) { 2277 KASSERT(atomic_load_relaxed(&wgs->wgs_state) != 2278 WGS_STATE_UNKNOWN); 2279 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class); 2280 } 2281 pserialize_read_exit(s); 2282 2283 return wgs; 2284 } 2285 2286 static void 2287 wg_schedule_rekey_timer(struct wg_peer *wgp) 2288 { 2289 int timeout = MIN(wg_rekey_after_time, (unsigned)(INT_MAX / hz)); 2290 2291 callout_schedule(&wgp->wgp_rekey_timer, timeout * hz); 2292 } 2293 2294 static void 2295 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs) 2296 { 2297 struct mbuf *m; 2298 2299 /* 2300 * [W] 6.5 Passive Keepalive 2301 * "A keepalive message is simply a transport data message with 2302 * a zero-length encapsulated encrypted inner-packet." 2303 */ 2304 m = m_gethdr(M_WAIT, MT_DATA); 2305 wg_send_data_msg(wgp, wgs, m); 2306 } 2307 2308 static bool 2309 wg_need_to_send_init_message(struct wg_session *wgs) 2310 { 2311 /* 2312 * [W] 6.2 Transport Message Limits 2313 * "if a peer is the initiator of a current secure session, 2314 * WireGuard will send a handshake initiation message to begin 2315 * a new secure session ... if after receiving a transport data 2316 * message, the current secure session is (REJECT-AFTER-TIME − 2317 * KEEPALIVE-TIMEOUT − REKEY-TIMEOUT) seconds old and it has 2318 * not yet acted upon this event." 2319 */ 2320 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 && 2321 (time_uptime - wgs->wgs_time_established) >= 2322 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout); 2323 } 2324 2325 static void 2326 wg_schedule_peer_task(struct wg_peer *wgp, unsigned int task) 2327 { 2328 2329 mutex_enter(wgp->wgp_intr_lock); 2330 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task); 2331 if (wgp->wgp_tasks == 0) 2332 /* 2333 * XXX If the current CPU is already loaded -- e.g., if 2334 * there's already a bunch of handshakes queued up -- 2335 * consider tossing this over to another CPU to 2336 * distribute the load. 
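		 * Note that wgp_tasks is a bitmask of WGP_TASK_* flags: the
		 * work is enqueued only on the 0 -> nonzero transition, and
		 * wg_peer_work() drains every accumulated flag in one pass.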
2337 */ 2338 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL); 2339 wgp->wgp_tasks |= task; 2340 mutex_exit(wgp->wgp_intr_lock); 2341 } 2342 2343 static void 2344 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new) 2345 { 2346 struct wg_sockaddr *wgsa_prev; 2347 2348 WG_TRACE("Changing endpoint"); 2349 2350 memcpy(wgp->wgp_endpoint0, new, new->sa_len); 2351 wgsa_prev = wgp->wgp_endpoint; 2352 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0); 2353 wgp->wgp_endpoint0 = wgsa_prev; 2354 atomic_store_release(&wgp->wgp_endpoint_available, true); 2355 2356 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED); 2357 } 2358 2359 static bool 2360 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af) 2361 { 2362 uint16_t packet_len; 2363 const struct ip *ip; 2364 2365 if (__predict_false(decrypted_len < sizeof(struct ip))) 2366 return false; 2367 2368 ip = (const struct ip *)packet; 2369 if (ip->ip_v == 4) 2370 *af = AF_INET; 2371 else if (ip->ip_v == 6) 2372 *af = AF_INET6; 2373 else 2374 return false; 2375 2376 WG_DLOG("af=%d\n", *af); 2377 2378 switch (*af) { 2379 #ifdef INET 2380 case AF_INET: 2381 packet_len = ntohs(ip->ip_len); 2382 break; 2383 #endif 2384 #ifdef INET6 2385 case AF_INET6: { 2386 const struct ip6_hdr *ip6; 2387 2388 if (__predict_false(decrypted_len < sizeof(struct ip6_hdr))) 2389 return false; 2390 2391 ip6 = (const struct ip6_hdr *)packet; 2392 packet_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen); 2393 break; 2394 } 2395 #endif 2396 default: 2397 return false; 2398 } 2399 2400 WG_DLOG("packet_len=%u\n", packet_len); 2401 if (packet_len > decrypted_len) 2402 return false; 2403 2404 return true; 2405 } 2406 2407 static bool 2408 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected, 2409 int af, char *packet) 2410 { 2411 struct sockaddr_storage ss; 2412 struct sockaddr *sa; 2413 struct psref psref; 2414 struct wg_peer *wgp; 2415 bool ok; 2416 2417 /* 2418 * II CRYPTOKEY ROUTING 2419 * "it will only accept it if its source IP resolves in the 2420 * table to the public key used in the secure session for 2421 * decrypting it." 
2422 */ 2423 2424 if (af == AF_INET) { 2425 const struct ip *ip = (const struct ip *)packet; 2426 struct sockaddr_in *sin = (struct sockaddr_in *)&ss; 2427 sockaddr_in_init(sin, &ip->ip_src, 0); 2428 sa = sintosa(sin); 2429 #ifdef INET6 2430 } else { 2431 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet; 2432 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss; 2433 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0); 2434 sa = sin6tosa(sin6); 2435 #endif 2436 } 2437 2438 wgp = wg_pick_peer_by_sa(wg, sa, &psref); 2439 ok = (wgp == wgp_expected); 2440 if (wgp != NULL) 2441 wg_put_peer(wgp, &psref); 2442 2443 return ok; 2444 } 2445 2446 static void 2447 wg_session_dtor_timer(void *arg) 2448 { 2449 struct wg_peer *wgp = arg; 2450 2451 WG_TRACE("enter"); 2452 2453 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION); 2454 } 2455 2456 static void 2457 wg_schedule_session_dtor_timer(struct wg_peer *wgp) 2458 { 2459 2460 /* 1 second grace period */ 2461 callout_schedule(&wgp->wgp_session_dtor_timer, hz); 2462 } 2463 2464 static bool 2465 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2) 2466 { 2467 if (sa1->sa_family != sa2->sa_family) 2468 return false; 2469 2470 switch (sa1->sa_family) { 2471 #ifdef INET 2472 case AF_INET: 2473 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port; 2474 #endif 2475 #ifdef INET6 2476 case AF_INET6: 2477 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port; 2478 #endif 2479 default: 2480 return false; 2481 } 2482 } 2483 2484 static void 2485 wg_update_endpoint_if_necessary(struct wg_peer *wgp, 2486 const struct sockaddr *src) 2487 { 2488 struct wg_sockaddr *wgsa; 2489 struct psref psref; 2490 2491 wgsa = wg_get_endpoint_sa(wgp, &psref); 2492 2493 #ifdef WG_DEBUG_LOG 2494 char oldaddr[128], newaddr[128]; 2495 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr)); 2496 sockaddr_format(src, newaddr, sizeof(newaddr)); 2497 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr); 2498 #endif 2499 2500 /* 2501 * III: "Since the packet has authenticated correctly, the source IP of 2502 * the outer UDP/IP packet is used to update the endpoint for peer..." 2503 */ 2504 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 || 2505 !sockaddr_port_match(src, wgsatosa(wgsa)))) { 2506 /* XXX We can't change the endpoint twice in a short period */ 2507 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) { 2508 wg_change_endpoint(wgp, src); 2509 } 2510 } 2511 2512 wg_put_sa(wgp, wgsa, &psref); 2513 } 2514 2515 static void __noinline 2516 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m, 2517 const struct sockaddr *src) 2518 { 2519 struct wg_msg_data *wgmd; 2520 char *encrypted_buf = NULL, *decrypted_buf; 2521 size_t encrypted_len, decrypted_len; 2522 struct wg_session *wgs; 2523 struct wg_peer *wgp; 2524 int state; 2525 size_t mlen; 2526 struct psref psref; 2527 int error, af; 2528 bool success, free_encrypted_buf = false, ok; 2529 struct mbuf *n; 2530 2531 KASSERT(m->m_len >= sizeof(struct wg_msg_data)); 2532 wgmd = mtod(m, struct wg_msg_data *); 2533 2534 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA)); 2535 WG_TRACE("data"); 2536 2537 /* Find the putative session, or drop. */ 2538 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref); 2539 if (wgs == NULL) { 2540 WG_TRACE("No session found"); 2541 m_freem(m); 2542 return; 2543 } 2544 2545 /* 2546 * We are only ready to handle data when in INIT_PASSIVE, 2547 * ESTABLISHED, or DESTROYING. 
All transitions out of that 2548 * state dissociate the session index and drain psrefs. 2549 */ 2550 state = atomic_load_relaxed(&wgs->wgs_state); 2551 switch (state) { 2552 case WGS_STATE_UNKNOWN: 2553 panic("wg session %p in unknown state has session index %u", 2554 wgs, wgmd->wgmd_receiver); 2555 case WGS_STATE_INIT_ACTIVE: 2556 WG_TRACE("not yet ready for data"); 2557 goto out; 2558 case WGS_STATE_INIT_PASSIVE: 2559 case WGS_STATE_ESTABLISHED: 2560 case WGS_STATE_DESTROYING: 2561 break; 2562 } 2563 2564 /* 2565 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and 2566 * to update the endpoint if authentication succeeds. 2567 */ 2568 wgp = wgs->wgs_peer; 2569 2570 /* 2571 * Reject outrageously wrong sequence numbers before doing any 2572 * crypto work or taking any locks. 2573 */ 2574 error = sliwin_check_fast(&wgs->wgs_recvwin->window, 2575 le64toh(wgmd->wgmd_counter)); 2576 if (error) { 2577 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2578 "out-of-window packet: %"PRIu64"\n", 2579 le64toh(wgmd->wgmd_counter)); 2580 goto out; 2581 } 2582 2583 /* Ensure the payload and authenticator are contiguous. */ 2584 mlen = m_length(m); 2585 encrypted_len = mlen - sizeof(*wgmd); 2586 if (encrypted_len < WG_AUTHTAG_LEN) { 2587 WG_DLOG("Short encrypted_len: %lu\n", encrypted_len); 2588 goto out; 2589 } 2590 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len); 2591 if (success) { 2592 encrypted_buf = mtod(m, char *) + sizeof(*wgmd); 2593 } else { 2594 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP); 2595 if (encrypted_buf == NULL) { 2596 WG_DLOG("failed to allocate encrypted_buf\n"); 2597 goto out; 2598 } 2599 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf); 2600 free_encrypted_buf = true; 2601 } 2602 /* m_ensure_contig may change m regardless of its result */ 2603 KASSERT(m->m_len >= sizeof(*wgmd)); 2604 wgmd = mtod(m, struct wg_msg_data *); 2605 2606 /* 2607 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid 2608 * a zero-length buffer (XXX). Drop if plaintext is longer 2609 * than MCLBYTES (XXX). 2610 */ 2611 decrypted_len = encrypted_len - WG_AUTHTAG_LEN; 2612 if (decrypted_len > MCLBYTES) { 2613 /* FIXME handle larger data than MCLBYTES */ 2614 WG_DLOG("couldn't handle larger data than MCLBYTES\n"); 2615 goto out; 2616 } 2617 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN); 2618 if (n == NULL) { 2619 WG_DLOG("wg_get_mbuf failed\n"); 2620 goto out; 2621 } 2622 decrypted_buf = mtod(n, char *); 2623 2624 /* Decrypt and verify the packet. */ 2625 WG_DLOG("mlen=%lu, encrypted_len=%lu\n", mlen, encrypted_len); 2626 error = wg_algo_aead_dec(decrypted_buf, 2627 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */, 2628 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf, 2629 encrypted_len, NULL, 0); 2630 if (error != 0) { 2631 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2632 "failed to wg_algo_aead_dec\n"); 2633 m_freem(n); 2634 goto out; 2635 } 2636 WG_DLOG("outsize=%u\n", (u_int)decrypted_len); 2637 2638 /* Packet is genuine. Reject it if a replay or just too old. */ 2639 mutex_enter(&wgs->wgs_recvwin->lock); 2640 error = sliwin_update(&wgs->wgs_recvwin->window, 2641 le64toh(wgmd->wgmd_counter)); 2642 mutex_exit(&wgs->wgs_recvwin->lock); 2643 if (error) { 2644 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2645 "replay or out-of-window packet: %"PRIu64"\n", 2646 le64toh(wgmd->wgmd_counter)); 2647 m_freem(n); 2648 goto out; 2649 } 2650 2651 /* We're done with m now; free it and chuck the pointers. 
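	 * The decrypted payload now lives in n (decrypted_buf).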
*/ 2652 m_freem(m); 2653 m = NULL; 2654 wgmd = NULL; 2655 2656 /* 2657 * Validate the encapsulated packet header and get the address 2658 * family, or drop. 2659 */ 2660 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af); 2661 if (!ok) { 2662 m_freem(n); 2663 goto out; 2664 } 2665 2666 /* 2667 * The packet is genuine. Update the peer's endpoint if the 2668 * source address changed. 2669 * 2670 * XXX How to prevent DoS by replaying genuine packets from the 2671 * wrong source address? 2672 */ 2673 wg_update_endpoint_if_necessary(wgp, src); 2674 2675 /* Submit it into our network stack if routable. */ 2676 ok = wg_validate_route(wg, wgp, af, decrypted_buf); 2677 if (ok) { 2678 wg->wg_ops->input(&wg->wg_if, n, af); 2679 } else { 2680 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2681 "invalid source address\n"); 2682 m_freem(n); 2683 /* 2684 * The inner address is invalid however the session is valid 2685 * so continue the session processing below. 2686 */ 2687 } 2688 n = NULL; 2689 2690 /* Update the state machine if necessary. */ 2691 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) { 2692 /* 2693 * We were waiting for the initiator to send their 2694 * first data transport message, and that has happened. 2695 * Schedule a task to establish this session. 2696 */ 2697 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION); 2698 } else { 2699 if (__predict_false(wg_need_to_send_init_message(wgs))) { 2700 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 2701 } 2702 /* 2703 * [W] 6.5 Passive Keepalive 2704 * "If a peer has received a validly-authenticated transport 2705 * data message (section 5.4.6), but does not have any packets 2706 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends 2707 * a keepalive message." 2708 */ 2709 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n", 2710 (uintmax_t)time_uptime, 2711 (uintmax_t)wgs->wgs_time_last_data_sent); 2712 if ((time_uptime - wgs->wgs_time_last_data_sent) >= 2713 wg_keepalive_timeout) { 2714 WG_TRACE("Schedule sending keepalive message"); 2715 /* 2716 * We can't send a keepalive message here to avoid 2717 * a deadlock; we already hold the solock of a socket 2718 * that is used to send the message. 2719 */ 2720 wg_schedule_peer_task(wgp, 2721 WGP_TASK_SEND_KEEPALIVE_MESSAGE); 2722 } 2723 } 2724 out: 2725 wg_put_session(wgs, &psref); 2726 if (m != NULL) 2727 m_freem(m); 2728 if (free_encrypted_buf) 2729 kmem_intr_free(encrypted_buf, encrypted_len); 2730 } 2731 2732 static void __noinline 2733 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc) 2734 { 2735 struct wg_session *wgs; 2736 struct wg_peer *wgp; 2737 struct psref psref; 2738 int error; 2739 uint8_t key[WG_HASH_LEN]; 2740 uint8_t cookie[WG_COOKIE_LEN]; 2741 2742 WG_TRACE("cookie msg received"); 2743 2744 /* Find the putative session. */ 2745 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref); 2746 if (wgs == NULL) { 2747 WG_TRACE("No session found"); 2748 return; 2749 } 2750 2751 /* Lock the peer so we can update the cookie state. */ 2752 wgp = wgs->wgs_peer; 2753 mutex_enter(wgp->wgp_lock); 2754 2755 if (!wgp->wgp_last_sent_mac1_valid) { 2756 WG_TRACE("No valid mac1 sent (or expired)"); 2757 goto out; 2758 } 2759 2760 /* Decrypt the cookie and store it for later handshake retry. 
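	 * The cookie is sealed with XChaCha20-Poly1305 under a key
	 * derived from the peer's static public key (wg_algo_mac_cookie),
	 * with wgmc_salt as the nonce and the mac1 we last sent as the
	 * additional authenticated data, matching the call below.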
*/ 2761 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey, 2762 sizeof(wgp->wgp_pubkey)); 2763 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key, 2764 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), 2765 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1), 2766 wgmc->wgmc_salt); 2767 if (error != 0) { 2768 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2769 "wg_algo_aead_dec for cookie failed: error=%d\n", error); 2770 goto out; 2771 } 2772 /* 2773 * [W] 6.6: Interaction with Cookie Reply System 2774 * "it should simply store the decrypted cookie value from the cookie 2775 * reply message, and wait for the expiration of the REKEY-TIMEOUT 2776 * timer for retrying a handshake initiation message." 2777 */ 2778 wgp->wgp_latest_cookie_time = time_uptime; 2779 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie)); 2780 out: 2781 mutex_exit(wgp->wgp_lock); 2782 wg_put_session(wgs, &psref); 2783 } 2784 2785 static struct mbuf * 2786 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m) 2787 { 2788 struct wg_msg wgm; 2789 size_t mbuflen; 2790 size_t msglen; 2791 2792 /* 2793 * Get the mbuf chain length. It is already guaranteed, by 2794 * wg_overudp_cb, to be large enough for a struct wg_msg. 2795 */ 2796 mbuflen = m_length(m); 2797 KASSERT(mbuflen >= sizeof(struct wg_msg)); 2798 2799 /* 2800 * Copy the message header (32-bit message type) out -- we'll 2801 * worry about contiguity and alignment later. 2802 */ 2803 m_copydata(m, 0, sizeof(wgm), &wgm); 2804 switch (le32toh(wgm.wgm_type)) { 2805 case WG_MSG_TYPE_INIT: 2806 msglen = sizeof(struct wg_msg_init); 2807 break; 2808 case WG_MSG_TYPE_RESP: 2809 msglen = sizeof(struct wg_msg_resp); 2810 break; 2811 case WG_MSG_TYPE_COOKIE: 2812 msglen = sizeof(struct wg_msg_cookie); 2813 break; 2814 case WG_MSG_TYPE_DATA: 2815 msglen = sizeof(struct wg_msg_data); 2816 break; 2817 default: 2818 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG, 2819 "Unexpected msg type: %u\n", le32toh(wgm.wgm_type)); 2820 goto error; 2821 } 2822 2823 /* Verify the mbuf chain is long enough for this type of message. */ 2824 if (__predict_false(mbuflen < msglen)) { 2825 WG_DLOG("Invalid msg size: mbuflen=%lu type=%u\n", mbuflen, 2826 le32toh(wgm.wgm_type)); 2827 goto error; 2828 } 2829 2830 /* Make the message header contiguous if necessary. 
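	 * m_pullup may allocate a new mbuf or fail; on failure it has
	 * already freed the chain, so we just return NULL.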
*/ 2831 if (__predict_false(m->m_len < msglen)) { 2832 m = m_pullup(m, msglen); 2833 if (m == NULL) 2834 return NULL; 2835 } 2836 2837 return m; 2838 2839 error: 2840 m_freem(m); 2841 return NULL; 2842 } 2843 2844 static void 2845 wg_handle_packet(struct wg_softc *wg, struct mbuf *m, 2846 const struct sockaddr *src) 2847 { 2848 struct wg_msg *wgm; 2849 2850 m = wg_validate_msg_header(wg, m); 2851 if (__predict_false(m == NULL)) 2852 return; 2853 2854 KASSERT(m->m_len >= sizeof(struct wg_msg)); 2855 wgm = mtod(m, struct wg_msg *); 2856 switch (le32toh(wgm->wgm_type)) { 2857 case WG_MSG_TYPE_INIT: 2858 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src); 2859 break; 2860 case WG_MSG_TYPE_RESP: 2861 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src); 2862 break; 2863 case WG_MSG_TYPE_COOKIE: 2864 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm); 2865 break; 2866 case WG_MSG_TYPE_DATA: 2867 wg_handle_msg_data(wg, m, src); 2868 /* wg_handle_msg_data frees m for us */ 2869 return; 2870 default: 2871 panic("invalid message type: %d", le32toh(wgm->wgm_type)); 2872 } 2873 2874 m_freem(m); 2875 } 2876 2877 static void 2878 wg_receive_packets(struct wg_softc *wg, const int af) 2879 { 2880 2881 for (;;) { 2882 int error, flags; 2883 struct socket *so; 2884 struct mbuf *m = NULL; 2885 struct uio dummy_uio; 2886 struct mbuf *paddr = NULL; 2887 struct sockaddr *src; 2888 2889 so = wg_get_so_by_af(wg, af); 2890 flags = MSG_DONTWAIT; 2891 dummy_uio.uio_resid = 1000000000; 2892 2893 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL, 2894 &flags); 2895 if (error || m == NULL) { 2896 //if (error == EWOULDBLOCK) 2897 return; 2898 } 2899 2900 KASSERT(paddr != NULL); 2901 KASSERT(paddr->m_len >= sizeof(struct sockaddr)); 2902 src = mtod(paddr, struct sockaddr *); 2903 2904 wg_handle_packet(wg, m, src); 2905 } 2906 } 2907 2908 static void 2909 wg_get_peer(struct wg_peer *wgp, struct psref *psref) 2910 { 2911 2912 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class); 2913 } 2914 2915 static void 2916 wg_put_peer(struct wg_peer *wgp, struct psref *psref) 2917 { 2918 2919 psref_release(psref, &wgp->wgp_psref, wg_psref_class); 2920 } 2921 2922 static void 2923 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp) 2924 { 2925 struct wg_session *wgs; 2926 2927 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE"); 2928 2929 KASSERT(mutex_owned(wgp->wgp_lock)); 2930 2931 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) { 2932 WGLOG(LOG_DEBUG, "No endpoint available\n"); 2933 /* XXX should do something? */ 2934 return; 2935 } 2936 2937 wgs = wgp->wgp_session_stable; 2938 if (wgs->wgs_state == WGS_STATE_UNKNOWN) { 2939 /* XXX What if the unstable session is already INIT_ACTIVE? 
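	 * (wg_send_handshake_msg_init detects that case itself and
	 * returns EBUSY without starting another handshake.)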
*/ 2940 wg_send_handshake_msg_init(wg, wgp); 2941 } else { 2942 /* rekey */ 2943 wgs = wgp->wgp_session_unstable; 2944 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) 2945 wg_send_handshake_msg_init(wg, wgp); 2946 } 2947 } 2948 2949 static void 2950 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp) 2951 { 2952 struct wg_session *wgs; 2953 2954 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE"); 2955 2956 KASSERT(mutex_owned(wgp->wgp_lock)); 2957 KASSERT(wgp->wgp_handshake_start_time != 0); 2958 2959 wgs = wgp->wgp_session_unstable; 2960 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) 2961 return; 2962 2963 /* 2964 * XXX no real need to assign a new index here, but we do need 2965 * to transition to UNKNOWN temporarily 2966 */ 2967 wg_put_session_index(wg, wgs); 2968 2969 /* [W] 6.4 Handshake Initiation Retransmission */ 2970 if ((time_uptime - wgp->wgp_handshake_start_time) > 2971 wg_rekey_attempt_time) { 2972 /* Give up handshaking */ 2973 wgp->wgp_handshake_start_time = 0; 2974 WG_TRACE("give up"); 2975 2976 /* 2977 * If a new data packet comes, handshaking will be retried 2978 * and a new session would be established at that time, 2979 * however we don't want to send pending packets then. 2980 */ 2981 wg_purge_pending_packets(wgp); 2982 return; 2983 } 2984 2985 wg_task_send_init_message(wg, wgp); 2986 } 2987 2988 static void 2989 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp) 2990 { 2991 struct wg_session *wgs, *wgs_prev; 2992 struct mbuf *m; 2993 2994 KASSERT(mutex_owned(wgp->wgp_lock)); 2995 2996 wgs = wgp->wgp_session_unstable; 2997 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE) 2998 /* XXX Can this happen? */ 2999 return; 3000 3001 wgs->wgs_state = WGS_STATE_ESTABLISHED; 3002 wgs->wgs_time_established = time_uptime; 3003 wgs->wgs_time_last_data_sent = 0; 3004 wgs->wgs_is_initiator = false; 3005 WG_TRACE("WGS_STATE_ESTABLISHED"); 3006 3007 wg_swap_sessions(wgp); 3008 KASSERT(wgs == wgp->wgp_session_stable); 3009 wgs_prev = wgp->wgp_session_unstable; 3010 getnanotime(&wgp->wgp_last_handshake_time); 3011 wgp->wgp_handshake_start_time = 0; 3012 wgp->wgp_last_sent_mac1_valid = false; 3013 wgp->wgp_last_sent_cookie_valid = false; 3014 3015 /* If we had a data packet queued up, send it. */ 3016 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) { 3017 kpreempt_disable(); 3018 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 3019 M_SETCTX(m, wgp); 3020 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 3021 WGLOG(LOG_ERR, "pktq full, dropping\n"); 3022 m_freem(m); 3023 } 3024 kpreempt_enable(); 3025 } 3026 3027 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) { 3028 /* Wait for wg_get_stable_session to drain. */ 3029 pserialize_perform(wgp->wgp_psz); 3030 3031 /* Transition ESTABLISHED->DESTROYING. 
*/ 3032 wgs_prev->wgs_state = WGS_STATE_DESTROYING; 3033 3034 /* We can't destroy the old session immediately */ 3035 wg_schedule_session_dtor_timer(wgp); 3036 } else { 3037 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN, 3038 "state=%d", wgs_prev->wgs_state); 3039 wg_clear_states(wgs_prev); 3040 wgs_prev->wgs_state = WGS_STATE_UNKNOWN; 3041 } 3042 } 3043 3044 static void 3045 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp) 3046 { 3047 3048 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED"); 3049 3050 KASSERT(mutex_owned(wgp->wgp_lock)); 3051 3052 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) { 3053 pserialize_perform(wgp->wgp_psz); 3054 mutex_exit(wgp->wgp_lock); 3055 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, 3056 wg_psref_class); 3057 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, 3058 wg_psref_class); 3059 mutex_enter(wgp->wgp_lock); 3060 atomic_store_release(&wgp->wgp_endpoint_changing, 0); 3061 } 3062 } 3063 3064 static void 3065 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp) 3066 { 3067 struct wg_session *wgs; 3068 3069 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE"); 3070 3071 KASSERT(mutex_owned(wgp->wgp_lock)); 3072 3073 wgs = wgp->wgp_session_stable; 3074 if (wgs->wgs_state != WGS_STATE_ESTABLISHED) 3075 return; 3076 3077 wg_send_keepalive_msg(wgp, wgs); 3078 } 3079 3080 static void 3081 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp) 3082 { 3083 struct wg_session *wgs; 3084 3085 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION"); 3086 3087 KASSERT(mutex_owned(wgp->wgp_lock)); 3088 3089 wgs = wgp->wgp_session_unstable; 3090 if (wgs->wgs_state == WGS_STATE_DESTROYING) { 3091 wg_put_session_index(wg, wgs); 3092 } 3093 } 3094 3095 static void 3096 wg_peer_work(struct work *wk, void *cookie) 3097 { 3098 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work); 3099 struct wg_softc *wg = wgp->wgp_sc; 3100 unsigned int tasks; 3101 3102 mutex_enter(wgp->wgp_intr_lock); 3103 while ((tasks = wgp->wgp_tasks) != 0) { 3104 wgp->wgp_tasks = 0; 3105 mutex_exit(wgp->wgp_intr_lock); 3106 3107 mutex_enter(wgp->wgp_lock); 3108 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE)) 3109 wg_task_send_init_message(wg, wgp); 3110 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE)) 3111 wg_task_retry_handshake(wg, wgp); 3112 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION)) 3113 wg_task_establish_session(wg, wgp); 3114 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED)) 3115 wg_task_endpoint_changed(wg, wgp); 3116 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE)) 3117 wg_task_send_keepalive_message(wg, wgp); 3118 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION)) 3119 wg_task_destroy_prev_session(wg, wgp); 3120 mutex_exit(wgp->wgp_lock); 3121 3122 mutex_enter(wgp->wgp_intr_lock); 3123 } 3124 mutex_exit(wgp->wgp_intr_lock); 3125 } 3126 3127 static void 3128 wg_job(struct threadpool_job *job) 3129 { 3130 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job); 3131 int bound, upcalls; 3132 3133 mutex_enter(wg->wg_intr_lock); 3134 while ((upcalls = wg->wg_upcalls) != 0) { 3135 wg->wg_upcalls = 0; 3136 mutex_exit(wg->wg_intr_lock); 3137 bound = curlwp_bind(); 3138 if (ISSET(upcalls, WG_UPCALL_INET)) 3139 wg_receive_packets(wg, AF_INET); 3140 if (ISSET(upcalls, WG_UPCALL_INET6)) 3141 wg_receive_packets(wg, AF_INET6); 3142 curlwp_bindx(bound); 3143 mutex_enter(wg->wg_intr_lock); 3144 } 3145 threadpool_job_done(job); 3146 mutex_exit(wg->wg_intr_lock); 3147 } 3148 3149 static int 3150 wg_bind_port(struct wg_softc *wg, const uint16_t port) 3151 { 3152 
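	/*
	 * Bind both the IPv4 and IPv6 UDP sockets to the same listen
	 * port so peers can reach us over either address family; the
	 * rebind is skipped only when the requested port is nonzero and
	 * unchanged.
	 */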
int error; 3153 uint16_t old_port = wg->wg_listen_port; 3154 3155 if (port != 0 && old_port == port) 3156 return 0; 3157 3158 struct sockaddr_in _sin, *sin = &_sin; 3159 sin->sin_len = sizeof(*sin); 3160 sin->sin_family = AF_INET; 3161 sin->sin_addr.s_addr = INADDR_ANY; 3162 sin->sin_port = htons(port); 3163 3164 error = sobind(wg->wg_so4, sintosa(sin), curlwp); 3165 if (error != 0) 3166 return error; 3167 3168 #ifdef INET6 3169 struct sockaddr_in6 _sin6, *sin6 = &_sin6; 3170 sin6->sin6_len = sizeof(*sin6); 3171 sin6->sin6_family = AF_INET6; 3172 sin6->sin6_addr = in6addr_any; 3173 sin6->sin6_port = htons(port); 3174 3175 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp); 3176 if (error != 0) 3177 return error; 3178 #endif 3179 3180 wg->wg_listen_port = port; 3181 3182 return 0; 3183 } 3184 3185 static void 3186 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag) 3187 { 3188 struct wg_softc *wg = cookie; 3189 int reason; 3190 3191 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ? 3192 WG_UPCALL_INET : 3193 WG_UPCALL_INET6; 3194 3195 mutex_enter(wg->wg_intr_lock); 3196 wg->wg_upcalls |= reason; 3197 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job); 3198 mutex_exit(wg->wg_intr_lock); 3199 } 3200 3201 static int 3202 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so, 3203 struct sockaddr *src, void *arg) 3204 { 3205 struct wg_softc *wg = arg; 3206 struct wg_msg wgm; 3207 struct mbuf *m = *mp; 3208 3209 WG_TRACE("enter"); 3210 3211 /* Verify the mbuf chain is long enough to have a wg msg header. */ 3212 KASSERT(offset <= m_length(m)); 3213 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) { 3214 /* drop on the floor */ 3215 m_freem(m); 3216 return -1; 3217 } 3218 3219 /* 3220 * Copy the message header (32-bit message type) out -- we'll 3221 * worry about contiguity and alignment later. 3222 */ 3223 m_copydata(m, offset, sizeof(struct wg_msg), &wgm); 3224 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type)); 3225 3226 /* 3227 * Handle DATA packets promptly as they arrive. Other packets 3228 * may require expensive public-key crypto and are not as 3229 * sensitive to latency, so defer them to the worker thread. 
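	 * The return value follows the convention of the switch below:
	 * 1 means the mbuf was consumed here, 0 lets it continue to
	 * so_receive() (picked up later by wg_receive_packets() on the
	 * worker thread), and -1 means it was freed and dropped.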
3230 */ 3231 switch (le32toh(wgm.wgm_type)) { 3232 case WG_MSG_TYPE_DATA: 3233 /* handle immediately */ 3234 m_adj(m, offset); 3235 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) { 3236 m = m_pullup(m, sizeof(struct wg_msg_data)); 3237 if (m == NULL) 3238 return -1; 3239 } 3240 wg_handle_msg_data(wg, m, src); 3241 *mp = NULL; 3242 return 1; 3243 case WG_MSG_TYPE_INIT: 3244 case WG_MSG_TYPE_RESP: 3245 case WG_MSG_TYPE_COOKIE: 3246 /* pass through to so_receive in wg_receive_packets */ 3247 return 0; 3248 default: 3249 /* drop on the floor */ 3250 m_freem(m); 3251 return -1; 3252 } 3253 } 3254 3255 static int 3256 wg_socreate(struct wg_softc *wg, int af, struct socket **sop) 3257 { 3258 int error; 3259 struct socket *so; 3260 3261 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL); 3262 if (error != 0) 3263 return error; 3264 3265 solock(so); 3266 so->so_upcallarg = wg; 3267 so->so_upcall = wg_so_upcall; 3268 so->so_rcv.sb_flags |= SB_UPCALL; 3269 inpcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg); 3270 sounlock(so); 3271 3272 *sop = so; 3273 3274 return 0; 3275 } 3276 3277 static bool 3278 wg_session_hit_limits(struct wg_session *wgs) 3279 { 3280 3281 /* 3282 * [W] 6.2: Transport Message Limits 3283 * "After REJECT-AFTER-MESSAGES transport data messages or after the 3284 * current secure session is REJECT-AFTER-TIME seconds old, whichever 3285 * comes first, WireGuard will refuse to send any more transport data 3286 * messages using the current secure session, ..." 3287 */ 3288 KASSERT(wgs->wgs_time_established != 0); 3289 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) { 3290 WG_DLOG("The session hits REJECT_AFTER_TIME\n"); 3291 return true; 3292 } else if (wg_session_get_send_counter(wgs) > 3293 wg_reject_after_messages) { 3294 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n"); 3295 return true; 3296 } 3297 3298 return false; 3299 } 3300 3301 static void 3302 wgintr(void *cookie) 3303 { 3304 struct wg_peer *wgp; 3305 struct wg_session *wgs; 3306 struct mbuf *m; 3307 struct psref psref; 3308 3309 while ((m = pktq_dequeue(wg_pktq)) != NULL) { 3310 wgp = M_GETCTX(m, struct wg_peer *); 3311 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) { 3312 WG_TRACE("no stable session"); 3313 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3314 goto next0; 3315 } 3316 if (__predict_false(wg_session_hit_limits(wgs))) { 3317 WG_TRACE("stable session hit limits"); 3318 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3319 goto next1; 3320 } 3321 wg_send_data_msg(wgp, wgs, m); 3322 m = NULL; /* consumed */ 3323 next1: wg_put_session(wgs, &psref); 3324 next0: if (m) 3325 m_freem(m); 3326 /* XXX Yield to avoid userland starvation? 
*/ 3327 } 3328 } 3329 3330 static void 3331 wg_rekey_timer(void *arg) 3332 { 3333 struct wg_peer *wgp = arg; 3334 3335 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3336 } 3337 3338 static void 3339 wg_purge_pending_packets(struct wg_peer *wgp) 3340 { 3341 struct mbuf *m; 3342 3343 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) 3344 m_freem(m); 3345 pktq_barrier(wg_pktq); 3346 } 3347 3348 static void 3349 wg_handshake_timeout_timer(void *arg) 3350 { 3351 struct wg_peer *wgp = arg; 3352 3353 WG_TRACE("enter"); 3354 3355 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE); 3356 } 3357 3358 static struct wg_peer * 3359 wg_alloc_peer(struct wg_softc *wg) 3360 { 3361 struct wg_peer *wgp; 3362 3363 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP); 3364 3365 wgp->wgp_sc = wg; 3366 callout_init(&wgp->wgp_rekey_timer, CALLOUT_MPSAFE); 3367 callout_setfunc(&wgp->wgp_rekey_timer, wg_rekey_timer, wgp); 3368 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE); 3369 callout_setfunc(&wgp->wgp_handshake_timeout_timer, 3370 wg_handshake_timeout_timer, wgp); 3371 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE); 3372 callout_setfunc(&wgp->wgp_session_dtor_timer, 3373 wg_session_dtor_timer, wgp); 3374 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry); 3375 wgp->wgp_endpoint_changing = false; 3376 wgp->wgp_endpoint_available = false; 3377 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 3378 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 3379 wgp->wgp_psz = pserialize_create(); 3380 psref_target_init(&wgp->wgp_psref, wg_psref_class); 3381 3382 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP); 3383 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP); 3384 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class); 3385 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class); 3386 3387 struct wg_session *wgs; 3388 wgp->wgp_session_stable = 3389 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP); 3390 wgp->wgp_session_unstable = 3391 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP); 3392 wgs = wgp->wgp_session_stable; 3393 wgs->wgs_peer = wgp; 3394 wgs->wgs_state = WGS_STATE_UNKNOWN; 3395 psref_target_init(&wgs->wgs_psref, wg_psref_class); 3396 #ifndef __HAVE_ATOMIC64_LOADSTORE 3397 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET); 3398 #endif 3399 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP); 3400 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET); 3401 3402 wgs = wgp->wgp_session_unstable; 3403 wgs->wgs_peer = wgp; 3404 wgs->wgs_state = WGS_STATE_UNKNOWN; 3405 psref_target_init(&wgs->wgs_psref, wg_psref_class); 3406 #ifndef __HAVE_ATOMIC64_LOADSTORE 3407 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET); 3408 #endif 3409 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP); 3410 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET); 3411 3412 return wgp; 3413 } 3414 3415 static void 3416 wg_destroy_peer(struct wg_peer *wgp) 3417 { 3418 struct wg_session *wgs; 3419 struct wg_softc *wg = wgp->wgp_sc; 3420 3421 /* Prevent new packets from this peer on any source address. 
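	 * We do this by deleting every allowedips entry of the peer from
	 * the cryptokey routing tables.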
*/ 3422 rw_enter(wg->wg_rwlock, RW_WRITER); 3423 for (int i = 0; i < wgp->wgp_n_allowedips; i++) { 3424 struct wg_allowedip *wga = &wgp->wgp_allowedips[i]; 3425 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family); 3426 struct radix_node *rn; 3427 3428 KASSERT(rnh != NULL); 3429 rn = rnh->rnh_deladdr(&wga->wga_sa_addr, 3430 &wga->wga_sa_mask, rnh); 3431 if (rn == NULL) { 3432 char addrstr[128]; 3433 sockaddr_format(&wga->wga_sa_addr, addrstr, 3434 sizeof(addrstr)); 3435 WGLOG(LOG_WARNING, "Couldn't delete %s", addrstr); 3436 } 3437 } 3438 rw_exit(wg->wg_rwlock); 3439 3440 /* Purge pending packets. */ 3441 wg_purge_pending_packets(wgp); 3442 3443 /* Halt all packet processing and timeouts. */ 3444 callout_halt(&wgp->wgp_rekey_timer, NULL); 3445 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL); 3446 callout_halt(&wgp->wgp_session_dtor_timer, NULL); 3447 3448 /* Wait for any queued work to complete. */ 3449 workqueue_wait(wg_wq, &wgp->wgp_work); 3450 3451 wgs = wgp->wgp_session_unstable; 3452 if (wgs->wgs_state != WGS_STATE_UNKNOWN) { 3453 mutex_enter(wgp->wgp_lock); 3454 wg_destroy_session(wg, wgs); 3455 mutex_exit(wgp->wgp_lock); 3456 } 3457 mutex_destroy(&wgs->wgs_recvwin->lock); 3458 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin)); 3459 #ifndef __HAVE_ATOMIC64_LOADSTORE 3460 mutex_destroy(&wgs->wgs_send_counter_lock); 3461 #endif 3462 kmem_free(wgs, sizeof(*wgs)); 3463 3464 wgs = wgp->wgp_session_stable; 3465 if (wgs->wgs_state != WGS_STATE_UNKNOWN) { 3466 mutex_enter(wgp->wgp_lock); 3467 wg_destroy_session(wg, wgs); 3468 mutex_exit(wgp->wgp_lock); 3469 } 3470 mutex_destroy(&wgs->wgs_recvwin->lock); 3471 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin)); 3472 #ifndef __HAVE_ATOMIC64_LOADSTORE 3473 mutex_destroy(&wgs->wgs_send_counter_lock); 3474 #endif 3475 kmem_free(wgs, sizeof(*wgs)); 3476 3477 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class); 3478 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class); 3479 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint)); 3480 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0)); 3481 3482 pserialize_destroy(wgp->wgp_psz); 3483 mutex_obj_free(wgp->wgp_intr_lock); 3484 mutex_obj_free(wgp->wgp_lock); 3485 3486 kmem_free(wgp, sizeof(*wgp)); 3487 } 3488 3489 static void 3490 wg_destroy_all_peers(struct wg_softc *wg) 3491 { 3492 struct wg_peer *wgp, *wgp0 __diagused; 3493 void *garbage_byname, *garbage_bypubkey; 3494 3495 restart: 3496 garbage_byname = garbage_bypubkey = NULL; 3497 mutex_enter(wg->wg_lock); 3498 WG_PEER_WRITER_FOREACH(wgp, wg) { 3499 if (wgp->wgp_name[0]) { 3500 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name, 3501 strlen(wgp->wgp_name)); 3502 KASSERT(wgp0 == wgp); 3503 garbage_byname = thmap_stage_gc(wg->wg_peers_byname); 3504 } 3505 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 3506 sizeof(wgp->wgp_pubkey)); 3507 KASSERT(wgp0 == wgp); 3508 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey); 3509 WG_PEER_WRITER_REMOVE(wgp); 3510 wg->wg_npeers--; 3511 mutex_enter(wgp->wgp_lock); 3512 pserialize_perform(wgp->wgp_psz); 3513 mutex_exit(wgp->wgp_lock); 3514 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry); 3515 break; 3516 } 3517 mutex_exit(wg->wg_lock); 3518 3519 if (wgp == NULL) 3520 return; 3521 3522 psref_target_destroy(&wgp->wgp_psref, wg_psref_class); 3523 3524 wg_destroy_peer(wgp); 3525 thmap_gc(wg->wg_peers_byname, garbage_byname); 3526 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey); 3527 3528 goto restart; 3529 } 3530 3531 static int 3532 
wg_destroy_peer_name(struct wg_softc *wg, const char *name) 3533 { 3534 struct wg_peer *wgp, *wgp0 __diagused; 3535 void *garbage_byname, *garbage_bypubkey; 3536 3537 mutex_enter(wg->wg_lock); 3538 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name)); 3539 if (wgp != NULL) { 3540 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 3541 sizeof(wgp->wgp_pubkey)); 3542 KASSERT(wgp0 == wgp); 3543 garbage_byname = thmap_stage_gc(wg->wg_peers_byname); 3544 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey); 3545 WG_PEER_WRITER_REMOVE(wgp); 3546 wg->wg_npeers--; 3547 if (wg->wg_npeers == 0) 3548 if_link_state_change(&wg->wg_if, LINK_STATE_DOWN); 3549 mutex_enter(wgp->wgp_lock); 3550 pserialize_perform(wgp->wgp_psz); 3551 mutex_exit(wgp->wgp_lock); 3552 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry); 3553 } 3554 mutex_exit(wg->wg_lock); 3555 3556 if (wgp == NULL) 3557 return ENOENT; 3558 3559 psref_target_destroy(&wgp->wgp_psref, wg_psref_class); 3560 3561 wg_destroy_peer(wgp); 3562 thmap_gc(wg->wg_peers_byname, garbage_byname); 3563 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey); 3564 3565 return 0; 3566 } 3567 3568 static int 3569 wg_if_attach(struct wg_softc *wg) 3570 { 3571 3572 wg->wg_if.if_addrlen = 0; 3573 wg->wg_if.if_mtu = WG_MTU; 3574 wg->wg_if.if_flags = IFF_MULTICAST; 3575 wg->wg_if.if_extflags = IFEF_MPSAFE; 3576 wg->wg_if.if_ioctl = wg_ioctl; 3577 wg->wg_if.if_output = wg_output; 3578 wg->wg_if.if_init = wg_init; 3579 #ifdef ALTQ 3580 wg->wg_if.if_start = wg_start; 3581 #endif 3582 wg->wg_if.if_stop = wg_stop; 3583 wg->wg_if.if_type = IFT_OTHER; 3584 wg->wg_if.if_dlt = DLT_NULL; 3585 wg->wg_if.if_softc = wg; 3586 #ifdef ALTQ 3587 IFQ_SET_READY(&wg->wg_if.if_snd); 3588 #endif 3589 if_initialize(&wg->wg_if); 3590 3591 wg->wg_if.if_link_state = LINK_STATE_DOWN; 3592 if_alloc_sadl(&wg->wg_if); 3593 if_register(&wg->wg_if); 3594 3595 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t)); 3596 3597 return 0; 3598 } 3599 3600 static void 3601 wg_if_detach(struct wg_softc *wg) 3602 { 3603 struct ifnet *ifp = &wg->wg_if; 3604 3605 bpf_detach(ifp); 3606 if_detach(ifp); 3607 } 3608 3609 static int 3610 wg_clone_create(struct if_clone *ifc, int unit) 3611 { 3612 struct wg_softc *wg; 3613 int error; 3614 3615 wg_guarantee_initialized(); 3616 3617 error = wg_count_inc(); 3618 if (error) 3619 return error; 3620 3621 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP); 3622 3623 if_initname(&wg->wg_if, ifc->ifc_name, unit); 3624 3625 PSLIST_INIT(&wg->wg_peers); 3626 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY); 3627 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY); 3628 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY); 3629 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 3630 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 3631 wg->wg_rwlock = rw_obj_alloc(); 3632 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock, 3633 "%s", if_name(&wg->wg_if)); 3634 wg->wg_ops = &wg_ops_rumpkernel; 3635 3636 error = threadpool_get(&wg->wg_threadpool, PRI_NONE); 3637 if (error) 3638 goto fail0; 3639 3640 #ifdef INET 3641 error = wg_socreate(wg, AF_INET, &wg->wg_so4); 3642 if (error) 3643 goto fail1; 3644 rn_inithead((void **)&wg->wg_rtable_ipv4, 3645 offsetof(struct sockaddr_in, sin_addr) * NBBY); 3646 #endif 3647 #ifdef INET6 3648 error = wg_socreate(wg, AF_INET6, &wg->wg_so6); 3649 if (error) 3650 goto fail2; 3651 rn_inithead((void **)&wg->wg_rtable_ipv6, 3652 offsetof(struct sockaddr_in6, sin6_addr) * NBBY); 3653 #endif 3654 3655 error = 
wg_if_attach(wg); 3656 if (error) 3657 goto fail3; 3658 3659 return 0; 3660 3661 fail4: __unused 3662 wg_if_detach(wg); 3663 fail3: wg_destroy_all_peers(wg); 3664 #ifdef INET6 3665 solock(wg->wg_so6); 3666 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL; 3667 sounlock(wg->wg_so6); 3668 #endif 3669 #ifdef INET 3670 solock(wg->wg_so4); 3671 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL; 3672 sounlock(wg->wg_so4); 3673 #endif 3674 mutex_enter(wg->wg_intr_lock); 3675 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job); 3676 mutex_exit(wg->wg_intr_lock); 3677 #ifdef INET6 3678 if (wg->wg_rtable_ipv6 != NULL) 3679 free(wg->wg_rtable_ipv6, M_RTABLE); 3680 soclose(wg->wg_so6); 3681 fail2: 3682 #endif 3683 #ifdef INET 3684 if (wg->wg_rtable_ipv4 != NULL) 3685 free(wg->wg_rtable_ipv4, M_RTABLE); 3686 soclose(wg->wg_so4); 3687 fail1: 3688 #endif 3689 threadpool_put(wg->wg_threadpool, PRI_NONE); 3690 fail0: threadpool_job_destroy(&wg->wg_job); 3691 rw_obj_free(wg->wg_rwlock); 3692 mutex_obj_free(wg->wg_intr_lock); 3693 mutex_obj_free(wg->wg_lock); 3694 thmap_destroy(wg->wg_sessions_byindex); 3695 thmap_destroy(wg->wg_peers_byname); 3696 thmap_destroy(wg->wg_peers_bypubkey); 3697 PSLIST_DESTROY(&wg->wg_peers); 3698 kmem_free(wg, sizeof(*wg)); 3699 wg_count_dec(); 3700 return error; 3701 } 3702 3703 static int 3704 wg_clone_destroy(struct ifnet *ifp) 3705 { 3706 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if); 3707 3708 #ifdef WG_RUMPKERNEL 3709 if (wg_user_mode(wg)) { 3710 rumpuser_wg_destroy(wg->wg_user); 3711 wg->wg_user = NULL; 3712 } 3713 #endif 3714 3715 wg_if_detach(wg); 3716 wg_destroy_all_peers(wg); 3717 #ifdef INET6 3718 solock(wg->wg_so6); 3719 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL; 3720 sounlock(wg->wg_so6); 3721 #endif 3722 #ifdef INET 3723 solock(wg->wg_so4); 3724 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL; 3725 sounlock(wg->wg_so4); 3726 #endif 3727 mutex_enter(wg->wg_intr_lock); 3728 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job); 3729 mutex_exit(wg->wg_intr_lock); 3730 #ifdef INET6 3731 if (wg->wg_rtable_ipv6 != NULL) 3732 free(wg->wg_rtable_ipv6, M_RTABLE); 3733 soclose(wg->wg_so6); 3734 #endif 3735 #ifdef INET 3736 if (wg->wg_rtable_ipv4 != NULL) 3737 free(wg->wg_rtable_ipv4, M_RTABLE); 3738 soclose(wg->wg_so4); 3739 #endif 3740 threadpool_put(wg->wg_threadpool, PRI_NONE); 3741 threadpool_job_destroy(&wg->wg_job); 3742 rw_obj_free(wg->wg_rwlock); 3743 mutex_obj_free(wg->wg_intr_lock); 3744 mutex_obj_free(wg->wg_lock); 3745 thmap_destroy(wg->wg_sessions_byindex); 3746 thmap_destroy(wg->wg_peers_byname); 3747 thmap_destroy(wg->wg_peers_bypubkey); 3748 PSLIST_DESTROY(&wg->wg_peers); 3749 kmem_free(wg, sizeof(*wg)); 3750 wg_count_dec(); 3751 3752 return 0; 3753 } 3754 3755 static struct wg_peer * 3756 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa, 3757 struct psref *psref) 3758 { 3759 struct radix_node_head *rnh; 3760 struct radix_node *rn; 3761 struct wg_peer *wgp = NULL; 3762 struct wg_allowedip *wga; 3763 3764 #ifdef WG_DEBUG_LOG 3765 char addrstr[128]; 3766 sockaddr_format(sa, addrstr, sizeof(addrstr)); 3767 WG_DLOG("sa=%s\n", addrstr); 3768 #endif 3769 3770 rw_enter(wg->wg_rwlock, RW_READER); 3771 3772 rnh = wg_rnh(wg, sa->sa_family); 3773 if (rnh == NULL) 3774 goto out; 3775 3776 rn = rnh->rnh_matchaddr(sa, rnh); 3777 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0) 3778 goto out; 3779 3780 WG_TRACE("success"); 3781 3782 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]); 3783 wgp = wga->wga_peer; 3784 wg_get_peer(wgp, psref); 3785 3786 
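	/*
	 * The psref taken above keeps the peer alive after wg_rwlock is
	 * dropped; the caller releases it with wg_put_peer().
	 */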
out: 3787 rw_exit(wg->wg_rwlock); 3788 return wgp; 3789 } 3790 3791 static void 3792 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp, 3793 struct wg_session *wgs, struct wg_msg_data *wgmd) 3794 { 3795 3796 memset(wgmd, 0, sizeof(*wgmd)); 3797 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA); 3798 wgmd->wgmd_receiver = wgs->wgs_remote_index; 3799 /* [W] 5.4.6: msg.counter := Nm^send */ 3800 /* [W] 5.4.6: Nm^send := Nm^send + 1 */ 3801 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs)); 3802 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter)); 3803 } 3804 3805 static int 3806 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 3807 const struct rtentry *rt) 3808 { 3809 struct wg_softc *wg = ifp->if_softc; 3810 struct wg_peer *wgp = NULL; 3811 struct wg_session *wgs = NULL; 3812 struct psref wgp_psref, wgs_psref; 3813 int bound; 3814 int error; 3815 3816 bound = curlwp_bind(); 3817 3818 /* TODO make the nest limit configurable via sysctl */ 3819 error = if_tunnel_check_nesting(ifp, m, 1); 3820 if (error) { 3821 WGLOG(LOG_ERR, "tunneling loop detected and packet dropped\n"); 3822 goto out0; 3823 } 3824 3825 #ifdef ALTQ 3826 bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags) 3827 & ALTQF_ENABLED; 3828 if (altq) 3829 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family); 3830 #endif 3831 3832 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT); 3833 3834 m->m_flags &= ~(M_BCAST|M_MCAST); 3835 3836 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref); 3837 if (wgp == NULL) { 3838 WG_TRACE("peer not found"); 3839 error = EHOSTUNREACH; 3840 goto out0; 3841 } 3842 3843 /* Clear checksum-offload flags. */ 3844 m->m_pkthdr.csum_flags = 0; 3845 m->m_pkthdr.csum_data = 0; 3846 3847 /* Check whether there's an established session. */ 3848 wgs = wg_get_stable_session(wgp, &wgs_psref); 3849 if (wgs == NULL) { 3850 /* 3851 * No established session. If we're the first to try 3852 * sending data, schedule a handshake and queue the 3853 * packet for when the handshake is done; otherwise 3854 * just drop the packet and let the ongoing handshake 3855 * attempt continue. We could queue more data packets 3856 * but it's not clear that's worthwhile. 3857 */ 3858 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) { 3859 m = NULL; /* consume */ 3860 WG_TRACE("queued first packet; init handshake"); 3861 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3862 } else { 3863 WG_TRACE("first packet already queued, dropping"); 3864 } 3865 goto out1; 3866 } 3867 3868 /* There's an established session. Toss it in the queue. 
*/ 3869 #ifdef ALTQ 3870 if (altq) { 3871 mutex_enter(ifp->if_snd.ifq_lock); 3872 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 3873 M_SETCTX(m, wgp); 3874 ALTQ_ENQUEUE(&ifp->if_snd, m, error); 3875 m = NULL; /* consume */ 3876 } 3877 mutex_exit(ifp->if_snd.ifq_lock); 3878 if (m == NULL) { 3879 wg_start(ifp); 3880 goto out2; 3881 } 3882 } 3883 #endif 3884 kpreempt_disable(); 3885 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 3886 M_SETCTX(m, wgp); 3887 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 3888 WGLOG(LOG_ERR, "pktq full, dropping\n"); 3889 error = ENOBUFS; 3890 goto out3; 3891 } 3892 m = NULL; /* consumed */ 3893 error = 0; 3894 out3: kpreempt_enable(); 3895 3896 #ifdef ALTQ 3897 out2: 3898 #endif 3899 wg_put_session(wgs, &wgs_psref); 3900 out1: wg_put_peer(wgp, &wgp_psref); 3901 out0: if (m) 3902 m_freem(m); 3903 curlwp_bindx(bound); 3904 return error; 3905 } 3906 3907 static int 3908 wg_send_udp(struct wg_peer *wgp, struct mbuf *m) 3909 { 3910 struct psref psref; 3911 struct wg_sockaddr *wgsa; 3912 int error; 3913 struct socket *so; 3914 3915 wgsa = wg_get_endpoint_sa(wgp, &psref); 3916 so = wg_get_so_by_peer(wgp, wgsa); 3917 solock(so); 3918 if (wgsatosa(wgsa)->sa_family == AF_INET) { 3919 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp); 3920 } else { 3921 #ifdef INET6 3922 error = udp6_output(sotoinpcb(so), m, wgsatosin6(wgsa), 3923 NULL, curlwp); 3924 #else 3925 m_freem(m); 3926 error = EPFNOSUPPORT; 3927 #endif 3928 } 3929 sounlock(so); 3930 wg_put_sa(wgp, wgsa, &psref); 3931 3932 return error; 3933 } 3934 3935 /* Inspired by pppoe_get_mbuf */ 3936 static struct mbuf * 3937 wg_get_mbuf(size_t leading_len, size_t len) 3938 { 3939 struct mbuf *m; 3940 3941 KASSERT(leading_len <= MCLBYTES); 3942 KASSERT(len <= MCLBYTES - leading_len); 3943 3944 m = m_gethdr(M_DONTWAIT, MT_DATA); 3945 if (m == NULL) 3946 return NULL; 3947 if (len + leading_len > MHLEN) { 3948 m_clget(m, M_DONTWAIT); 3949 if ((m->m_flags & M_EXT) == 0) { 3950 m_free(m); 3951 return NULL; 3952 } 3953 } 3954 m->m_data += leading_len; 3955 m->m_pkthdr.len = m->m_len = len; 3956 3957 return m; 3958 } 3959 3960 static int 3961 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs, 3962 struct mbuf *m) 3963 { 3964 struct wg_softc *wg = wgp->wgp_sc; 3965 int error; 3966 size_t inner_len, padded_len, encrypted_len; 3967 char *padded_buf = NULL; 3968 size_t mlen; 3969 struct wg_msg_data *wgmd; 3970 bool free_padded_buf = false; 3971 struct mbuf *n; 3972 size_t leading_len = max_hdr + sizeof(struct udphdr); 3973 3974 mlen = m_length(m); 3975 inner_len = mlen; 3976 padded_len = roundup(mlen, 16); 3977 encrypted_len = padded_len + WG_AUTHTAG_LEN; 3978 WG_DLOG("inner=%lu, padded=%lu, encrypted_len=%lu\n", 3979 inner_len, padded_len, encrypted_len); 3980 if (mlen != 0) { 3981 bool success; 3982 success = m_ensure_contig(&m, padded_len); 3983 if (success) { 3984 padded_buf = mtod(m, char *); 3985 } else { 3986 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP); 3987 if (padded_buf == NULL) { 3988 error = ENOBUFS; 3989 goto end; 3990 } 3991 free_padded_buf = true; 3992 m_copydata(m, 0, mlen, padded_buf); 3993 } 3994 memset(padded_buf + mlen, 0, padded_len - inner_len); 3995 } 3996 3997 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len); 3998 if (n == NULL) { 3999 error = ENOBUFS; 4000 goto end; 4001 } 4002 KASSERT(n->m_len >= sizeof(*wgmd)); 4003 wgmd = mtod(n, struct wg_msg_data *); 4004 wg_fill_msg_data(wg, wgp, wgs, wgmd); 4005 /* [W] 5.4.6: AEAD(Tm^send, Nm^send, P, e) */ 4006 
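	/*
	 * Resulting wire format: the sizeof(*wgmd) header (type,
	 * receiver index, counter) is followed by the
	 * ChaCha20-Poly1305 ciphertext of the zero-padded inner
	 * packet plus its authentication tag, i.e.
	 * padded_len + WG_AUTHTAG_LEN bytes.  The nonce is the send
	 * counter just stored in wgmd_counter, so it is never reused
	 * within a session.  For example, a 1420-byte inner packet is
	 * padded to 1424 bytes and yields 1424 + WG_AUTHTAG_LEN bytes
	 * of ciphertext after the header.
	 */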
wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len, 4007 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter), 4008 padded_buf, padded_len, 4009 NULL, 0); 4010 4011 error = wg->wg_ops->send_data_msg(wgp, n); 4012 if (error == 0) { 4013 struct ifnet *ifp = &wg->wg_if; 4014 if_statadd(ifp, if_obytes, mlen); 4015 if_statinc(ifp, if_opackets); 4016 if (wgs->wgs_is_initiator && 4017 wgs->wgs_time_last_data_sent == 0) { 4018 /* 4019 * [W] 6.2 Transport Message Limits 4020 * "if a peer is the initiator of a current secure 4021 * session, WireGuard will send a handshake initiation 4022 * message to begin a new secure session if, after 4023 * transmitting a transport data message, the current 4024 * secure session is REKEY-AFTER-TIME seconds old," 4025 */ 4026 wg_schedule_rekey_timer(wgp); 4027 } 4028 wgs->wgs_time_last_data_sent = time_uptime; 4029 if (wg_session_get_send_counter(wgs) >= 4030 wg_rekey_after_messages) { 4031 /* 4032 * [W] 6.2 Transport Message Limits 4033 * "WireGuard will try to create a new session, by 4034 * sending a handshake initiation message (section 4035 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES 4036 * transport data messages..." 4037 */ 4038 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 4039 } 4040 } 4041 end: 4042 m_freem(m); 4043 if (free_padded_buf) 4044 kmem_intr_free(padded_buf, padded_len); 4045 return error; 4046 } 4047 4048 static void 4049 wg_input(struct ifnet *ifp, struct mbuf *m, const int af) 4050 { 4051 pktqueue_t *pktq; 4052 size_t pktlen; 4053 4054 KASSERT(af == AF_INET || af == AF_INET6); 4055 4056 WG_TRACE(""); 4057 4058 m_set_rcvif(m, ifp); 4059 pktlen = m->m_pkthdr.len; 4060 4061 bpf_mtap_af(ifp, af, m, BPF_D_IN); 4062 4063 switch (af) { 4064 case AF_INET: 4065 pktq = ip_pktq; 4066 break; 4067 #ifdef INET6 4068 case AF_INET6: 4069 pktq = ip6_pktq; 4070 break; 4071 #endif 4072 default: 4073 panic("invalid af=%d", af); 4074 } 4075 4076 kpreempt_disable(); 4077 const u_int h = curcpu()->ci_index; 4078 if (__predict_true(pktq_enqueue(pktq, m, h))) { 4079 if_statadd(ifp, if_ibytes, pktlen); 4080 if_statinc(ifp, if_ipackets); 4081 } else { 4082 m_freem(m); 4083 } 4084 kpreempt_enable(); 4085 } 4086 4087 static void 4088 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN], 4089 const uint8_t privkey[WG_STATIC_KEY_LEN]) 4090 { 4091 4092 crypto_scalarmult_base(pubkey, privkey); 4093 } 4094 4095 static int 4096 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga) 4097 { 4098 struct radix_node_head *rnh; 4099 struct radix_node *rn; 4100 int error = 0; 4101 4102 rw_enter(wg->wg_rwlock, RW_WRITER); 4103 rnh = wg_rnh(wg, wga->wga_family); 4104 KASSERT(rnh != NULL); 4105 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh, 4106 wga->wga_nodes); 4107 rw_exit(wg->wg_rwlock); 4108 4109 if (rn == NULL) 4110 error = EEXIST; 4111 4112 return error; 4113 } 4114 4115 static int 4116 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer, 4117 struct wg_peer **wgpp) 4118 { 4119 int error = 0; 4120 const void *pubkey; 4121 size_t pubkey_len; 4122 const void *psk; 4123 size_t psk_len; 4124 const char *name = NULL; 4125 4126 if (prop_dictionary_get_string(peer, "name", &name)) { 4127 if (strlen(name) > WG_PEER_NAME_MAXLEN) { 4128 error = EINVAL; 4129 goto out; 4130 } 4131 } 4132 4133 if (!prop_dictionary_get_data(peer, "public_key", 4134 &pubkey, &pubkey_len)) { 4135 error = EINVAL; 4136 goto out; 4137 } 4138 #ifdef WG_DEBUG_DUMP 4139 { 4140 char *hex = gethexdump(pubkey, pubkey_len); 4141 log(LOG_DEBUG, "pubkey=%p, 
pubkey_len=%lu\n%s\n", 4142 pubkey, pubkey_len, hex); 4143 puthexdump(hex, pubkey, pubkey_len); 4144 } 4145 #endif 4146 4147 struct wg_peer *wgp = wg_alloc_peer(wg); 4148 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey)); 4149 if (name != NULL) 4150 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name)); 4151 4152 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) { 4153 if (psk_len != sizeof(wgp->wgp_psk)) { 4154 error = EINVAL; 4155 goto out; 4156 } 4157 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk)); 4158 } 4159 4160 const void *addr; 4161 size_t addr_len; 4162 struct wg_sockaddr *wgsa = wgp->wgp_endpoint; 4163 4164 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len)) 4165 goto skip_endpoint; 4166 if (addr_len < sizeof(*wgsatosa(wgsa)) || 4167 addr_len > sizeof(*wgsatoss(wgsa))) { 4168 error = EINVAL; 4169 goto out; 4170 } 4171 memcpy(wgsatoss(wgsa), addr, addr_len); 4172 switch (wgsa_family(wgsa)) { 4173 case AF_INET: 4174 #ifdef INET6 4175 case AF_INET6: 4176 #endif 4177 break; 4178 default: 4179 error = EPFNOSUPPORT; 4180 goto out; 4181 } 4182 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) { 4183 error = EINVAL; 4184 goto out; 4185 } 4186 { 4187 char addrstr[128]; 4188 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr)); 4189 WG_DLOG("addr=%s\n", addrstr); 4190 } 4191 wgp->wgp_endpoint_available = true; 4192 4193 prop_array_t allowedips; 4194 skip_endpoint: 4195 allowedips = prop_dictionary_get(peer, "allowedips"); 4196 if (allowedips == NULL) 4197 goto skip; 4198 4199 prop_object_iterator_t _it = prop_array_iterator(allowedips); 4200 prop_dictionary_t prop_allowedip; 4201 int j = 0; 4202 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) { 4203 struct wg_allowedip *wga = &wgp->wgp_allowedips[j]; 4204 4205 if (!prop_dictionary_get_int(prop_allowedip, "family", 4206 &wga->wga_family)) 4207 continue; 4208 if (!prop_dictionary_get_data(prop_allowedip, "ip", 4209 &addr, &addr_len)) 4210 continue; 4211 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr", 4212 &wga->wga_cidr)) 4213 continue; 4214 4215 switch (wga->wga_family) { 4216 case AF_INET: { 4217 struct sockaddr_in sin; 4218 char addrstr[128]; 4219 struct in_addr mask; 4220 struct sockaddr_in sin_mask; 4221 4222 if (addr_len != sizeof(struct in_addr)) 4223 return EINVAL; 4224 memcpy(&wga->wga_addr4, addr, addr_len); 4225 4226 sockaddr_in_init(&sin, (const struct in_addr *)addr, 4227 0); 4228 sockaddr_copy(&wga->wga_sa_addr, 4229 sizeof(sin), sintosa(&sin)); 4230 4231 sockaddr_format(sintosa(&sin), 4232 addrstr, sizeof(addrstr)); 4233 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr); 4234 4235 in_len2mask(&mask, wga->wga_cidr); 4236 sockaddr_in_init(&sin_mask, &mask, 0); 4237 sockaddr_copy(&wga->wga_sa_mask, 4238 sizeof(sin_mask), sintosa(&sin_mask)); 4239 4240 break; 4241 } 4242 #ifdef INET6 4243 case AF_INET6: { 4244 struct sockaddr_in6 sin6; 4245 char addrstr[128]; 4246 struct in6_addr mask; 4247 struct sockaddr_in6 sin6_mask; 4248 4249 if (addr_len != sizeof(struct in6_addr)) 4250 return EINVAL; 4251 memcpy(&wga->wga_addr6, addr, addr_len); 4252 4253 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr, 4254 0, 0, 0); 4255 sockaddr_copy(&wga->wga_sa_addr, 4256 sizeof(sin6), sin6tosa(&sin6)); 4257 4258 sockaddr_format(sin6tosa(&sin6), 4259 addrstr, sizeof(addrstr)); 4260 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr); 4261 4262 in6_prefixlen2mask(&mask, wga->wga_cidr); 4263 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0); 4264 
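			/*
			 * The radix tree fed by wg_rtable_add_route()
			 * needs both an address and a netmask sockaddr;
			 * in6_prefixlen2mask() expands the CIDR length,
			 * e.g. cidr 64 becomes ffff:ffff:ffff:ffff::.
			 */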
sockaddr_copy(&wga->wga_sa_mask, 4265 sizeof(sin6_mask), sin6tosa(&sin6_mask)); 4266 4267 break; 4268 } 4269 #endif 4270 default: 4271 error = EINVAL; 4272 goto out; 4273 } 4274 wga->wga_peer = wgp; 4275 4276 error = wg_rtable_add_route(wg, wga); 4277 if (error != 0) 4278 goto out; 4279 4280 j++; 4281 } 4282 wgp->wgp_n_allowedips = j; 4283 skip: 4284 *wgpp = wgp; 4285 out: 4286 return error; 4287 } 4288 4289 static int 4290 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd) 4291 { 4292 int error; 4293 char *buf; 4294 4295 WG_DLOG("buf=%p, len=%lu\n", ifd->ifd_data, ifd->ifd_len); 4296 if (ifd->ifd_len >= WG_MAX_PROPLEN) 4297 return E2BIG; 4298 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP); 4299 error = copyin(ifd->ifd_data, buf, ifd->ifd_len); 4300 if (error != 0) 4301 return error; 4302 buf[ifd->ifd_len] = '\0'; 4303 #ifdef WG_DEBUG_DUMP 4304 log(LOG_DEBUG, "%.*s\n", 4305 (int)MIN(INT_MAX, ifd->ifd_len), 4306 (const char *)buf); 4307 #endif 4308 *_buf = buf; 4309 return 0; 4310 } 4311 4312 static int 4313 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd) 4314 { 4315 int error; 4316 prop_dictionary_t prop_dict; 4317 char *buf = NULL; 4318 const void *privkey; 4319 size_t privkey_len; 4320 4321 error = wg_alloc_prop_buf(&buf, ifd); 4322 if (error != 0) 4323 return error; 4324 error = EINVAL; 4325 prop_dict = prop_dictionary_internalize(buf); 4326 if (prop_dict == NULL) 4327 goto out; 4328 if (!prop_dictionary_get_data(prop_dict, "private_key", 4329 &privkey, &privkey_len)) 4330 goto out; 4331 #ifdef WG_DEBUG_DUMP 4332 { 4333 char *hex = gethexdump(privkey, privkey_len); 4334 log(LOG_DEBUG, "privkey=%p, privkey_len=%lu\n%s\n", 4335 privkey, privkey_len, hex); 4336 puthexdump(hex, privkey, privkey_len); 4337 } 4338 #endif 4339 if (privkey_len != WG_STATIC_KEY_LEN) 4340 goto out; 4341 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN); 4342 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey); 4343 error = 0; 4344 4345 out: 4346 kmem_free(buf, ifd->ifd_len + 1); 4347 return error; 4348 } 4349 4350 static int 4351 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd) 4352 { 4353 int error; 4354 prop_dictionary_t prop_dict; 4355 char *buf = NULL; 4356 uint16_t port; 4357 4358 error = wg_alloc_prop_buf(&buf, ifd); 4359 if (error != 0) 4360 return error; 4361 error = EINVAL; 4362 prop_dict = prop_dictionary_internalize(buf); 4363 if (prop_dict == NULL) 4364 goto out; 4365 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port)) 4366 goto out; 4367 4368 error = wg->wg_ops->bind_port(wg, (uint16_t)port); 4369 4370 out: 4371 kmem_free(buf, ifd->ifd_len + 1); 4372 return error; 4373 } 4374 4375 static int 4376 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd) 4377 { 4378 int error; 4379 prop_dictionary_t prop_dict; 4380 char *buf = NULL; 4381 struct wg_peer *wgp = NULL, *wgp0 __diagused; 4382 4383 error = wg_alloc_prop_buf(&buf, ifd); 4384 if (error != 0) 4385 return error; 4386 error = EINVAL; 4387 prop_dict = prop_dictionary_internalize(buf); 4388 if (prop_dict == NULL) 4389 goto out; 4390 4391 error = wg_handle_prop_peer(wg, prop_dict, &wgp); 4392 if (error != 0) 4393 goto out; 4394 4395 mutex_enter(wg->wg_lock); 4396 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 4397 sizeof(wgp->wgp_pubkey)) != NULL || 4398 (wgp->wgp_name[0] && 4399 thmap_get(wg->wg_peers_byname, wgp->wgp_name, 4400 strlen(wgp->wgp_name)) != NULL)) { 4401 mutex_exit(wg->wg_lock); 4402 wg_destroy_peer(wgp); 4403 error = EEXIST; 4404 goto out; 4405 } 4406 wgp0 = 
thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 4407 sizeof(wgp->wgp_pubkey), wgp); 4408 KASSERT(wgp0 == wgp); 4409 if (wgp->wgp_name[0]) { 4410 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name, 4411 strlen(wgp->wgp_name), wgp); 4412 KASSERT(wgp0 == wgp); 4413 } 4414 WG_PEER_WRITER_INSERT_HEAD(wgp, wg); 4415 wg->wg_npeers++; 4416 mutex_exit(wg->wg_lock); 4417 4418 if_link_state_change(&wg->wg_if, LINK_STATE_UP); 4419 4420 out: 4421 kmem_free(buf, ifd->ifd_len + 1); 4422 return error; 4423 } 4424 4425 static int 4426 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd) 4427 { 4428 int error; 4429 prop_dictionary_t prop_dict; 4430 char *buf = NULL; 4431 const char *name; 4432 4433 error = wg_alloc_prop_buf(&buf, ifd); 4434 if (error != 0) 4435 return error; 4436 error = EINVAL; 4437 prop_dict = prop_dictionary_internalize(buf); 4438 if (prop_dict == NULL) 4439 goto out; 4440 4441 if (!prop_dictionary_get_string(prop_dict, "name", &name)) 4442 goto out; 4443 if (strlen(name) > WG_PEER_NAME_MAXLEN) 4444 goto out; 4445 4446 error = wg_destroy_peer_name(wg, name); 4447 out: 4448 kmem_free(buf, ifd->ifd_len + 1); 4449 return error; 4450 } 4451 4452 static bool 4453 wg_is_authorized(struct wg_softc *wg, u_long cmd) 4454 { 4455 int au = cmd == SIOCGDRVSPEC ? 4456 KAUTH_REQ_NETWORK_INTERFACE_WG_GETPRIV : 4457 KAUTH_REQ_NETWORK_INTERFACE_WG_SETPRIV; 4458 return kauth_authorize_network(kauth_cred_get(), 4459 KAUTH_NETWORK_INTERFACE_WG, au, &wg->wg_if, 4460 (void *)cmd, NULL) == 0; 4461 } 4462 4463 static int 4464 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd) 4465 { 4466 int error = ENOMEM; 4467 prop_dictionary_t prop_dict; 4468 prop_array_t peers = NULL; 4469 char *buf; 4470 struct wg_peer *wgp; 4471 int s, i; 4472 4473 prop_dict = prop_dictionary_create(); 4474 if (prop_dict == NULL) 4475 goto error; 4476 4477 if (wg_is_authorized(wg, SIOCGDRVSPEC)) { 4478 if (!prop_dictionary_set_data(prop_dict, "private_key", 4479 wg->wg_privkey, WG_STATIC_KEY_LEN)) 4480 goto error; 4481 } 4482 4483 if (wg->wg_listen_port != 0) { 4484 if (!prop_dictionary_set_uint16(prop_dict, "listen_port", 4485 wg->wg_listen_port)) 4486 goto error; 4487 } 4488 4489 if (wg->wg_npeers == 0) 4490 goto skip_peers; 4491 4492 peers = prop_array_create(); 4493 if (peers == NULL) 4494 goto error; 4495 4496 s = pserialize_read_enter(); 4497 i = 0; 4498 WG_PEER_READER_FOREACH(wgp, wg) { 4499 struct wg_sockaddr *wgsa; 4500 struct psref wgp_psref, wgsa_psref; 4501 prop_dictionary_t prop_peer; 4502 4503 wg_get_peer(wgp, &wgp_psref); 4504 pserialize_read_exit(s); 4505 4506 prop_peer = prop_dictionary_create(); 4507 if (prop_peer == NULL) 4508 goto next; 4509 4510 if (strlen(wgp->wgp_name) > 0) { 4511 if (!prop_dictionary_set_string(prop_peer, "name", 4512 wgp->wgp_name)) 4513 goto next; 4514 } 4515 4516 if (!prop_dictionary_set_data(prop_peer, "public_key", 4517 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey))) 4518 goto next; 4519 4520 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0}; 4521 if (!consttime_memequal(wgp->wgp_psk, psk_zero, 4522 sizeof(wgp->wgp_psk))) { 4523 if (wg_is_authorized(wg, SIOCGDRVSPEC)) { 4524 if (!prop_dictionary_set_data(prop_peer, 4525 "preshared_key", 4526 wgp->wgp_psk, sizeof(wgp->wgp_psk))) 4527 goto next; 4528 } 4529 } 4530 4531 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref); 4532 CTASSERT(AF_UNSPEC == 0); 4533 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ && 4534 !prop_dictionary_set_data(prop_peer, "endpoint", 4535 wgsatoss(wgsa), 4536 sockaddr_getsize_by_family(wgsa_family(wgsa)))) { 4537 wg_put_sa(wgp, 
		    wgsa, &wgsa_psref);
			goto next;
		}
		wg_put_sa(wgp, wgsa, &wgsa_psref);

		const struct timespec *t = &wgp->wgp_last_handshake_time;

		if (!prop_dictionary_set_uint64(prop_peer,
			"last_handshake_time_sec", (uint64_t)t->tv_sec))
			goto next;
		if (!prop_dictionary_set_uint32(prop_peer,
			"last_handshake_time_nsec", (uint32_t)t->tv_nsec))
			goto next;

		if (wgp->wgp_n_allowedips == 0)
			goto skip_allowedips;

		prop_array_t allowedips = prop_array_create();
		if (allowedips == NULL)
			goto next;
		for (int j = 0; j < wgp->wgp_n_allowedips; j++) {
			struct wg_allowedip *wga = &wgp->wgp_allowedips[j];
			prop_dictionary_t prop_allowedip;

			prop_allowedip = prop_dictionary_create();
			if (prop_allowedip == NULL)
				break;

			if (!prop_dictionary_set_int(prop_allowedip, "family",
				wga->wga_family))
				goto _next;
			if (!prop_dictionary_set_uint8(prop_allowedip, "cidr",
				wga->wga_cidr))
				goto _next;

			switch (wga->wga_family) {
			case AF_INET:
				if (!prop_dictionary_set_data(prop_allowedip,
					"ip", &wga->wga_addr4,
					sizeof(wga->wga_addr4)))
					goto _next;
				break;
#ifdef INET6
			case AF_INET6:
				if (!prop_dictionary_set_data(prop_allowedip,
					"ip", &wga->wga_addr6,
					sizeof(wga->wga_addr6)))
					goto _next;
				break;
#endif
			default:
				break;
			}
			prop_array_set(allowedips, j, prop_allowedip);
		_next:
			prop_object_release(prop_allowedip);
		}
		prop_dictionary_set(prop_peer, "allowedips", allowedips);
		prop_object_release(allowedips);

	skip_allowedips:

		prop_array_set(peers, i, prop_peer);
	next:
		if (prop_peer)
			prop_object_release(prop_peer);
		i++;

		s = pserialize_read_enter();
		wg_put_peer(wgp, &wgp_psref);
	}
	pserialize_read_exit(s);

	prop_dictionary_set(prop_dict, "peers", peers);
	prop_object_release(peers);
	peers = NULL;

skip_peers:
	buf = prop_dictionary_externalize(prop_dict);
	if (buf == NULL)
		goto error;
	if (ifd->ifd_len < (strlen(buf) + 1)) {
		error = EINVAL;
		goto error;
	}
	error = copyout(buf, ifd->ifd_data, strlen(buf) + 1);

	free(buf, 0);
error:
	if (peers != NULL)
		prop_object_release(peers);
	if (prop_dict != NULL)
		prop_object_release(prop_dict);

	return error;
}

static int
wg_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wg_softc *wg = ifp->if_softc;
	struct ifreq *ifr = data;
	struct ifaddr *ifa = data;
	struct ifdrv *ifd = data;
	int error = 0;

	switch (cmd) {
	case SIOCINITIFADDR:
		if (ifa->ifa_addr->sa_family != AF_LINK &&
		    (ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
		    (IFF_UP | IFF_RUNNING)) {
			ifp->if_flags |= IFF_UP;
			error = if_init(ifp);
		}
		return error;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
		case AF_INET:	/* IP supports Multicast */
			break;
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif
		default:	/* Other protocols don't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		return error;
	case SIOCSDRVSPEC:
		if (!wg_is_authorized(wg, cmd)) {
			return EPERM;
		}
		switch (ifd->ifd_cmd) {
		case WG_IOCTL_SET_PRIVATE_KEY:
			error = wg_ioctl_set_private_key(wg, ifd);
			break;
		case WG_IOCTL_SET_LISTEN_PORT:
			error = wg_ioctl_set_listen_port(wg, ifd);
			break;
		case WG_IOCTL_ADD_PEER:
			error = wg_ioctl_add_peer(wg, ifd);
			break;
		case WG_IOCTL_DELETE_PEER:
			error = wg_ioctl_delete_peer(wg, ifd);
			break;
		default:
			error = EINVAL;
			break;
		}
		return error;
	case SIOCGDRVSPEC:
		return wg_ioctl_get(wg, ifd);
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			if_stop(ifp, 1);
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = if_init(ifp);
			break;
		default:
			break;
		}
		return error;
#ifdef WG_RUMPKERNEL
	case SIOCSLINKSTR:
		error = wg_ioctl_linkstr(wg, ifd);
		if (error == 0)
			wg->wg_ops = &wg_ops_rumpuser;
		return error;
#endif
	default:
		break;
	}

	error = ifioctl_common(ifp, cmd, data);

#ifdef WG_RUMPKERNEL
	if (!wg_user_mode(wg))
		return error;

	/* Do the same to the corresponding tun device on the host */
	/*
	 * XXX Actually the command has not been handled yet.  It
	 * will be handled via pr_ioctl from doifioctl later.
	 */
	switch (cmd) {
	case SIOCAIFADDR:
	case SIOCDIFADDR: {
		struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
		struct in_aliasreq *ifra = &_ifra;
		KASSERT(error == ENOTTY);
		strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
		    IFNAMSIZ);
		error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
		if (error == 0)
			error = ENOTTY;
		break;
	}
#ifdef INET6
	case SIOCAIFADDR_IN6:
	case SIOCDIFADDR_IN6: {
		struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
		struct in6_aliasreq *ifra = &_ifra;
		KASSERT(error == ENOTTY);
		strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
		    IFNAMSIZ);
		error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
		if (error == 0)
			error = ENOTTY;
		break;
	}
#endif
	}
#endif /* WG_RUMPKERNEL */

	return error;
}

static int
wg_init(struct ifnet *ifp)
{

	ifp->if_flags |= IFF_RUNNING;

	/* TODO flush pending packets. */
	return 0;
}

#ifdef ALTQ
static void
wg_start(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		kpreempt_disable();
		const uint32_t h = curcpu()->ci_index;	// pktq_rps_hash(m)
		if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
			WGLOG(LOG_ERR, "pktq full, dropping\n");
			m_freem(m);
		}
		kpreempt_enable();
	}
}
#endif

static void
wg_stop(struct ifnet *ifp, int disable)
{

	KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
	ifp->if_flags &= ~IFF_RUNNING;

	/* Need to do something? */
}
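
/*
 * Illustrative sketch only, never compiled: how a privileged userland
 * caller could drive the SIOCSDRVSPEC handlers above.  wgconfig(8) is
 * the supported tool; the function name wg_example_set_listen_port and
 * the error handling are made up for illustration.  The kernel side
 * internalizes the buffer in wg_alloc_prop_buf(), so the plist must
 * stay below the kernel's WG_MAX_PROPLEN limit and may be passed with
 * or without its terminating NUL.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_wg.h>

#include <prop/proplib.h>

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int
wg_example_set_listen_port(int s, const char *ifname, uint16_t port)
{
	/* s is any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0). */
	struct ifdrv ifd;
	prop_dictionary_t d;
	char *xml;
	int error = 0;

	/* Build { "listen_port": port }, as wg_ioctl_set_listen_port() expects. */
	if ((d = prop_dictionary_create()) == NULL)
		return ENOMEM;
	prop_dictionary_set_uint16(d, "listen_port", port);
	xml = prop_dictionary_externalize(d);
	prop_object_release(d);
	if (xml == NULL)
		return ENOMEM;

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = WG_IOCTL_SET_LISTEN_PORT;
	ifd.ifd_data = xml;
	ifd.ifd_len = strlen(xml) + 1;
	if (ioctl(s, SIOCSDRVSPEC, &ifd) == -1)
		error = errno;
	free(xml);
	return error;
}
#endif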

#ifdef WG_DEBUG_PARAMS
SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
{
	const struct sysctlnode *node = NULL;

	sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "wg",
	    SYSCTL_DESCR("wg(4)"),
	    NULL, 0, NULL, 0,
	    CTL_NET, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_QUAD, "rekey_after_messages",
	    SYSCTL_DESCR("session lifetime in messages"),
	    NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_after_time",
	    SYSCTL_DESCR("session lifetime in seconds"),
	    NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_timeout",
	    SYSCTL_DESCR("session handshake retry time"),
	    NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "rekey_attempt_time",
	    SYSCTL_DESCR("session handshake timeout"),
	    NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "keepalive_timeout",
	    SYSCTL_DESCR("keepalive timeout"),
	    NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &node, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "force_underload",
	    SYSCTL_DESCR("force under-load handling"),
	    NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
}
#endif

#ifdef WG_RUMPKERNEL
static bool
wg_user_mode(struct wg_softc *wg)
{

	return wg->wg_user != NULL;
}

static int
wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
{
	struct ifnet *ifp = &wg->wg_if;
	int error;

	if (ifp->if_flags & IFF_UP)
		return EBUSY;

	if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
		/* XXX do nothing */
		return 0;
	} else if (ifd->ifd_cmd != 0) {
		return EINVAL;
	} else if (wg->wg_user != NULL) {
		return EBUSY;
	}

	/* Assume \0 included */
	if (ifd->ifd_len > IFNAMSIZ) {
		return E2BIG;
	} else if (ifd->ifd_len < 1) {
		return EINVAL;
	}

	char tun_name[IFNAMSIZ];
	error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
	if (error != 0)
		return error;

	if (strncmp(tun_name, "tun", 3) != 0)
		return EINVAL;

	error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);

	return error;
}

static int
wg_send_user(struct wg_peer *wgp, struct mbuf *m)
{
	int error;
	struct psref psref;
	struct wg_sockaddr *wgsa;
	struct wg_softc *wg = wgp->wgp_sc;
	struct iovec iov[1];

	wgsa = wg_get_endpoint_sa(wgp, &psref);

	iov[0].iov_base = mtod(m, void *);
	iov[0].iov_len = m->m_len;

	/* Send messages to a peer via an ordinary socket.
*/ 4914 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1); 4915 4916 wg_put_sa(wgp, wgsa, &psref); 4917 4918 m_freem(m); 4919 4920 return error; 4921 } 4922 4923 static void 4924 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af) 4925 { 4926 struct wg_softc *wg = ifp->if_softc; 4927 struct iovec iov[2]; 4928 struct sockaddr_storage ss; 4929 4930 KASSERT(af == AF_INET || af == AF_INET6); 4931 4932 WG_TRACE(""); 4933 4934 if (af == AF_INET) { 4935 struct sockaddr_in *sin = (struct sockaddr_in *)&ss; 4936 struct ip *ip; 4937 4938 KASSERT(m->m_len >= sizeof(struct ip)); 4939 ip = mtod(m, struct ip *); 4940 sockaddr_in_init(sin, &ip->ip_dst, 0); 4941 } else { 4942 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss; 4943 struct ip6_hdr *ip6; 4944 4945 KASSERT(m->m_len >= sizeof(struct ip6_hdr)); 4946 ip6 = mtod(m, struct ip6_hdr *); 4947 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0); 4948 } 4949 4950 iov[0].iov_base = &ss; 4951 iov[0].iov_len = ss.ss_len; 4952 iov[1].iov_base = mtod(m, void *); 4953 iov[1].iov_len = m->m_len; 4954 4955 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 4956 4957 /* Send decrypted packets to users via a tun. */ 4958 rumpuser_wg_send_user(wg->wg_user, iov, 2); 4959 4960 m_freem(m); 4961 } 4962 4963 static int 4964 wg_bind_port_user(struct wg_softc *wg, const uint16_t port) 4965 { 4966 int error; 4967 uint16_t old_port = wg->wg_listen_port; 4968 4969 if (port != 0 && old_port == port) 4970 return 0; 4971 4972 error = rumpuser_wg_sock_bind(wg->wg_user, port); 4973 if (error == 0) 4974 wg->wg_listen_port = port; 4975 return error; 4976 } 4977 4978 /* 4979 * Receive user packets. 4980 */ 4981 void 4982 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen) 4983 { 4984 struct ifnet *ifp = &wg->wg_if; 4985 struct mbuf *m; 4986 const struct sockaddr *dst; 4987 4988 WG_TRACE(""); 4989 4990 dst = iov[0].iov_base; 4991 4992 m = m_gethdr(M_DONTWAIT, MT_DATA); 4993 if (m == NULL) 4994 return; 4995 m->m_len = m->m_pkthdr.len = 0; 4996 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base); 4997 4998 WG_DLOG("iov_len=%lu\n", iov[1].iov_len); 4999 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 5000 5001 (void)wg_output(ifp, m, dst, NULL); 5002 } 5003 5004 /* 5005 * Receive packets from a peer. 5006 */ 5007 void 5008 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen) 5009 { 5010 struct mbuf *m; 5011 const struct sockaddr *src; 5012 5013 WG_TRACE(""); 5014 5015 src = iov[0].iov_base; 5016 5017 m = m_gethdr(M_DONTWAIT, MT_DATA); 5018 if (m == NULL) 5019 return; 5020 m->m_len = m->m_pkthdr.len = 0; 5021 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base); 5022 5023 WG_DLOG("iov_len=%lu\n", iov[1].iov_len); 5024 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 5025 5026 wg_handle_packet(wg, m, src); 5027 } 5028 #endif /* WG_RUMPKERNEL */ 5029 5030 /* 5031 * Module infrastructure 5032 */ 5033 #include "if_module.h" 5034 5035 IF_MODULE(MODULE_CLASS_DRIVER, wg, "sodium,blake2s") 5036
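
/*
 * Companion sketch, never compiled: reading back the state exported by
 * wg_ioctl_get() above via SIOCGDRVSPEC.  On success the returned
 * dictionary carries "listen_port" and a "peers" array (each entry
 * with "public_key", optional "endpoint", "allowedips" and the
 * last-handshake timestamps); "private_key" and per-peer
 * "preshared_key" appear only for privileged callers.  The name
 * wg_example_dump and the buffer size are made up for illustration.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_wg.h>

#include <prop/proplib.h>

#include <stdlib.h>
#include <string.h>

static prop_dictionary_t
wg_example_dump(int s, const char *ifname)
{
	struct ifdrv ifd;
	prop_dictionary_t d = NULL;
	/* Must be at least as large as the externalized plist. */
	size_t len = 1024 * 1024;
	char *buf;

	if ((buf = malloc(len)) == NULL)
		return NULL;

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = 0;
	ifd.ifd_data = buf;
	ifd.ifd_len = len;
	if (ioctl(s, SIOCGDRVSPEC, &ifd) == 0)
		d = prop_dictionary_internalize(buf);
	free(buf);
	return d;
}
#endif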