/*	$NetBSD: if_wg.c,v 1.77 2023/08/01 07:04:16 mrg Exp $	*/

/*
 * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This network interface aims to implement the WireGuard protocol.
 * The implementation is based on the paper of WireGuard as of
 * 2018-06-30 [1].  The paper is referred to in the source code with the
 * label [W].  The specification of the Noise protocol framework as of
 * 2018-07-11 [2] is likewise referred to with the label [N].
 *
 * [1] https://www.wireguard.com/papers/wireguard.pdf
 * [2] http://noiseprotocol.org/noise.pdf
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.77 2023/08/01 07:04:16 mrg Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq_enabled.h"
#include "opt_inet.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cprng.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/domain.h>
#include <sys/errno.h>
#include <sys/intr.h>
#include <sys/ioctl.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/once.h>
#include <sys/percpu.h>
#include <sys/pserialize.h>
#include <sys/psref.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/thmap.h>
#include <sys/threadpool.h>
#include <sys/time.h>
#include <sys/timespec.h>
#include <sys/workqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_wg.h>
#include <net/pktqueue.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/udp6_var.h>
#endif /* INET6 */

#include <prop/proplib.h>

#include <crypto/blake2/blake2s.h>
#include <crypto/sodium/crypto_aead_chacha20poly1305.h>
#include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
#include <crypto/sodium/crypto_scalarmult.h>

#include "ioconf.h"

#ifdef WG_RUMPKERNEL
#include "wg_user.h"
#endif

/*
 * Data structures
 * - struct wg_softc is an instance of a wg interface
 *   - It has a list of peers (struct wg_peer)
 *   - It has a threadpool job that sends/receives handshake messages and
 *     runs event handlers
 *   - It has its own two routing tables: one for IPv4 and the other for IPv6
 * - struct wg_peer represents a peer
 *   - It has a struct work to handle handshakes and timer tasks
 *   - It has a pair of session instances (struct wg_session)
 *   - It has a pair of endpoint instances (struct wg_sockaddr)
 *     - Normally one endpoint is used; the second one is used only on
 *       a peer migration (a change of the peer's IP address)
 *   - It has a list of IP addresses and subnetworks called allowedips
 *     (struct wg_allowedip)
 *     - A packet sent over a session is allowed if its destination matches
 *       any IP address or subnetwork in the list
 * - struct wg_session represents a session of a secure tunnel with a peer
 *   - Two session instances belong to a peer: a stable session and an
 *     unstable session
 *   - The handshake process of a session always starts with an unstable
 *     instance
 *   - Once a session is established, its instance becomes stable and the
 *     other becomes unstable instead
 *   - Data messages are always sent via the stable session
 *
 * Locking notes:
 * - Each wg has a mutex(9) wg_lock, and a rwlock(9) wg_rwlock
 *   - Changes to the peer list are serialized by wg_lock
 *   - The peer list may be read with pserialize(9) and psref(9)
 *   - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46])
 *     => XXX replace by pserialize when routing table is psz-safe
 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken
 *   only in thread context and serializes:
 *   - the stable and unstable session pointers
 *   - all unstable session state
 * - Packet processing may be done in softint context:
 *   - The stable session can be read under pserialize(9) or psref(9)
 *     - The stable session is always ESTABLISHED
 *     - On a session swap, we must wait for all readers to release a
 *       reference to the stable session before changing wgs_state and
 *       other session state
 * - Lock order: wg_lock -> wgp_lock
 */
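
/*
 * Illustrative sketch (not compiled): the access pattern the notes
 * above describe, in roughly the shape it takes later in this file
 * (cf. wg_get_stable_session, wg_put_session and wg_destroy_session).
 * The example_* names are placeholders, not part of the driver.
 * Softint readers take only a passive reference to the stable session;
 * writers take wgp_lock (after wg_lock, if both are needed) and wait
 * out the readers before reusing session state.
 */
#if 0
static struct wg_session *
example_read_stable_session(struct wg_peer *wgp, struct psref *psref)
{
	struct wg_session *wgs;
	int s;

	s = pserialize_read_enter();
	wgs = atomic_load_consume(&wgp->wgp_session_stable);
	if (wgs->wgs_state != WGS_STATE_ESTABLISHED)
		wgs = NULL;	/* not yet usable for data packets */
	else
		psref_acquire(psref, &wgs->wgs_psref, wg_psref_class);
	pserialize_read_exit(s);

	return wgs;	/* caller releases with psref_release() */
}

static void
example_retire_session(struct wg_peer *wgp, struct wg_session *wgs)
{

	mutex_enter(wgp->wgp_lock);	/* lock order: wg_lock -> wgp_lock */
	/* ... unhook wgs, update wgs_state ... */
	pserialize_perform(wgp->wgp_psz);	/* drain pserialize readers */
	psref_target_destroy(&wgs->wgs_psref, wg_psref_class);
	mutex_exit(wgp->wgp_lock);
}
#endif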

#define WGLOG(level, fmt, args...)	\
	log(level, "%s: " fmt, __func__, ##args)

/* Debug options */
#ifdef WG_DEBUG
/* Output debug logs */
#ifndef WG_DEBUG_LOG
#define WG_DEBUG_LOG
#endif
/* Output trace logs */
#ifndef WG_DEBUG_TRACE
#define WG_DEBUG_TRACE
#endif
/* Output hash values, etc. */
#ifndef WG_DEBUG_DUMP
#define WG_DEBUG_DUMP
#endif
/* Make some internal parameters configurable for testing and debugging */
#ifndef WG_DEBUG_PARAMS
#define WG_DEBUG_PARAMS
#endif
#endif

#ifdef WG_DEBUG_TRACE
#define WG_TRACE(msg)	\
	log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg))
#else
#define WG_TRACE(msg)	__nothing
#endif

#ifdef WG_DEBUG_LOG
#define WG_DLOG(fmt, args...)	log(LOG_DEBUG, "%s: " fmt, __func__, ##args)
#else
#define WG_DLOG(fmt, args...)	__nothing
#endif

#define WG_LOG_RATECHECK(wgprc, level, fmt, args...)	do {	\
	if (ppsratecheck(&(wgprc)->wgprc_lasttime,		\
	    &(wgprc)->wgprc_curpps, 1)) {			\
		log(level, fmt, ##args);			\
	}							\
} while (0)

#ifdef WG_DEBUG_PARAMS
static bool wg_force_underload = false;
#endif

#ifdef WG_DEBUG_DUMP

static char *
gethexdump(const char *p, size_t n)
{
	char *buf;
	size_t i;

	if (n > SIZE_MAX/3 - 1)
		return NULL;
	buf = kmem_alloc(3*n + 1, KM_NOSLEEP);
	if (buf == NULL)
		return NULL;
	for (i = 0; i < n; i++)
		snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]);
	return buf;
}

static void
puthexdump(char *buf, const void *p, size_t n)
{

	if (buf == NULL)
		return;
	kmem_free(buf, 3*n + 1);
}

#ifdef WG_RUMPKERNEL
static void
wg_dump_buf(const char *func, const char *buf, const size_t size)
{
	char *hex = gethexdump(buf, size);

	log(LOG_DEBUG, "%s: %s\n", func, hex ? hex : "(enomem)");
	puthexdump(hex, buf, size);
}
#endif

static void
wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash,
    const size_t size)
{
	char *hex = gethexdump(hash, size);

	log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex ?
hex : "(enomem)"); 259 puthexdump(hex, hash, size); 260 } 261 262 #define WG_DUMP_HASH(name, hash) \ 263 wg_dump_hash(__func__, name, hash, WG_HASH_LEN) 264 #define WG_DUMP_HASH48(name, hash) \ 265 wg_dump_hash(__func__, name, hash, 48) 266 #define WG_DUMP_BUF(buf, size) \ 267 wg_dump_buf(__func__, buf, size) 268 #else 269 #define WG_DUMP_HASH(name, hash) __nothing 270 #define WG_DUMP_HASH48(name, hash) __nothing 271 #define WG_DUMP_BUF(buf, size) __nothing 272 #endif /* WG_DEBUG_DUMP */ 273 274 /* chosen somewhat arbitrarily -- fits in signed 16 bits NUL-terminated */ 275 #define WG_MAX_PROPLEN 32766 276 277 #define WG_MTU 1420 278 #define WG_ALLOWEDIPS 16 279 280 #define CURVE25519_KEY_LEN 32 281 #define TAI64N_LEN sizeof(uint32_t) * 3 282 #define POLY1305_AUTHTAG_LEN 16 283 #define HMAC_BLOCK_LEN 64 284 285 /* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */ 286 /* [N] 4.3: Hash functions */ 287 #define NOISE_DHLEN 32 288 /* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */ 289 #define NOISE_HASHLEN 32 290 #define NOISE_BLOCKLEN 64 291 #define NOISE_HKDF_OUTPUT_LEN NOISE_HASHLEN 292 /* [N] 5.1: "k" */ 293 #define NOISE_CIPHER_KEY_LEN 32 294 /* 295 * [N] 9.2: "psk" 296 * "... psk is a 32-byte secret value provided by the application." 297 */ 298 #define NOISE_PRESHARED_KEY_LEN 32 299 300 #define WG_STATIC_KEY_LEN CURVE25519_KEY_LEN 301 #define WG_TIMESTAMP_LEN TAI64N_LEN 302 303 #define WG_PRESHARED_KEY_LEN NOISE_PRESHARED_KEY_LEN 304 305 #define WG_COOKIE_LEN 16 306 #define WG_MAC_LEN 16 307 #define WG_RANDVAL_LEN 24 308 309 #define WG_EPHEMERAL_KEY_LEN CURVE25519_KEY_LEN 310 /* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */ 311 #define WG_CHAINING_KEY_LEN NOISE_HASHLEN 312 /* [N] 5.2: "h: A hash output of HASHLEN bytes" */ 313 #define WG_HASH_LEN NOISE_HASHLEN 314 #define WG_CIPHER_KEY_LEN NOISE_CIPHER_KEY_LEN 315 #define WG_DH_OUTPUT_LEN NOISE_DHLEN 316 #define WG_KDF_OUTPUT_LEN NOISE_HKDF_OUTPUT_LEN 317 #define WG_AUTHTAG_LEN POLY1305_AUTHTAG_LEN 318 #define WG_DATA_KEY_LEN 32 319 #define WG_SALT_LEN 24 320 321 /* 322 * The protocol messages 323 */ 324 struct wg_msg { 325 uint32_t wgm_type; 326 } __packed; 327 328 /* [W] 5.4.2 First Message: Initiator to Responder */ 329 struct wg_msg_init { 330 uint32_t wgmi_type; 331 uint32_t wgmi_sender; 332 uint8_t wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN]; 333 uint8_t wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN]; 334 uint8_t wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN]; 335 uint8_t wgmi_mac1[WG_MAC_LEN]; 336 uint8_t wgmi_mac2[WG_MAC_LEN]; 337 } __packed; 338 339 /* [W] 5.4.3 Second Message: Responder to Initiator */ 340 struct wg_msg_resp { 341 uint32_t wgmr_type; 342 uint32_t wgmr_sender; 343 uint32_t wgmr_receiver; 344 uint8_t wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN]; 345 uint8_t wgmr_empty[0 + WG_AUTHTAG_LEN]; 346 uint8_t wgmr_mac1[WG_MAC_LEN]; 347 uint8_t wgmr_mac2[WG_MAC_LEN]; 348 } __packed; 349 350 /* [W] 5.4.6 Subsequent Messages: Transport Data Messages */ 351 struct wg_msg_data { 352 uint32_t wgmd_type; 353 uint32_t wgmd_receiver; 354 uint64_t wgmd_counter; 355 uint32_t wgmd_packet[0]; 356 } __packed; 357 358 /* [W] 5.4.7 Under Load: Cookie Reply Message */ 359 struct wg_msg_cookie { 360 uint32_t wgmc_type; 361 uint32_t wgmc_receiver; 362 uint8_t wgmc_salt[WG_SALT_LEN]; 363 uint8_t wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN]; 364 } __packed; 365 366 #define WG_MSG_TYPE_INIT 1 367 #define WG_MSG_TYPE_RESP 2 368 #define WG_MSG_TYPE_COOKIE 3 369 #define WG_MSG_TYPE_DATA 4 370 #define WG_MSG_TYPE_MAX WG_MSG_TYPE_DATA 
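
/*
 * Illustrative check (not compiled): with the __packed layouts above,
 * the handshake and cookie messages come out at the familiar WireGuard
 * wire sizes, and the transport-data header at 16 bytes ([W] 5.4).
 */
#if 0
CTASSERT(sizeof(struct wg_msg_init) == 148);
CTASSERT(sizeof(struct wg_msg_resp) == 92);
CTASSERT(sizeof(struct wg_msg_cookie) == 64);
CTASSERT(sizeof(struct wg_msg_data) == 16);	/* header only; payload follows */
#endif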
371 372 /* Sliding windows */ 373 374 #define SLIWIN_BITS 2048u 375 #define SLIWIN_TYPE uint32_t 376 #define SLIWIN_BPW NBBY*sizeof(SLIWIN_TYPE) 377 #define SLIWIN_WORDS howmany(SLIWIN_BITS, SLIWIN_BPW) 378 #define SLIWIN_NPKT (SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE)) 379 380 struct sliwin { 381 SLIWIN_TYPE B[SLIWIN_WORDS]; 382 uint64_t T; 383 }; 384 385 static void 386 sliwin_reset(struct sliwin *W) 387 { 388 389 memset(W, 0, sizeof(*W)); 390 } 391 392 static int 393 sliwin_check_fast(const volatile struct sliwin *W, uint64_t S) 394 { 395 396 /* 397 * If it's more than one window older than the highest sequence 398 * number we've seen, reject. 399 */ 400 #ifdef __HAVE_ATOMIC64_LOADSTORE 401 if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T)) 402 return EAUTH; 403 #endif 404 405 /* 406 * Otherwise, we need to take the lock to decide, so don't 407 * reject just yet. Caller must serialize a call to 408 * sliwin_update in this case. 409 */ 410 return 0; 411 } 412 413 static int 414 sliwin_update(struct sliwin *W, uint64_t S) 415 { 416 unsigned word, bit; 417 418 /* 419 * If it's more than one window older than the highest sequence 420 * number we've seen, reject. 421 */ 422 if (S + SLIWIN_NPKT < W->T) 423 return EAUTH; 424 425 /* 426 * If it's higher than the highest sequence number we've seen, 427 * advance the window. 428 */ 429 if (S > W->T) { 430 uint64_t i = W->T / SLIWIN_BPW; 431 uint64_t j = S / SLIWIN_BPW; 432 unsigned k; 433 434 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++) 435 W->B[(i + k + 1) % SLIWIN_WORDS] = 0; 436 #ifdef __HAVE_ATOMIC64_LOADSTORE 437 atomic_store_relaxed(&W->T, S); 438 #else 439 W->T = S; 440 #endif 441 } 442 443 /* Test and set the bit -- if already set, reject. */ 444 word = (S / SLIWIN_BPW) % SLIWIN_WORDS; 445 bit = S % SLIWIN_BPW; 446 if (W->B[word] & (1UL << bit)) 447 return EAUTH; 448 W->B[word] |= 1U << bit; 449 450 /* Accept! 
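	 *
	 * A typical caller pattern, sketched (cf. the transport-data
	 * receive path, which keeps its window in wgs_recvwin): first
	 * the cheap, lock-free screen, then the serialized update:
	 *
	 *	if (sliwin_check_fast(&wgs->wgs_recvwin->window, S))
	 *		goto reject;
	 *	mutex_enter(&wgs->wgs_recvwin->lock);
	 *	error = sliwin_update(&wgs->wgs_recvwin->window, S);
	 *	mutex_exit(&wgs->wgs_recvwin->lock);
	 *	if (error)
	 *		goto reject;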
*/ 451 return 0; 452 } 453 454 struct wg_session { 455 struct wg_peer *wgs_peer; 456 struct psref_target 457 wgs_psref; 458 459 int wgs_state; 460 #define WGS_STATE_UNKNOWN 0 461 #define WGS_STATE_INIT_ACTIVE 1 462 #define WGS_STATE_INIT_PASSIVE 2 463 #define WGS_STATE_ESTABLISHED 3 464 #define WGS_STATE_DESTROYING 4 465 466 time_t wgs_time_established; 467 time_t wgs_time_last_data_sent; 468 bool wgs_is_initiator; 469 470 uint32_t wgs_local_index; 471 uint32_t wgs_remote_index; 472 #ifdef __HAVE_ATOMIC64_LOADSTORE 473 volatile uint64_t 474 wgs_send_counter; 475 #else 476 kmutex_t wgs_send_counter_lock; 477 uint64_t wgs_send_counter; 478 #endif 479 480 struct { 481 kmutex_t lock; 482 struct sliwin window; 483 } *wgs_recvwin; 484 485 uint8_t wgs_handshake_hash[WG_HASH_LEN]; 486 uint8_t wgs_chaining_key[WG_CHAINING_KEY_LEN]; 487 uint8_t wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN]; 488 uint8_t wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN]; 489 uint8_t wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN]; 490 uint8_t wgs_tkey_send[WG_DATA_KEY_LEN]; 491 uint8_t wgs_tkey_recv[WG_DATA_KEY_LEN]; 492 }; 493 494 struct wg_sockaddr { 495 union { 496 struct sockaddr_storage _ss; 497 struct sockaddr _sa; 498 struct sockaddr_in _sin; 499 struct sockaddr_in6 _sin6; 500 }; 501 struct psref_target wgsa_psref; 502 }; 503 504 #define wgsatoss(wgsa) (&(wgsa)->_ss) 505 #define wgsatosa(wgsa) (&(wgsa)->_sa) 506 #define wgsatosin(wgsa) (&(wgsa)->_sin) 507 #define wgsatosin6(wgsa) (&(wgsa)->_sin6) 508 509 #define wgsa_family(wgsa) (wgsatosa(wgsa)->sa_family) 510 511 struct wg_peer; 512 struct wg_allowedip { 513 struct radix_node wga_nodes[2]; 514 struct wg_sockaddr _wga_sa_addr; 515 struct wg_sockaddr _wga_sa_mask; 516 #define wga_sa_addr _wga_sa_addr._sa 517 #define wga_sa_mask _wga_sa_mask._sa 518 519 int wga_family; 520 uint8_t wga_cidr; 521 union { 522 struct in_addr _ip4; 523 struct in6_addr _ip6; 524 } wga_addr; 525 #define wga_addr4 wga_addr._ip4 526 #define wga_addr6 wga_addr._ip6 527 528 struct wg_peer *wga_peer; 529 }; 530 531 typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN]; 532 533 struct wg_ppsratecheck { 534 struct timeval wgprc_lasttime; 535 int wgprc_curpps; 536 }; 537 538 struct wg_softc; 539 struct wg_peer { 540 struct wg_softc *wgp_sc; 541 char wgp_name[WG_PEER_NAME_MAXLEN + 1]; 542 struct pslist_entry wgp_peerlist_entry; 543 pserialize_t wgp_psz; 544 struct psref_target wgp_psref; 545 kmutex_t *wgp_lock; 546 kmutex_t *wgp_intr_lock; 547 548 uint8_t wgp_pubkey[WG_STATIC_KEY_LEN]; 549 struct wg_sockaddr *wgp_endpoint; 550 struct wg_sockaddr *wgp_endpoint0; 551 volatile unsigned wgp_endpoint_changing; 552 bool wgp_endpoint_available; 553 554 /* The preshared key (optional) */ 555 uint8_t wgp_psk[WG_PRESHARED_KEY_LEN]; 556 557 struct wg_session *wgp_session_stable; 558 struct wg_session *wgp_session_unstable; 559 560 /* first outgoing packet awaiting session initiation */ 561 struct mbuf *wgp_pending; 562 563 /* timestamp in big-endian */ 564 wg_timestamp_t wgp_timestamp_latest_init; 565 566 struct timespec wgp_last_handshake_time; 567 568 callout_t wgp_rekey_timer; 569 callout_t wgp_handshake_timeout_timer; 570 callout_t wgp_session_dtor_timer; 571 572 time_t wgp_handshake_start_time; 573 574 int wgp_n_allowedips; 575 struct wg_allowedip wgp_allowedips[WG_ALLOWEDIPS]; 576 577 time_t wgp_latest_cookie_time; 578 uint8_t wgp_latest_cookie[WG_COOKIE_LEN]; 579 uint8_t wgp_last_sent_mac1[WG_MAC_LEN]; 580 bool wgp_last_sent_mac1_valid; 581 uint8_t wgp_last_sent_cookie[WG_COOKIE_LEN]; 582 bool 
wgp_last_sent_cookie_valid; 583 584 time_t wgp_last_msg_received_time[WG_MSG_TYPE_MAX]; 585 586 time_t wgp_last_genrandval_time; 587 uint32_t wgp_randval; 588 589 struct wg_ppsratecheck wgp_ppsratecheck; 590 591 struct work wgp_work; 592 unsigned int wgp_tasks; 593 #define WGP_TASK_SEND_INIT_MESSAGE __BIT(0) 594 #define WGP_TASK_RETRY_HANDSHAKE __BIT(1) 595 #define WGP_TASK_ESTABLISH_SESSION __BIT(2) 596 #define WGP_TASK_ENDPOINT_CHANGED __BIT(3) 597 #define WGP_TASK_SEND_KEEPALIVE_MESSAGE __BIT(4) 598 #define WGP_TASK_DESTROY_PREV_SESSION __BIT(5) 599 }; 600 601 struct wg_ops; 602 603 struct wg_softc { 604 struct ifnet wg_if; 605 LIST_ENTRY(wg_softc) wg_list; 606 kmutex_t *wg_lock; 607 kmutex_t *wg_intr_lock; 608 krwlock_t *wg_rwlock; 609 610 uint8_t wg_privkey[WG_STATIC_KEY_LEN]; 611 uint8_t wg_pubkey[WG_STATIC_KEY_LEN]; 612 613 int wg_npeers; 614 struct pslist_head wg_peers; 615 struct thmap *wg_peers_bypubkey; 616 struct thmap *wg_peers_byname; 617 struct thmap *wg_sessions_byindex; 618 uint16_t wg_listen_port; 619 620 struct threadpool *wg_threadpool; 621 622 struct threadpool_job wg_job; 623 int wg_upcalls; 624 #define WG_UPCALL_INET __BIT(0) 625 #define WG_UPCALL_INET6 __BIT(1) 626 627 #ifdef INET 628 struct socket *wg_so4; 629 struct radix_node_head *wg_rtable_ipv4; 630 #endif 631 #ifdef INET6 632 struct socket *wg_so6; 633 struct radix_node_head *wg_rtable_ipv6; 634 #endif 635 636 struct wg_ppsratecheck wg_ppsratecheck; 637 638 struct wg_ops *wg_ops; 639 640 #ifdef WG_RUMPKERNEL 641 struct wg_user *wg_user; 642 #endif 643 }; 644 645 /* [W] 6.1 Preliminaries */ 646 #define WG_REKEY_AFTER_MESSAGES (1ULL << 60) 647 #define WG_REJECT_AFTER_MESSAGES (UINT64_MAX - (1 << 13)) 648 #define WG_REKEY_AFTER_TIME 120 649 #define WG_REJECT_AFTER_TIME 180 650 #define WG_REKEY_ATTEMPT_TIME 90 651 #define WG_REKEY_TIMEOUT 5 652 #define WG_KEEPALIVE_TIMEOUT 10 653 654 #define WG_COOKIE_TIME 120 655 #define WG_RANDVAL_TIME (2 * 60) 656 657 static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES; 658 static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES; 659 static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME; 660 static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME; 661 static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME; 662 static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT; 663 static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT; 664 665 static struct mbuf * 666 wg_get_mbuf(size_t, size_t); 667 668 static int wg_send_data_msg(struct wg_peer *, struct wg_session *, 669 struct mbuf *); 670 static int wg_send_cookie_msg(struct wg_softc *, struct wg_peer *, 671 const uint32_t, const uint8_t [WG_MAC_LEN], 672 const struct sockaddr *); 673 static int wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *, 674 struct wg_session *, const struct wg_msg_init *); 675 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *); 676 677 static struct wg_peer * 678 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *, 679 struct psref *); 680 static struct wg_peer * 681 wg_lookup_peer_by_pubkey(struct wg_softc *, 682 const uint8_t [WG_STATIC_KEY_LEN], struct psref *); 683 684 static struct wg_session * 685 wg_lookup_session_by_index(struct wg_softc *, 686 const uint32_t, struct psref *); 687 688 static void wg_update_endpoint_if_necessary(struct wg_peer *, 689 const struct sockaddr *); 690 691 static void wg_schedule_rekey_timer(struct wg_peer *); 692 static void wg_schedule_session_dtor_timer(struct wg_peer *); 693 694 
static bool wg_is_underload(struct wg_softc *, struct wg_peer *, int); 695 static void wg_calculate_keys(struct wg_session *, const bool); 696 697 static void wg_clear_states(struct wg_session *); 698 699 static void wg_get_peer(struct wg_peer *, struct psref *); 700 static void wg_put_peer(struct wg_peer *, struct psref *); 701 702 static int wg_send_so(struct wg_peer *, struct mbuf *); 703 static int wg_send_udp(struct wg_peer *, struct mbuf *); 704 static int wg_output(struct ifnet *, struct mbuf *, 705 const struct sockaddr *, const struct rtentry *); 706 static void wg_input(struct ifnet *, struct mbuf *, const int); 707 static int wg_ioctl(struct ifnet *, u_long, void *); 708 static int wg_bind_port(struct wg_softc *, const uint16_t); 709 static int wg_init(struct ifnet *); 710 #ifdef ALTQ 711 static void wg_start(struct ifnet *); 712 #endif 713 static void wg_stop(struct ifnet *, int); 714 715 static void wg_peer_work(struct work *, void *); 716 static void wg_job(struct threadpool_job *); 717 static void wgintr(void *); 718 static void wg_purge_pending_packets(struct wg_peer *); 719 720 static int wg_clone_create(struct if_clone *, int); 721 static int wg_clone_destroy(struct ifnet *); 722 723 struct wg_ops { 724 int (*send_hs_msg)(struct wg_peer *, struct mbuf *); 725 int (*send_data_msg)(struct wg_peer *, struct mbuf *); 726 void (*input)(struct ifnet *, struct mbuf *, const int); 727 int (*bind_port)(struct wg_softc *, const uint16_t); 728 }; 729 730 struct wg_ops wg_ops_rumpkernel = { 731 .send_hs_msg = wg_send_so, 732 .send_data_msg = wg_send_udp, 733 .input = wg_input, 734 .bind_port = wg_bind_port, 735 }; 736 737 #ifdef WG_RUMPKERNEL 738 static bool wg_user_mode(struct wg_softc *); 739 static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *); 740 741 static int wg_send_user(struct wg_peer *, struct mbuf *); 742 static void wg_input_user(struct ifnet *, struct mbuf *, const int); 743 static int wg_bind_port_user(struct wg_softc *, const uint16_t); 744 745 struct wg_ops wg_ops_rumpuser = { 746 .send_hs_msg = wg_send_user, 747 .send_data_msg = wg_send_user, 748 .input = wg_input_user, 749 .bind_port = wg_bind_port_user, 750 }; 751 #endif 752 753 #define WG_PEER_READER_FOREACH(wgp, wg) \ 754 PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \ 755 wgp_peerlist_entry) 756 #define WG_PEER_WRITER_FOREACH(wgp, wg) \ 757 PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \ 758 wgp_peerlist_entry) 759 #define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \ 760 PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry) 761 #define WG_PEER_WRITER_REMOVE(wgp) \ 762 PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry) 763 764 struct wg_route { 765 struct radix_node wgr_nodes[2]; 766 struct wg_peer *wgr_peer; 767 }; 768 769 static struct radix_node_head * 770 wg_rnh(struct wg_softc *wg, const int family) 771 { 772 773 switch (family) { 774 case AF_INET: 775 return wg->wg_rtable_ipv4; 776 #ifdef INET6 777 case AF_INET6: 778 return wg->wg_rtable_ipv6; 779 #endif 780 default: 781 return NULL; 782 } 783 } 784 785 786 /* 787 * Global variables 788 */ 789 static volatile unsigned wg_count __cacheline_aligned; 790 791 struct psref_class *wg_psref_class __read_mostly; 792 793 static struct if_clone wg_cloner = 794 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy); 795 796 static struct pktqueue *wg_pktq __read_mostly; 797 static struct workqueue *wg_wq __read_mostly; 798 799 void wgattach(int); 800 /* ARGSUSED */ 801 void 802 wgattach(int count) 803 { 804 /* 805 
* Nothing to do here, initialization is handled by the 806 * module initialization code in wginit() below). 807 */ 808 } 809 810 static void 811 wginit(void) 812 { 813 814 wg_psref_class = psref_class_create("wg", IPL_SOFTNET); 815 816 if_clone_attach(&wg_cloner); 817 } 818 819 /* 820 * XXX Kludge: This should just happen in wginit, but workqueue_create 821 * cannot be run until after CPUs have been detected, and wginit runs 822 * before configure. 823 */ 824 static int 825 wginitqueues(void) 826 { 827 int error __diagused; 828 829 wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL); 830 KASSERT(wg_pktq != NULL); 831 832 error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL, 833 PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU); 834 KASSERT(error == 0); 835 836 return 0; 837 } 838 839 static void 840 wg_guarantee_initialized(void) 841 { 842 static ONCE_DECL(init); 843 int error __diagused; 844 845 error = RUN_ONCE(&init, wginitqueues); 846 KASSERT(error == 0); 847 } 848 849 static int 850 wg_count_inc(void) 851 { 852 unsigned o, n; 853 854 do { 855 o = atomic_load_relaxed(&wg_count); 856 if (o == UINT_MAX) 857 return ENFILE; 858 n = o + 1; 859 } while (atomic_cas_uint(&wg_count, o, n) != o); 860 861 return 0; 862 } 863 864 static void 865 wg_count_dec(void) 866 { 867 unsigned c __diagused; 868 869 c = atomic_dec_uint_nv(&wg_count); 870 KASSERT(c != UINT_MAX); 871 } 872 873 static int 874 wgdetach(void) 875 { 876 877 /* Prevent new interface creation. */ 878 if_clone_detach(&wg_cloner); 879 880 /* Check whether there are any existing interfaces. */ 881 if (atomic_load_relaxed(&wg_count)) { 882 /* Back out -- reattach the cloner. */ 883 if_clone_attach(&wg_cloner); 884 return EBUSY; 885 } 886 887 /* No interfaces left. Nuke it. */ 888 workqueue_destroy(wg_wq); 889 pktq_destroy(wg_pktq); 890 psref_class_destroy(wg_psref_class); 891 892 return 0; 893 } 894 895 static void 896 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN], 897 uint8_t hash[WG_HASH_LEN]) 898 { 899 /* [W] 5.4: CONSTRUCTION */ 900 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; 901 /* [W] 5.4: IDENTIFIER */ 902 const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com"; 903 struct blake2s state; 904 905 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0, 906 signature, strlen(signature)); 907 908 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN); 909 memcpy(hash, ckey, WG_CHAINING_KEY_LEN); 910 911 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 912 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN); 913 blake2s_update(&state, id, strlen(id)); 914 blake2s_final(&state, hash); 915 916 WG_DUMP_HASH("ckey", ckey); 917 WG_DUMP_HASH("hash", hash); 918 } 919 920 static void 921 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[], 922 const size_t inputsize) 923 { 924 struct blake2s state; 925 926 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 927 blake2s_update(&state, hash, WG_HASH_LEN); 928 blake2s_update(&state, input, inputsize); 929 blake2s_final(&state, hash); 930 } 931 932 static void 933 wg_algo_mac(uint8_t out[], const size_t outsize, 934 const uint8_t key[], const size_t keylen, 935 const uint8_t input1[], const size_t input1len, 936 const uint8_t input2[], const size_t input2len) 937 { 938 struct blake2s state; 939 940 blake2s_init(&state, outsize, key, keylen); 941 942 blake2s_update(&state, input1, input1len); 943 if (input2 != NULL) 944 blake2s_update(&state, input2, input2len); 945 blake2s_final(&state, out); 946 } 947 948 static void 949 wg_algo_mac_mac1(uint8_t out[], const size_t outsize, 950 const uint8_t 
input1[], const size_t input1len, 951 const uint8_t input2[], const size_t input2len) 952 { 953 struct blake2s state; 954 /* [W] 5.4: LABEL-MAC1 */ 955 const char *label = "mac1----"; 956 uint8_t key[WG_HASH_LEN]; 957 958 blake2s_init(&state, sizeof(key), NULL, 0); 959 blake2s_update(&state, label, strlen(label)); 960 blake2s_update(&state, input1, input1len); 961 blake2s_final(&state, key); 962 963 blake2s_init(&state, outsize, key, sizeof(key)); 964 if (input2 != NULL) 965 blake2s_update(&state, input2, input2len); 966 blake2s_final(&state, out); 967 } 968 969 static void 970 wg_algo_mac_cookie(uint8_t out[], const size_t outsize, 971 const uint8_t input1[], const size_t input1len) 972 { 973 struct blake2s state; 974 /* [W] 5.4: LABEL-COOKIE */ 975 const char *label = "cookie--"; 976 977 blake2s_init(&state, outsize, NULL, 0); 978 blake2s_update(&state, label, strlen(label)); 979 blake2s_update(&state, input1, input1len); 980 blake2s_final(&state, out); 981 } 982 983 static void 984 wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN], 985 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]) 986 { 987 988 CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES); 989 990 cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0); 991 crypto_scalarmult_base(pubkey, privkey); 992 } 993 994 static void 995 wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN], 996 const uint8_t privkey[WG_STATIC_KEY_LEN], 997 const uint8_t pubkey[WG_STATIC_KEY_LEN]) 998 { 999 1000 CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES); 1001 1002 int ret __diagused = crypto_scalarmult(out, privkey, pubkey); 1003 KASSERT(ret == 0); 1004 } 1005 1006 static void 1007 wg_algo_hmac(uint8_t out[], const size_t outlen, 1008 const uint8_t key[], const size_t keylen, 1009 const uint8_t in[], const size_t inlen) 1010 { 1011 #define IPAD 0x36 1012 #define OPAD 0x5c 1013 uint8_t hmackey[HMAC_BLOCK_LEN] = {0}; 1014 uint8_t ipad[HMAC_BLOCK_LEN]; 1015 uint8_t opad[HMAC_BLOCK_LEN]; 1016 size_t i; 1017 struct blake2s state; 1018 1019 KASSERT(outlen == WG_HASH_LEN); 1020 KASSERT(keylen <= HMAC_BLOCK_LEN); 1021 1022 memcpy(hmackey, key, keylen); 1023 1024 for (i = 0; i < sizeof(hmackey); i++) { 1025 ipad[i] = hmackey[i] ^ IPAD; 1026 opad[i] = hmackey[i] ^ OPAD; 1027 } 1028 1029 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 1030 blake2s_update(&state, ipad, sizeof(ipad)); 1031 blake2s_update(&state, in, inlen); 1032 blake2s_final(&state, out); 1033 1034 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 1035 blake2s_update(&state, opad, sizeof(opad)); 1036 blake2s_update(&state, out, WG_HASH_LEN); 1037 blake2s_final(&state, out); 1038 #undef IPAD 1039 #undef OPAD 1040 } 1041 1042 static void 1043 wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN], 1044 uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN], 1045 const uint8_t input[], const size_t inputlen) 1046 { 1047 uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1]; 1048 uint8_t one[1]; 1049 1050 /* 1051 * [N] 4.3: "an input_key_material byte sequence with length 1052 * either zero bytes, 32 bytes, or DHLEN bytes." 
1053 */ 1054 KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN); 1055 1056 WG_DUMP_HASH("ckey", ckey); 1057 if (input != NULL) 1058 WG_DUMP_HASH("input", input); 1059 wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN, 1060 input, inputlen); 1061 WG_DUMP_HASH("tmp1", tmp1); 1062 one[0] = 1; 1063 wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1064 one, sizeof(one)); 1065 WG_DUMP_HASH("out1", out1); 1066 if (out2 == NULL) 1067 return; 1068 memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN); 1069 tmp2[WG_KDF_OUTPUT_LEN] = 2; 1070 wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1071 tmp2, sizeof(tmp2)); 1072 WG_DUMP_HASH("out2", out2); 1073 if (out3 == NULL) 1074 return; 1075 memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN); 1076 tmp2[WG_KDF_OUTPUT_LEN] = 3; 1077 wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1078 tmp2, sizeof(tmp2)); 1079 WG_DUMP_HASH("out3", out3); 1080 } 1081 1082 static void __noinline 1083 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN], 1084 uint8_t cipher_key[WG_CIPHER_KEY_LEN], 1085 const uint8_t local_key[WG_STATIC_KEY_LEN], 1086 const uint8_t remote_key[WG_STATIC_KEY_LEN]) 1087 { 1088 uint8_t dhout[WG_DH_OUTPUT_LEN]; 1089 1090 wg_algo_dh(dhout, local_key, remote_key); 1091 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout)); 1092 1093 WG_DUMP_HASH("dhout", dhout); 1094 WG_DUMP_HASH("ckey", ckey); 1095 if (cipher_key != NULL) 1096 WG_DUMP_HASH("cipher_key", cipher_key); 1097 } 1098 1099 static void 1100 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[], 1101 const uint64_t counter, const uint8_t plain[], const size_t plainsize, 1102 const uint8_t auth[], size_t authlen) 1103 { 1104 uint8_t nonce[(32 + 64) / 8] = {0}; 1105 long long unsigned int outsize; 1106 int error __diagused; 1107 1108 le64enc(&nonce[4], counter); 1109 1110 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain, 1111 plainsize, auth, authlen, NULL, nonce, key); 1112 KASSERT(error == 0); 1113 KASSERT(outsize == expected_outsize); 1114 } 1115 1116 static int 1117 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[], 1118 const uint64_t counter, const uint8_t encrypted[], 1119 const size_t encryptedsize, const uint8_t auth[], size_t authlen) 1120 { 1121 uint8_t nonce[(32 + 64) / 8] = {0}; 1122 long long unsigned int outsize; 1123 int error; 1124 1125 le64enc(&nonce[4], counter); 1126 1127 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL, 1128 encrypted, encryptedsize, auth, authlen, nonce, key); 1129 if (error == 0) 1130 KASSERT(outsize == expected_outsize); 1131 return error; 1132 } 1133 1134 static void 1135 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize, 1136 const uint8_t key[], const uint8_t plain[], const size_t plainsize, 1137 const uint8_t auth[], size_t authlen, 1138 const uint8_t nonce[WG_SALT_LEN]) 1139 { 1140 long long unsigned int outsize; 1141 int error __diagused; 1142 1143 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES); 1144 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize, 1145 plain, plainsize, auth, authlen, NULL, nonce, key); 1146 KASSERT(error == 0); 1147 KASSERT(outsize == expected_outsize); 1148 } 1149 1150 static int 1151 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize, 1152 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize, 1153 const uint8_t auth[], size_t authlen, 1154 const uint8_t nonce[WG_SALT_LEN]) 1155 { 1156 long long unsigned int outsize; 
1157 int error; 1158 1159 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL, 1160 encrypted, encryptedsize, auth, authlen, nonce, key); 1161 if (error == 0) 1162 KASSERT(outsize == expected_outsize); 1163 return error; 1164 } 1165 1166 static void 1167 wg_algo_tai64n(wg_timestamp_t timestamp) 1168 { 1169 struct timespec ts; 1170 1171 /* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */ 1172 getnanotime(&ts); 1173 /* TAI64 label in external TAI64 format */ 1174 be32enc(timestamp, 0x40000000U + (uint32_t)(ts.tv_sec >> 32)); 1175 /* second beginning from 1970 TAI */ 1176 be32enc(timestamp + 4, (uint32_t)(ts.tv_sec & 0xffffffffU)); 1177 /* nanosecond in big-endian format */ 1178 be32enc(timestamp + 8, (uint32_t)ts.tv_nsec); 1179 } 1180 1181 /* 1182 * wg_get_stable_session(wgp, psref) 1183 * 1184 * Get a passive reference to the current stable session, or 1185 * return NULL if there is no current stable session. 1186 * 1187 * The pointer is always there but the session is not necessarily 1188 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However, 1189 * the session may transition from ESTABLISHED to DESTROYING while 1190 * holding the passive reference. 1191 */ 1192 static struct wg_session * 1193 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref) 1194 { 1195 int s; 1196 struct wg_session *wgs; 1197 1198 s = pserialize_read_enter(); 1199 wgs = atomic_load_consume(&wgp->wgp_session_stable); 1200 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED)) 1201 wgs = NULL; 1202 else 1203 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class); 1204 pserialize_read_exit(s); 1205 1206 return wgs; 1207 } 1208 1209 static void 1210 wg_put_session(struct wg_session *wgs, struct psref *psref) 1211 { 1212 1213 psref_release(psref, &wgs->wgs_psref, wg_psref_class); 1214 } 1215 1216 static void 1217 wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs) 1218 { 1219 struct wg_peer *wgp = wgs->wgs_peer; 1220 struct wg_session *wgs0 __diagused; 1221 void *garbage; 1222 1223 KASSERT(mutex_owned(wgp->wgp_lock)); 1224 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN); 1225 1226 /* Remove the session from the table. */ 1227 wgs0 = thmap_del(wg->wg_sessions_byindex, 1228 &wgs->wgs_local_index, sizeof(wgs->wgs_local_index)); 1229 KASSERT(wgs0 == wgs); 1230 garbage = thmap_stage_gc(wg->wg_sessions_byindex); 1231 1232 /* Wait for passive references to drain. */ 1233 pserialize_perform(wgp->wgp_psz); 1234 psref_target_destroy(&wgs->wgs_psref, wg_psref_class); 1235 1236 /* Free memory, zero state, and transition to UNKNOWN. */ 1237 thmap_gc(wg->wg_sessions_byindex, garbage); 1238 wg_clear_states(wgs); 1239 wgs->wgs_state = WGS_STATE_UNKNOWN; 1240 } 1241 1242 /* 1243 * wg_get_session_index(wg, wgs) 1244 * 1245 * Choose a session index for wgs->wgs_local_index, and store it 1246 * in wg's table of sessions by index. 1247 * 1248 * wgs must be the unstable session of its peer, and must be 1249 * transitioning out of the UNKNOWN state. 1250 */ 1251 static void 1252 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs) 1253 { 1254 struct wg_peer *wgp __diagused = wgs->wgs_peer; 1255 struct wg_session *wgs0; 1256 uint32_t index; 1257 1258 KASSERT(mutex_owned(wgp->wgp_lock)); 1259 KASSERT(wgs == wgp->wgp_session_unstable); 1260 KASSERT(wgs->wgs_state == WGS_STATE_UNKNOWN); 1261 1262 do { 1263 /* Pick a uniform random index. */ 1264 index = cprng_strong32(); 1265 1266 /* Try to take it. 
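		 * thmap_put(9) returns the value already stored under
		 * the key when one is present (and the new value
		 * otherwise), so the comparison below detects an index
		 * collision without a separate lookup.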
*/ 1267 wgs->wgs_local_index = index; 1268 wgs0 = thmap_put(wg->wg_sessions_byindex, 1269 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs); 1270 1271 /* If someone else beat us, start over. */ 1272 } while (__predict_false(wgs0 != wgs)); 1273 } 1274 1275 /* 1276 * wg_put_session_index(wg, wgs) 1277 * 1278 * Remove wgs from the table of sessions by index, wait for any 1279 * passive references to drain, and transition the session to the 1280 * UNKNOWN state. 1281 * 1282 * wgs must be the unstable session of its peer, and must not be 1283 * UNKNOWN or ESTABLISHED. 1284 */ 1285 static void 1286 wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs) 1287 { 1288 struct wg_peer *wgp __diagused = wgs->wgs_peer; 1289 1290 KASSERT(mutex_owned(wgp->wgp_lock)); 1291 KASSERT(wgs == wgp->wgp_session_unstable); 1292 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN); 1293 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED); 1294 1295 wg_destroy_session(wg, wgs); 1296 psref_target_init(&wgs->wgs_psref, wg_psref_class); 1297 } 1298 1299 /* 1300 * Handshake patterns 1301 * 1302 * [W] 5: "These messages use the "IK" pattern from Noise" 1303 * [N] 7.5. Interactive handshake patterns (fundamental) 1304 * "The first character refers to the initiator’s static key:" 1305 * "I = Static key for initiator Immediately transmitted to responder, 1306 * despite reduced or absent identity hiding" 1307 * "The second character refers to the responder’s static key:" 1308 * "K = Static key for responder Known to initiator" 1309 * "IK: 1310 * <- s 1311 * ... 1312 * -> e, es, s, ss 1313 * <- e, ee, se" 1314 * [N] 9.4. Pattern modifiers 1315 * "IKpsk2: 1316 * <- s 1317 * ... 1318 * -> e, es, s, ss 1319 * <- e, ee, se, psk" 1320 */ 1321 static void 1322 wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp, 1323 struct wg_session *wgs, struct wg_msg_init *wgmi) 1324 { 1325 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */ 1326 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */ 1327 uint8_t cipher_key[WG_CIPHER_KEY_LEN]; 1328 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN]; 1329 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]; 1330 1331 KASSERT(mutex_owned(wgp->wgp_lock)); 1332 KASSERT(wgs == wgp->wgp_session_unstable); 1333 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE); 1334 1335 wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT); 1336 wgmi->wgmi_sender = wgs->wgs_local_index; 1337 1338 /* [W] 5.4.2: First Message: Initiator to Responder */ 1339 1340 /* Ci := HASH(CONSTRUCTION) */ 1341 /* Hi := HASH(Ci || IDENTIFIER) */ 1342 wg_init_key_and_hash(ckey, hash); 1343 /* Hi := HASH(Hi || Sr^pub) */ 1344 wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)); 1345 1346 WG_DUMP_HASH("hash", hash); 1347 1348 /* [N] 2.2: "e" */ 1349 /* Ei^priv, Ei^pub := DH-GENERATE() */ 1350 wg_algo_generate_keypair(pubkey, privkey); 1351 /* Ci := KDF1(Ci, Ei^pub) */ 1352 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey)); 1353 /* msg.ephemeral := Ei^pub */ 1354 memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral)); 1355 /* Hi := HASH(Hi || msg.ephemeral) */ 1356 wg_algo_hash(hash, pubkey, sizeof(pubkey)); 1357 1358 WG_DUMP_HASH("ckey", ckey); 1359 WG_DUMP_HASH("hash", hash); 1360 1361 /* [N] 2.2: "es" */ 1362 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */ 1363 wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey); 1364 1365 /* [N] 2.2: "s" */ 1366 /* msg.static := AEAD(k, 0, Si^pub, Hi) */ 1367 wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static), 1368 cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey), 1369 hash, 
sizeof(hash)); 1370 /* Hi := HASH(Hi || msg.static) */ 1371 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static)); 1372 1373 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static); 1374 1375 /* [N] 2.2: "ss" */ 1376 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */ 1377 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey); 1378 1379 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */ 1380 wg_timestamp_t timestamp; 1381 wg_algo_tai64n(timestamp); 1382 wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp), 1383 cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash)); 1384 /* Hi := HASH(Hi || msg.timestamp) */ 1385 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp)); 1386 1387 /* [W] 5.4.4 Cookie MACs */ 1388 wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1), 1389 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey), 1390 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1)); 1391 /* Need mac1 to decrypt a cookie from a cookie message */ 1392 memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1, 1393 sizeof(wgp->wgp_last_sent_mac1)); 1394 wgp->wgp_last_sent_mac1_valid = true; 1395 1396 if (wgp->wgp_latest_cookie_time == 0 || 1397 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) 1398 memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2)); 1399 else { 1400 wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2), 1401 wgp->wgp_latest_cookie, WG_COOKIE_LEN, 1402 (const uint8_t *)wgmi, 1403 offsetof(struct wg_msg_init, wgmi_mac2), 1404 NULL, 0); 1405 } 1406 1407 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey)); 1408 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey)); 1409 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1410 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1411 WG_DLOG("%s: sender=%x\n", __func__, wgs->wgs_local_index); 1412 } 1413 1414 static void __noinline 1415 wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi, 1416 const struct sockaddr *src) 1417 { 1418 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */ 1419 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */ 1420 uint8_t cipher_key[WG_CIPHER_KEY_LEN]; 1421 uint8_t peer_pubkey[WG_STATIC_KEY_LEN]; 1422 struct wg_peer *wgp; 1423 struct wg_session *wgs; 1424 int error, ret; 1425 struct psref psref_peer; 1426 uint8_t mac1[WG_MAC_LEN]; 1427 1428 WG_TRACE("init msg received"); 1429 1430 wg_algo_mac_mac1(mac1, sizeof(mac1), 1431 wg->wg_pubkey, sizeof(wg->wg_pubkey), 1432 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1)); 1433 1434 /* 1435 * [W] 5.3: Denial of Service Mitigation & Cookies 1436 * "the responder, ..., must always reject messages with an invalid 1437 * msg.mac1" 1438 */ 1439 if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) { 1440 WG_DLOG("mac1 is invalid\n"); 1441 return; 1442 } 1443 1444 /* 1445 * [W] 5.4.2: First Message: Initiator to Responder 1446 * "When the responder receives this message, it does the same 1447 * operations so that its final state variables are identical, 1448 * replacing the operands of the DH function to produce equivalent 1449 * values." 1450 * Note that the following comments of operations are just copies of 1451 * the initiator's ones. 
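	 *
	 * The operands differ but the outputs agree because X25519 DH
	 * is symmetric: the initiator computes DH(Ei^priv, Sr^pub) and
	 * DH(Si^priv, Sr^pub), while we (the responder) compute
	 * DH(Sr^priv, Ei^pub) and DH(Sr^priv, Si^pub), yielding the
	 * same two shared secrets.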
1452 */ 1453 1454 /* Ci := HASH(CONSTRUCTION) */ 1455 /* Hi := HASH(Ci || IDENTIFIER) */ 1456 wg_init_key_and_hash(ckey, hash); 1457 /* Hi := HASH(Hi || Sr^pub) */ 1458 wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey)); 1459 1460 /* [N] 2.2: "e" */ 1461 /* Ci := KDF1(Ci, Ei^pub) */ 1462 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral, 1463 sizeof(wgmi->wgmi_ephemeral)); 1464 /* Hi := HASH(Hi || msg.ephemeral) */ 1465 wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral)); 1466 1467 WG_DUMP_HASH("ckey", ckey); 1468 1469 /* [N] 2.2: "es" */ 1470 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */ 1471 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral); 1472 1473 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static); 1474 1475 /* [N] 2.2: "s" */ 1476 /* msg.static := AEAD(k, 0, Si^pub, Hi) */ 1477 error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0, 1478 wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash)); 1479 if (error != 0) { 1480 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG, 1481 "%s: wg_algo_aead_dec for secret key failed\n", 1482 if_name(&wg->wg_if)); 1483 return; 1484 } 1485 /* Hi := HASH(Hi || msg.static) */ 1486 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static)); 1487 1488 wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer); 1489 if (wgp == NULL) { 1490 WG_DLOG("peer not found\n"); 1491 return; 1492 } 1493 1494 /* 1495 * Lock the peer to serialize access to cookie state. 1496 * 1497 * XXX Can we safely avoid holding the lock across DH? Take it 1498 * just to verify mac2 and then unlock/DH/lock? 1499 */ 1500 mutex_enter(wgp->wgp_lock); 1501 1502 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) { 1503 WG_TRACE("under load"); 1504 /* 1505 * [W] 5.3: Denial of Service Mitigation & Cookies 1506 * "the responder, ..., and when under load may reject messages 1507 * with an invalid msg.mac2. 
If the responder receives a 1508 * message with a valid msg.mac1 yet with an invalid msg.mac2, 1509 * and is under load, it may respond with a cookie reply 1510 * message" 1511 */ 1512 uint8_t zero[WG_MAC_LEN] = {0}; 1513 if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) { 1514 WG_TRACE("sending a cookie message: no cookie included"); 1515 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender, 1516 wgmi->wgmi_mac1, src); 1517 goto out; 1518 } 1519 if (!wgp->wgp_last_sent_cookie_valid) { 1520 WG_TRACE("sending a cookie message: no cookie sent ever"); 1521 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender, 1522 wgmi->wgmi_mac1, src); 1523 goto out; 1524 } 1525 uint8_t mac2[WG_MAC_LEN]; 1526 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie, 1527 WG_COOKIE_LEN, (const uint8_t *)wgmi, 1528 offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0); 1529 if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) { 1530 WG_DLOG("mac2 is invalid\n"); 1531 goto out; 1532 } 1533 WG_TRACE("under load, but continue to sending"); 1534 } 1535 1536 /* [N] 2.2: "ss" */ 1537 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */ 1538 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey); 1539 1540 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */ 1541 wg_timestamp_t timestamp; 1542 error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0, 1543 wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp), 1544 hash, sizeof(hash)); 1545 if (error != 0) { 1546 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1547 "%s: peer %s: wg_algo_aead_dec for timestamp failed\n", 1548 if_name(&wg->wg_if), wgp->wgp_name); 1549 goto out; 1550 } 1551 /* Hi := HASH(Hi || msg.timestamp) */ 1552 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp)); 1553 1554 /* 1555 * [W] 5.1 "The responder keeps track of the greatest timestamp 1556 * received per peer and discards packets containing 1557 * timestamps less than or equal to it." 1558 */ 1559 ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init, 1560 sizeof(timestamp)); 1561 if (ret <= 0) { 1562 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1563 "%s: peer %s: invalid init msg: timestamp is old\n", 1564 if_name(&wg->wg_if), wgp->wgp_name); 1565 goto out; 1566 } 1567 memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp)); 1568 1569 /* 1570 * Message is good -- we're committing to handle it now, unless 1571 * we were already initiating a session. 
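	 *
	 * Summary of how the switch below treats the unstable session:
	 *
	 *	UNKNOWN		new session from peer: take an index
	 *	INIT_ACTIVE	we are initiating ourselves: ignore
	 *	INIT_PASSIVE	peer retried: clear state, keep index
	 *	DESTROYING	peer rekeys early: stop dtor timer, clear state
	 *	ESTABLISHED	cannot happen for the unstable session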
1572 */ 1573 wgs = wgp->wgp_session_unstable; 1574 switch (wgs->wgs_state) { 1575 case WGS_STATE_UNKNOWN: /* new session initiated by peer */ 1576 wg_get_session_index(wg, wgs); 1577 break; 1578 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, drop */ 1579 WG_TRACE("Session already initializing, ignoring the message"); 1580 goto out; 1581 case WGS_STATE_INIT_PASSIVE: /* peer is retrying, start over */ 1582 WG_TRACE("Session already initializing, destroying old states"); 1583 wg_clear_states(wgs); 1584 /* keep session index */ 1585 break; 1586 case WGS_STATE_ESTABLISHED: /* can't happen */ 1587 panic("unstable session can't be established"); 1588 break; 1589 case WGS_STATE_DESTROYING: /* rekey initiated by peer */ 1590 WG_TRACE("Session destroying, but force to clear"); 1591 callout_stop(&wgp->wgp_session_dtor_timer); 1592 wg_clear_states(wgs); 1593 /* keep session index */ 1594 break; 1595 default: 1596 panic("invalid session state: %d", wgs->wgs_state); 1597 } 1598 wgs->wgs_state = WGS_STATE_INIT_PASSIVE; 1599 1600 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1601 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1602 memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral, 1603 sizeof(wgmi->wgmi_ephemeral)); 1604 1605 wg_update_endpoint_if_necessary(wgp, src); 1606 1607 (void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi); 1608 1609 wg_calculate_keys(wgs, false); 1610 wg_clear_states(wgs); 1611 1612 out: 1613 mutex_exit(wgp->wgp_lock); 1614 wg_put_peer(wgp, &psref_peer); 1615 } 1616 1617 static struct socket * 1618 wg_get_so_by_af(struct wg_softc *wg, const int af) 1619 { 1620 1621 switch (af) { 1622 #ifdef INET 1623 case AF_INET: 1624 return wg->wg_so4; 1625 #endif 1626 #ifdef INET6 1627 case AF_INET6: 1628 return wg->wg_so6; 1629 #endif 1630 default: 1631 panic("wg: no such af: %d", af); 1632 } 1633 } 1634 1635 static struct socket * 1636 wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa) 1637 { 1638 1639 return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa)); 1640 } 1641 1642 static struct wg_sockaddr * 1643 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref) 1644 { 1645 struct wg_sockaddr *wgsa; 1646 int s; 1647 1648 s = pserialize_read_enter(); 1649 wgsa = atomic_load_consume(&wgp->wgp_endpoint); 1650 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class); 1651 pserialize_read_exit(s); 1652 1653 return wgsa; 1654 } 1655 1656 static void 1657 wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref) 1658 { 1659 1660 psref_release(psref, &wgsa->wgsa_psref, wg_psref_class); 1661 } 1662 1663 static int 1664 wg_send_so(struct wg_peer *wgp, struct mbuf *m) 1665 { 1666 int error; 1667 struct socket *so; 1668 struct psref psref; 1669 struct wg_sockaddr *wgsa; 1670 1671 wgsa = wg_get_endpoint_sa(wgp, &psref); 1672 so = wg_get_so_by_peer(wgp, wgsa); 1673 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp); 1674 wg_put_sa(wgp, wgsa, &psref); 1675 1676 return error; 1677 } 1678 1679 static int 1680 wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp) 1681 { 1682 int error; 1683 struct mbuf *m; 1684 struct wg_msg_init *wgmi; 1685 struct wg_session *wgs; 1686 1687 KASSERT(mutex_owned(wgp->wgp_lock)); 1688 1689 wgs = wgp->wgp_session_unstable; 1690 /* XXX pull dispatch out into wg_task_send_init_message */ 1691 switch (wgs->wgs_state) { 1692 case WGS_STATE_UNKNOWN: /* new session initiated by us */ 1693 wg_get_session_index(wg, wgs); 1694 break; 1695 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, stop 
*/ 1696 WG_TRACE("Session already initializing, skip starting new one"); 1697 return EBUSY; 1698 case WGS_STATE_INIT_PASSIVE: /* peer was trying -- XXX what now? */ 1699 WG_TRACE("Session already initializing, destroying old states"); 1700 wg_clear_states(wgs); 1701 /* keep session index */ 1702 break; 1703 case WGS_STATE_ESTABLISHED: /* can't happen */ 1704 panic("unstable session can't be established"); 1705 break; 1706 case WGS_STATE_DESTROYING: /* rekey initiated by us too early */ 1707 WG_TRACE("Session destroying"); 1708 /* XXX should wait? */ 1709 return EBUSY; 1710 } 1711 wgs->wgs_state = WGS_STATE_INIT_ACTIVE; 1712 1713 m = m_gethdr(M_WAIT, MT_DATA); 1714 if (sizeof(*wgmi) > MHLEN) { 1715 m_clget(m, M_WAIT); 1716 CTASSERT(sizeof(*wgmi) <= MCLBYTES); 1717 } 1718 m->m_pkthdr.len = m->m_len = sizeof(*wgmi); 1719 wgmi = mtod(m, struct wg_msg_init *); 1720 wg_fill_msg_init(wg, wgp, wgs, wgmi); 1721 1722 error = wg->wg_ops->send_hs_msg(wgp, m); 1723 if (error == 0) { 1724 WG_TRACE("init msg sent"); 1725 1726 if (wgp->wgp_handshake_start_time == 0) 1727 wgp->wgp_handshake_start_time = time_uptime; 1728 callout_schedule(&wgp->wgp_handshake_timeout_timer, 1729 MIN(wg_rekey_timeout, (unsigned)(INT_MAX / hz)) * hz); 1730 } else { 1731 wg_put_session_index(wg, wgs); 1732 /* Initiation failed; toss packet waiting for it if any. */ 1733 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) 1734 m_freem(m); 1735 } 1736 1737 return error; 1738 } 1739 1740 static void 1741 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp, 1742 struct wg_session *wgs, struct wg_msg_resp *wgmr, 1743 const struct wg_msg_init *wgmi) 1744 { 1745 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */ 1746 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */ 1747 uint8_t cipher_key[WG_KDF_OUTPUT_LEN]; 1748 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN]; 1749 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]; 1750 1751 KASSERT(mutex_owned(wgp->wgp_lock)); 1752 KASSERT(wgs == wgp->wgp_session_unstable); 1753 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE); 1754 1755 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash)); 1756 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey)); 1757 1758 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP); 1759 wgmr->wgmr_sender = wgs->wgs_local_index; 1760 wgmr->wgmr_receiver = wgmi->wgmi_sender; 1761 1762 /* [W] 5.4.3 Second Message: Responder to Initiator */ 1763 1764 /* [N] 2.2: "e" */ 1765 /* Er^priv, Er^pub := DH-GENERATE() */ 1766 wg_algo_generate_keypair(pubkey, privkey); 1767 /* Cr := KDF1(Cr, Er^pub) */ 1768 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey)); 1769 /* msg.ephemeral := Er^pub */ 1770 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral)); 1771 /* Hr := HASH(Hr || msg.ephemeral) */ 1772 wg_algo_hash(hash, pubkey, sizeof(pubkey)); 1773 1774 WG_DUMP_HASH("ckey", ckey); 1775 WG_DUMP_HASH("hash", hash); 1776 1777 /* [N] 2.2: "ee" */ 1778 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */ 1779 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer); 1780 1781 /* [N] 2.2: "se" */ 1782 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */ 1783 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey); 1784 1785 /* [N] 9.2: "psk" */ 1786 { 1787 uint8_t kdfout[WG_KDF_OUTPUT_LEN]; 1788 /* Cr, r, k := KDF3(Cr, Q) */ 1789 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk, 1790 sizeof(wgp->wgp_psk)); 1791 /* Hr := HASH(Hr || r) */ 1792 wg_algo_hash(hash, kdfout, sizeof(kdfout)); 1793 } 1794 1795 /* msg.empty := AEAD(k, 0, e, Hr) */ 1796 wg_algo_aead_enc(wgmr->wgmr_empty, 
sizeof(wgmr->wgmr_empty), 1797 cipher_key, 0, NULL, 0, hash, sizeof(hash)); 1798 /* Hr := HASH(Hr || msg.empty) */ 1799 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty)); 1800 1801 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty); 1802 1803 /* [W] 5.4.4: Cookie MACs */ 1804 /* msg.mac1 := MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */ 1805 wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmi->wgmi_mac1), 1806 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey), 1807 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1)); 1808 /* Need mac1 to decrypt a cookie from a cookie message */ 1809 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1, 1810 sizeof(wgp->wgp_last_sent_mac1)); 1811 wgp->wgp_last_sent_mac1_valid = true; 1812 1813 if (wgp->wgp_latest_cookie_time == 0 || 1814 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) 1815 /* msg.mac2 := 0^16 */ 1816 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2)); 1817 else { 1818 /* msg.mac2 := MAC(Lm, msg_b) */ 1819 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmi->wgmi_mac2), 1820 wgp->wgp_latest_cookie, WG_COOKIE_LEN, 1821 (const uint8_t *)wgmr, 1822 offsetof(struct wg_msg_resp, wgmr_mac2), 1823 NULL, 0); 1824 } 1825 1826 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1827 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1828 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey)); 1829 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey)); 1830 wgs->wgs_remote_index = wgmi->wgmi_sender; 1831 WG_DLOG("sender=%x\n", wgs->wgs_local_index); 1832 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index); 1833 } 1834 1835 static void 1836 wg_swap_sessions(struct wg_peer *wgp) 1837 { 1838 struct wg_session *wgs, *wgs_prev; 1839 1840 KASSERT(mutex_owned(wgp->wgp_lock)); 1841 1842 wgs = wgp->wgp_session_unstable; 1843 KASSERT(wgs->wgs_state == WGS_STATE_ESTABLISHED); 1844 1845 wgs_prev = wgp->wgp_session_stable; 1846 KASSERT(wgs_prev->wgs_state == WGS_STATE_ESTABLISHED || 1847 wgs_prev->wgs_state == WGS_STATE_UNKNOWN); 1848 atomic_store_release(&wgp->wgp_session_stable, wgs); 1849 wgp->wgp_session_unstable = wgs_prev; 1850 } 1851 1852 static void __noinline 1853 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr, 1854 const struct sockaddr *src) 1855 { 1856 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */ 1857 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Kr */ 1858 uint8_t cipher_key[WG_KDF_OUTPUT_LEN]; 1859 struct wg_peer *wgp; 1860 struct wg_session *wgs; 1861 struct psref psref; 1862 int error; 1863 uint8_t mac1[WG_MAC_LEN]; 1864 struct wg_session *wgs_prev; 1865 struct mbuf *m; 1866 1867 wg_algo_mac_mac1(mac1, sizeof(mac1), 1868 wg->wg_pubkey, sizeof(wg->wg_pubkey), 1869 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1)); 1870 1871 /* 1872 * [W] 5.3: Denial of Service Mitigation & Cookies 1873 * "the responder, ..., must always reject messages with an invalid 1874 * msg.mac1" 1875 */ 1876 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) { 1877 WG_DLOG("mac1 is invalid\n"); 1878 return; 1879 } 1880 1881 WG_TRACE("resp msg received"); 1882 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref); 1883 if (wgs == NULL) { 1884 WG_TRACE("No session found"); 1885 return; 1886 } 1887 1888 wgp = wgs->wgs_peer; 1889 1890 mutex_enter(wgp->wgp_lock); 1891 1892 /* If we weren't waiting for a handshake response, drop it. 
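	 * Only INIT_ACTIVE still holds the ephemeral private key and
	 * chaining key needed to process a response; in every other
	 * state (never started, acting as responder, or already
	 * established with the handshake state cleared) the message
	 * cannot be consumed, so it is dropped.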
*/ 1893 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) { 1894 WG_TRACE("peer sent spurious handshake response, ignoring"); 1895 goto out; 1896 } 1897 1898 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) { 1899 WG_TRACE("under load"); 1900 /* 1901 * [W] 5.3: Denial of Service Mitigation & Cookies 1902 * "the responder, ..., and when under load may reject messages 1903 * with an invalid msg.mac2. If the responder receives a 1904 * message with a valid msg.mac1 yet with an invalid msg.mac2, 1905 * and is under load, it may respond with a cookie reply 1906 * message" 1907 */ 1908 uint8_t zero[WG_MAC_LEN] = {0}; 1909 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) { 1910 WG_TRACE("sending a cookie message: no cookie included"); 1911 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender, 1912 wgmr->wgmr_mac1, src); 1913 goto out; 1914 } 1915 if (!wgp->wgp_last_sent_cookie_valid) { 1916 WG_TRACE("sending a cookie message: no cookie sent ever"); 1917 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender, 1918 wgmr->wgmr_mac1, src); 1919 goto out; 1920 } 1921 uint8_t mac2[WG_MAC_LEN]; 1922 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie, 1923 WG_COOKIE_LEN, (const uint8_t *)wgmr, 1924 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0); 1925 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) { 1926 WG_DLOG("mac2 is invalid\n"); 1927 goto out; 1928 } 1929 WG_TRACE("under load, but continue to sending"); 1930 } 1931 1932 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash)); 1933 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey)); 1934 1935 /* 1936 * [W] 5.4.3 Second Message: Responder to Initiator 1937 * "When the initiator receives this message, it does the same 1938 * operations so that its final state variables are identical, 1939 * replacing the operands of the DH function to produce equivalent 1940 * values." 1941 * Note that the following comments of operations are just copies of 1942 * the initiator's ones. 
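 * Here the roles are mirrored: we are the initiator consuming the
 * responder's message, so Er^pub is taken from msg.ephemeral and the
 * DH operations below use our own ephemeral and static private keys
 * (wgs_ephemeral_key_priv, wg_privkey) in place of the responder's.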
1943 */ 1944 1945 /* [N] 2.2: "e" */ 1946 /* Cr := KDF1(Cr, Er^pub) */ 1947 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral, 1948 sizeof(wgmr->wgmr_ephemeral)); 1949 /* Hr := HASH(Hr || msg.ephemeral) */ 1950 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral)); 1951 1952 WG_DUMP_HASH("ckey", ckey); 1953 WG_DUMP_HASH("hash", hash); 1954 1955 /* [N] 2.2: "ee" */ 1956 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */ 1957 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv, 1958 wgmr->wgmr_ephemeral); 1959 1960 /* [N] 2.2: "se" */ 1961 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */ 1962 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral); 1963 1964 /* [N] 9.2: "psk" */ 1965 { 1966 uint8_t kdfout[WG_KDF_OUTPUT_LEN]; 1967 /* Cr, r, k := KDF3(Cr, Q) */ 1968 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk, 1969 sizeof(wgp->wgp_psk)); 1970 /* Hr := HASH(Hr || r) */ 1971 wg_algo_hash(hash, kdfout, sizeof(kdfout)); 1972 } 1973 1974 { 1975 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */ 1976 /* msg.empty := AEAD(k, 0, e, Hr) */ 1977 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty, 1978 sizeof(wgmr->wgmr_empty), hash, sizeof(hash)); 1979 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty); 1980 if (error != 0) { 1981 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1982 "%s: peer %s: wg_algo_aead_dec for empty message failed\n", 1983 if_name(&wg->wg_if), wgp->wgp_name); 1984 goto out; 1985 } 1986 /* Hr := HASH(Hr || msg.empty) */ 1987 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty)); 1988 } 1989 1990 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash)); 1991 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key)); 1992 wgs->wgs_remote_index = wgmr->wgmr_sender; 1993 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index); 1994 1995 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE); 1996 wgs->wgs_state = WGS_STATE_ESTABLISHED; 1997 wgs->wgs_time_established = time_uptime; 1998 wgs->wgs_time_last_data_sent = 0; 1999 wgs->wgs_is_initiator = true; 2000 wg_calculate_keys(wgs, true); 2001 wg_clear_states(wgs); 2002 WG_TRACE("WGS_STATE_ESTABLISHED"); 2003 2004 callout_stop(&wgp->wgp_handshake_timeout_timer); 2005 2006 wg_swap_sessions(wgp); 2007 KASSERT(wgs == wgp->wgp_session_stable); 2008 wgs_prev = wgp->wgp_session_unstable; 2009 getnanotime(&wgp->wgp_last_handshake_time); 2010 wgp->wgp_handshake_start_time = 0; 2011 wgp->wgp_last_sent_mac1_valid = false; 2012 wgp->wgp_last_sent_cookie_valid = false; 2013 2014 wg_schedule_rekey_timer(wgp); 2015 2016 wg_update_endpoint_if_necessary(wgp, src); 2017 2018 /* 2019 * If we had a data packet queued up, send it; otherwise send a 2020 * keepalive message -- either way we have to send something 2021 * immediately or else the responder will never answer. 2022 */ 2023 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) { 2024 kpreempt_disable(); 2025 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 2026 M_SETCTX(m, wgp); 2027 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 2028 WGLOG(LOG_ERR, "%s: pktq full, dropping\n", 2029 if_name(&wg->wg_if)); 2030 m_freem(m); 2031 } 2032 kpreempt_enable(); 2033 } else { 2034 wg_send_keepalive_msg(wgp, wgs); 2035 } 2036 2037 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) { 2038 /* Wait for wg_get_stable_session to drain. */ 2039 pserialize_perform(wgp->wgp_psz); 2040 2041 /* Transition ESTABLISHED->DESTROYING. 
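 * The old session is kept in DESTROYING so the receive path can still
 * decrypt packets that are in flight to us; it is finally torn down
 * when the session dtor timer fires and WGP_TASK_DESTROY_PREV_SESSION
 * releases its index.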
*/ 2042 wgs_prev->wgs_state = WGS_STATE_DESTROYING; 2043 2044 /* We can't destroy the old session immediately */ 2045 wg_schedule_session_dtor_timer(wgp); 2046 } else { 2047 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN, 2048 "state=%d", wgs_prev->wgs_state); 2049 } 2050 2051 out: 2052 mutex_exit(wgp->wgp_lock); 2053 wg_put_session(wgs, &psref); 2054 } 2055 2056 static int 2057 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp, 2058 struct wg_session *wgs, const struct wg_msg_init *wgmi) 2059 { 2060 int error; 2061 struct mbuf *m; 2062 struct wg_msg_resp *wgmr; 2063 2064 KASSERT(mutex_owned(wgp->wgp_lock)); 2065 KASSERT(wgs == wgp->wgp_session_unstable); 2066 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE); 2067 2068 m = m_gethdr(M_WAIT, MT_DATA); 2069 if (sizeof(*wgmr) > MHLEN) { 2070 m_clget(m, M_WAIT); 2071 CTASSERT(sizeof(*wgmr) <= MCLBYTES); 2072 } 2073 m->m_pkthdr.len = m->m_len = sizeof(*wgmr); 2074 wgmr = mtod(m, struct wg_msg_resp *); 2075 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi); 2076 2077 error = wg->wg_ops->send_hs_msg(wgp, m); 2078 if (error == 0) 2079 WG_TRACE("resp msg sent"); 2080 return error; 2081 } 2082 2083 static struct wg_peer * 2084 wg_lookup_peer_by_pubkey(struct wg_softc *wg, 2085 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref) 2086 { 2087 struct wg_peer *wgp; 2088 2089 int s = pserialize_read_enter(); 2090 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN); 2091 if (wgp != NULL) 2092 wg_get_peer(wgp, psref); 2093 pserialize_read_exit(s); 2094 2095 return wgp; 2096 } 2097 2098 static void 2099 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp, 2100 struct wg_msg_cookie *wgmc, const uint32_t sender, 2101 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src) 2102 { 2103 uint8_t cookie[WG_COOKIE_LEN]; 2104 uint8_t key[WG_HASH_LEN]; 2105 uint8_t addr[sizeof(struct in6_addr)]; 2106 size_t addrlen; 2107 uint16_t uh_sport; /* be */ 2108 2109 KASSERT(mutex_owned(wgp->wgp_lock)); 2110 2111 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE); 2112 wgmc->wgmc_receiver = sender; 2113 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt)); 2114 2115 /* 2116 * [W] 5.4.7: Under Load: Cookie Reply Message 2117 * "The secret variable, Rm, changes every two minutes to a 2118 * random value" 2119 */ 2120 if ((time_uptime - wgp->wgp_last_genrandval_time) > WG_RANDVAL_TIME) { 2121 wgp->wgp_randval = cprng_strong32(); 2122 wgp->wgp_last_genrandval_time = time_uptime; 2123 } 2124 2125 switch (src->sa_family) { 2126 case AF_INET: { 2127 const struct sockaddr_in *sin = satocsin(src); 2128 addrlen = sizeof(sin->sin_addr); 2129 memcpy(addr, &sin->sin_addr, addrlen); 2130 uh_sport = sin->sin_port; 2131 break; 2132 } 2133 #ifdef INET6 2134 case AF_INET6: { 2135 const struct sockaddr_in6 *sin6 = satocsin6(src); 2136 addrlen = sizeof(sin6->sin6_addr); 2137 memcpy(addr, &sin6->sin6_addr, addrlen); 2138 uh_sport = sin6->sin6_port; 2139 break; 2140 } 2141 #endif 2142 default: 2143 panic("invalid af=%d", src->sa_family); 2144 } 2145 2146 wg_algo_mac(cookie, sizeof(cookie), 2147 (const uint8_t *)&wgp->wgp_randval, sizeof(wgp->wgp_randval), 2148 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport)); 2149 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey, 2150 sizeof(wg->wg_pubkey)); 2151 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key, 2152 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt); 2153 2154 /* Need to store to calculate mac2 */ 2155 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie)); 
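	/*
	 * The stored cookie is what the under-load checks in the handshake
	 * receive path consult: a later handshake message from this peer is
	 * accepted while under load only if its msg.mac2 verifies against
	 * this cookie.
	 */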
2156 wgp->wgp_last_sent_cookie_valid = true; 2157 } 2158 2159 static int 2160 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp, 2161 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN], 2162 const struct sockaddr *src) 2163 { 2164 int error; 2165 struct mbuf *m; 2166 struct wg_msg_cookie *wgmc; 2167 2168 KASSERT(mutex_owned(wgp->wgp_lock)); 2169 2170 m = m_gethdr(M_WAIT, MT_DATA); 2171 if (sizeof(*wgmc) > MHLEN) { 2172 m_clget(m, M_WAIT); 2173 CTASSERT(sizeof(*wgmc) <= MCLBYTES); 2174 } 2175 m->m_pkthdr.len = m->m_len = sizeof(*wgmc); 2176 wgmc = mtod(m, struct wg_msg_cookie *); 2177 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src); 2178 2179 error = wg->wg_ops->send_hs_msg(wgp, m); 2180 if (error == 0) 2181 WG_TRACE("cookie msg sent"); 2182 return error; 2183 } 2184 2185 static bool 2186 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype) 2187 { 2188 #ifdef WG_DEBUG_PARAMS 2189 if (wg_force_underload) 2190 return true; 2191 #endif 2192 2193 /* 2194 * XXX we don't have a means of a load estimation. The purpose of 2195 * the mechanism is a DoS mitigation, so we consider frequent handshake 2196 * messages as (a kind of) load; if a message of the same type comes 2197 * to a peer within 1 second, we consider we are under load. 2198 */ 2199 time_t last = wgp->wgp_last_msg_received_time[msgtype]; 2200 wgp->wgp_last_msg_received_time[msgtype] = time_uptime; 2201 return (time_uptime - last) == 0; 2202 } 2203 2204 static void 2205 wg_calculate_keys(struct wg_session *wgs, const bool initiator) 2206 { 2207 2208 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock)); 2209 2210 /* 2211 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e) 2212 */ 2213 if (initiator) { 2214 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL, 2215 wgs->wgs_chaining_key, NULL, 0); 2216 } else { 2217 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL, 2218 wgs->wgs_chaining_key, NULL, 0); 2219 } 2220 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send); 2221 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv); 2222 } 2223 2224 static uint64_t 2225 wg_session_get_send_counter(struct wg_session *wgs) 2226 { 2227 #ifdef __HAVE_ATOMIC64_LOADSTORE 2228 return atomic_load_relaxed(&wgs->wgs_send_counter); 2229 #else 2230 uint64_t send_counter; 2231 2232 mutex_enter(&wgs->wgs_send_counter_lock); 2233 send_counter = wgs->wgs_send_counter; 2234 mutex_exit(&wgs->wgs_send_counter_lock); 2235 2236 return send_counter; 2237 #endif 2238 } 2239 2240 static uint64_t 2241 wg_session_inc_send_counter(struct wg_session *wgs) 2242 { 2243 #ifdef __HAVE_ATOMIC64_LOADSTORE 2244 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1; 2245 #else 2246 uint64_t send_counter; 2247 2248 mutex_enter(&wgs->wgs_send_counter_lock); 2249 send_counter = wgs->wgs_send_counter++; 2250 mutex_exit(&wgs->wgs_send_counter_lock); 2251 2252 return send_counter; 2253 #endif 2254 } 2255 2256 static void 2257 wg_clear_states(struct wg_session *wgs) 2258 { 2259 2260 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock)); 2261 2262 wgs->wgs_send_counter = 0; 2263 sliwin_reset(&wgs->wgs_recvwin->window); 2264 2265 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v)) 2266 wgs_clear(handshake_hash); 2267 wgs_clear(chaining_key); 2268 wgs_clear(ephemeral_key_pub); 2269 wgs_clear(ephemeral_key_priv); 2270 wgs_clear(ephemeral_key_peer); 2271 #undef wgs_clear 2272 } 2273 2274 static struct wg_session * 2275 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index, 2276 struct psref *psref) 2277 { 2278 
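	/*
	 * The 32-bit session index carried in the wire messages keys
	 * wg_sessions_byindex.  The lookup runs in a pserialize read
	 * section and takes a psref so the session stays alive after we
	 * leave the read section.
	 */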
struct wg_session *wgs; 2279 2280 int s = pserialize_read_enter(); 2281 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index); 2282 if (wgs != NULL) { 2283 KASSERT(atomic_load_relaxed(&wgs->wgs_state) != 2284 WGS_STATE_UNKNOWN); 2285 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class); 2286 } 2287 pserialize_read_exit(s); 2288 2289 return wgs; 2290 } 2291 2292 static void 2293 wg_schedule_rekey_timer(struct wg_peer *wgp) 2294 { 2295 int timeout = MIN(wg_rekey_after_time, (unsigned)(INT_MAX / hz)); 2296 2297 callout_schedule(&wgp->wgp_rekey_timer, timeout * hz); 2298 } 2299 2300 static void 2301 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs) 2302 { 2303 struct mbuf *m; 2304 2305 /* 2306 * [W] 6.5 Passive Keepalive 2307 * "A keepalive message is simply a transport data message with 2308 * a zero-length encapsulated encrypted inner-packet." 2309 */ 2310 m = m_gethdr(M_WAIT, MT_DATA); 2311 wg_send_data_msg(wgp, wgs, m); 2312 } 2313 2314 static bool 2315 wg_need_to_send_init_message(struct wg_session *wgs) 2316 { 2317 /* 2318 * [W] 6.2 Transport Message Limits 2319 * "if a peer is the initiator of a current secure session, 2320 * WireGuard will send a handshake initiation message to begin 2321 * a new secure session ... if after receiving a transport data 2322 * message, the current secure session is (REJECT-AFTER-TIME − 2323 * KEEPALIVE-TIMEOUT − REKEY-TIMEOUT) seconds old and it has 2324 * not yet acted upon this event." 2325 */ 2326 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 && 2327 (time_uptime - wgs->wgs_time_established) >= 2328 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout); 2329 } 2330 2331 static void 2332 wg_schedule_peer_task(struct wg_peer *wgp, unsigned int task) 2333 { 2334 2335 mutex_enter(wgp->wgp_intr_lock); 2336 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task); 2337 if (wgp->wgp_tasks == 0) 2338 /* 2339 * XXX If the current CPU is already loaded -- e.g., if 2340 * there's already a bunch of handshakes queued up -- 2341 * consider tossing this over to another CPU to 2342 * distribute the load. 
2343 */ 2344 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL); 2345 wgp->wgp_tasks |= task; 2346 mutex_exit(wgp->wgp_intr_lock); 2347 } 2348 2349 static void 2350 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new) 2351 { 2352 struct wg_sockaddr *wgsa_prev; 2353 2354 WG_TRACE("Changing endpoint"); 2355 2356 memcpy(wgp->wgp_endpoint0, new, new->sa_len); 2357 wgsa_prev = wgp->wgp_endpoint; 2358 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0); 2359 wgp->wgp_endpoint0 = wgsa_prev; 2360 atomic_store_release(&wgp->wgp_endpoint_available, true); 2361 2362 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED); 2363 } 2364 2365 static bool 2366 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af) 2367 { 2368 uint16_t packet_len; 2369 const struct ip *ip; 2370 2371 if (__predict_false(decrypted_len < sizeof(struct ip))) 2372 return false; 2373 2374 ip = (const struct ip *)packet; 2375 if (ip->ip_v == 4) 2376 *af = AF_INET; 2377 else if (ip->ip_v == 6) 2378 *af = AF_INET6; 2379 else 2380 return false; 2381 2382 WG_DLOG("af=%d\n", *af); 2383 2384 switch (*af) { 2385 #ifdef INET 2386 case AF_INET: 2387 packet_len = ntohs(ip->ip_len); 2388 break; 2389 #endif 2390 #ifdef INET6 2391 case AF_INET6: { 2392 const struct ip6_hdr *ip6; 2393 2394 if (__predict_false(decrypted_len < sizeof(struct ip6_hdr))) 2395 return false; 2396 2397 ip6 = (const struct ip6_hdr *)packet; 2398 packet_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen); 2399 break; 2400 } 2401 #endif 2402 default: 2403 return false; 2404 } 2405 2406 WG_DLOG("packet_len=%u\n", packet_len); 2407 if (packet_len > decrypted_len) 2408 return false; 2409 2410 return true; 2411 } 2412 2413 static bool 2414 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected, 2415 int af, char *packet) 2416 { 2417 struct sockaddr_storage ss; 2418 struct sockaddr *sa; 2419 struct psref psref; 2420 struct wg_peer *wgp; 2421 bool ok; 2422 2423 /* 2424 * II CRYPTOKEY ROUTING 2425 * "it will only accept it if its source IP resolves in the 2426 * table to the public key used in the secure session for 2427 * decrypting it." 
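	 * For example, if the decrypted inner packet's source address is
	 * 10.0.0.2, it is accepted only when 10.0.0.2 falls within one of
	 * this peer's allowedips (say 10.0.0.2/32 or 10.0.0.0/24).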
2428 */ 2429 2430 if (af == AF_INET) { 2431 const struct ip *ip = (const struct ip *)packet; 2432 struct sockaddr_in *sin = (struct sockaddr_in *)&ss; 2433 sockaddr_in_init(sin, &ip->ip_src, 0); 2434 sa = sintosa(sin); 2435 #ifdef INET6 2436 } else { 2437 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet; 2438 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss; 2439 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0); 2440 sa = sin6tosa(sin6); 2441 #endif 2442 } 2443 2444 wgp = wg_pick_peer_by_sa(wg, sa, &psref); 2445 ok = (wgp == wgp_expected); 2446 if (wgp != NULL) 2447 wg_put_peer(wgp, &psref); 2448 2449 return ok; 2450 } 2451 2452 static void 2453 wg_session_dtor_timer(void *arg) 2454 { 2455 struct wg_peer *wgp = arg; 2456 2457 WG_TRACE("enter"); 2458 2459 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION); 2460 } 2461 2462 static void 2463 wg_schedule_session_dtor_timer(struct wg_peer *wgp) 2464 { 2465 2466 /* 1 second grace period */ 2467 callout_schedule(&wgp->wgp_session_dtor_timer, hz); 2468 } 2469 2470 static bool 2471 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2) 2472 { 2473 if (sa1->sa_family != sa2->sa_family) 2474 return false; 2475 2476 switch (sa1->sa_family) { 2477 #ifdef INET 2478 case AF_INET: 2479 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port; 2480 #endif 2481 #ifdef INET6 2482 case AF_INET6: 2483 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port; 2484 #endif 2485 default: 2486 return false; 2487 } 2488 } 2489 2490 static void 2491 wg_update_endpoint_if_necessary(struct wg_peer *wgp, 2492 const struct sockaddr *src) 2493 { 2494 struct wg_sockaddr *wgsa; 2495 struct psref psref; 2496 2497 wgsa = wg_get_endpoint_sa(wgp, &psref); 2498 2499 #ifdef WG_DEBUG_LOG 2500 char oldaddr[128], newaddr[128]; 2501 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr)); 2502 sockaddr_format(src, newaddr, sizeof(newaddr)); 2503 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr); 2504 #endif 2505 2506 /* 2507 * III: "Since the packet has authenticated correctly, the source IP of 2508 * the outer UDP/IP packet is used to update the endpoint for peer..." 2509 */ 2510 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 || 2511 !sockaddr_port_match(src, wgsatosa(wgsa)))) { 2512 /* XXX We can't change the endpoint twice in a short period */ 2513 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) { 2514 wg_change_endpoint(wgp, src); 2515 } 2516 } 2517 2518 wg_put_sa(wgp, wgsa, &psref); 2519 } 2520 2521 static void __noinline 2522 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m, 2523 const struct sockaddr *src) 2524 { 2525 struct wg_msg_data *wgmd; 2526 char *encrypted_buf = NULL, *decrypted_buf; 2527 size_t encrypted_len, decrypted_len; 2528 struct wg_session *wgs; 2529 struct wg_peer *wgp; 2530 int state; 2531 size_t mlen; 2532 struct psref psref; 2533 int error, af; 2534 bool success, free_encrypted_buf = false, ok; 2535 struct mbuf *n; 2536 2537 KASSERT(m->m_len >= sizeof(struct wg_msg_data)); 2538 wgmd = mtod(m, struct wg_msg_data *); 2539 2540 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA)); 2541 WG_TRACE("data"); 2542 2543 /* Find the putative session, or drop. */ 2544 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref); 2545 if (wgs == NULL) { 2546 WG_TRACE("No session found"); 2547 m_freem(m); 2548 return; 2549 } 2550 2551 /* 2552 * We are only ready to handle data when in INIT_PASSIVE, 2553 * ESTABLISHED, or DESTROYING. 
All transitions out of that 2554 * state dissociate the session index and drain psrefs. 2555 */ 2556 state = atomic_load_relaxed(&wgs->wgs_state); 2557 switch (state) { 2558 case WGS_STATE_UNKNOWN: 2559 panic("wg session %p in unknown state has session index %u", 2560 wgs, wgmd->wgmd_receiver); 2561 case WGS_STATE_INIT_ACTIVE: 2562 WG_TRACE("not yet ready for data"); 2563 goto out; 2564 case WGS_STATE_INIT_PASSIVE: 2565 case WGS_STATE_ESTABLISHED: 2566 case WGS_STATE_DESTROYING: 2567 break; 2568 } 2569 2570 /* 2571 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and 2572 * to update the endpoint if authentication succeeds. 2573 */ 2574 wgp = wgs->wgs_peer; 2575 2576 /* 2577 * Reject outrageously wrong sequence numbers before doing any 2578 * crypto work or taking any locks. 2579 */ 2580 error = sliwin_check_fast(&wgs->wgs_recvwin->window, 2581 le64toh(wgmd->wgmd_counter)); 2582 if (error) { 2583 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2584 "%s: peer %s: out-of-window packet: %"PRIu64"\n", 2585 if_name(&wg->wg_if), wgp->wgp_name, 2586 le64toh(wgmd->wgmd_counter)); 2587 goto out; 2588 } 2589 2590 /* Ensure the payload and authenticator are contiguous. */ 2591 mlen = m_length(m); 2592 encrypted_len = mlen - sizeof(*wgmd); 2593 if (encrypted_len < WG_AUTHTAG_LEN) { 2594 WG_DLOG("Short encrypted_len: %lu\n", encrypted_len); 2595 goto out; 2596 } 2597 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len); 2598 if (success) { 2599 encrypted_buf = mtod(m, char *) + sizeof(*wgmd); 2600 } else { 2601 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP); 2602 if (encrypted_buf == NULL) { 2603 WG_DLOG("failed to allocate encrypted_buf\n"); 2604 goto out; 2605 } 2606 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf); 2607 free_encrypted_buf = true; 2608 } 2609 /* m_ensure_contig may change m regardless of its result */ 2610 KASSERT(m->m_len >= sizeof(*wgmd)); 2611 wgmd = mtod(m, struct wg_msg_data *); 2612 2613 /* 2614 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid 2615 * a zero-length buffer (XXX). Drop if plaintext is longer 2616 * than MCLBYTES (XXX). 2617 */ 2618 decrypted_len = encrypted_len - WG_AUTHTAG_LEN; 2619 if (decrypted_len > MCLBYTES) { 2620 /* FIXME handle larger data than MCLBYTES */ 2621 WG_DLOG("couldn't handle larger data than MCLBYTES\n"); 2622 goto out; 2623 } 2624 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN); 2625 if (n == NULL) { 2626 WG_DLOG("wg_get_mbuf failed\n"); 2627 goto out; 2628 } 2629 decrypted_buf = mtod(n, char *); 2630 2631 /* Decrypt and verify the packet. */ 2632 WG_DLOG("mlen=%lu, encrypted_len=%lu\n", mlen, encrypted_len); 2633 error = wg_algo_aead_dec(decrypted_buf, 2634 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */, 2635 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf, 2636 encrypted_len, NULL, 0); 2637 if (error != 0) { 2638 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2639 "%s: peer %s: failed to wg_algo_aead_dec\n", 2640 if_name(&wg->wg_if), wgp->wgp_name); 2641 m_freem(n); 2642 goto out; 2643 } 2644 WG_DLOG("outsize=%u\n", (u_int)decrypted_len); 2645 2646 /* Packet is genuine. Reject it if a replay or just too old. 
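	 * sliwin_update() both checks and advances the sliding window over
	 * the 64-bit receive counter under wgs_recvwin->lock; counters that
	 * fall behind the window, or that were already accepted, are
	 * rejected.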
*/ 2647 mutex_enter(&wgs->wgs_recvwin->lock); 2648 error = sliwin_update(&wgs->wgs_recvwin->window, 2649 le64toh(wgmd->wgmd_counter)); 2650 mutex_exit(&wgs->wgs_recvwin->lock); 2651 if (error) { 2652 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2653 "%s: peer %s: replay or out-of-window packet: %"PRIu64"\n", 2654 if_name(&wg->wg_if), wgp->wgp_name, 2655 le64toh(wgmd->wgmd_counter)); 2656 m_freem(n); 2657 goto out; 2658 } 2659 2660 /* We're done with m now; free it and chuck the pointers. */ 2661 m_freem(m); 2662 m = NULL; 2663 wgmd = NULL; 2664 2665 /* 2666 * Validate the encapsulated packet header and get the address 2667 * family, or drop. 2668 */ 2669 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af); 2670 if (!ok) { 2671 m_freem(n); 2672 goto out; 2673 } 2674 2675 /* 2676 * The packet is genuine. Update the peer's endpoint if the 2677 * source address changed. 2678 * 2679 * XXX How to prevent DoS by replaying genuine packets from the 2680 * wrong source address? 2681 */ 2682 wg_update_endpoint_if_necessary(wgp, src); 2683 2684 /* Submit it into our network stack if routable. */ 2685 ok = wg_validate_route(wg, wgp, af, decrypted_buf); 2686 if (ok) { 2687 wg->wg_ops->input(&wg->wg_if, n, af); 2688 } else { 2689 char addrstr[INET6_ADDRSTRLEN]; 2690 memset(addrstr, 0, sizeof(addrstr)); 2691 if (af == AF_INET) { 2692 const struct ip *ip = (const struct ip *)decrypted_buf; 2693 IN_PRINT(addrstr, &ip->ip_src); 2694 #ifdef INET6 2695 } else if (af == AF_INET6) { 2696 const struct ip6_hdr *ip6 = 2697 (const struct ip6_hdr *)decrypted_buf; 2698 IN6_PRINT(addrstr, &ip6->ip6_src); 2699 #endif 2700 } 2701 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2702 "%s: peer %s: invalid source address (%s)\n", 2703 if_name(&wg->wg_if), wgp->wgp_name, addrstr); 2704 m_freem(n); 2705 /* 2706 * The inner address is invalid however the session is valid 2707 * so continue the session processing below. 2708 */ 2709 } 2710 n = NULL; 2711 2712 /* Update the state machine if necessary. */ 2713 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) { 2714 /* 2715 * We were waiting for the initiator to send their 2716 * first data transport message, and that has happened. 2717 * Schedule a task to establish this session. 2718 */ 2719 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION); 2720 } else { 2721 if (__predict_false(wg_need_to_send_init_message(wgs))) { 2722 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 2723 } 2724 /* 2725 * [W] 6.5 Passive Keepalive 2726 * "If a peer has received a validly-authenticated transport 2727 * data message (section 5.4.6), but does not have any packets 2728 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends 2729 * a keepalive message." 2730 */ 2731 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n", 2732 (uintmax_t)time_uptime, 2733 (uintmax_t)wgs->wgs_time_last_data_sent); 2734 if ((time_uptime - wgs->wgs_time_last_data_sent) >= 2735 wg_keepalive_timeout) { 2736 WG_TRACE("Schedule sending keepalive message"); 2737 /* 2738 * We can't send a keepalive message here to avoid 2739 * a deadlock; we already hold the solock of a socket 2740 * that is used to send the message. 
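			 * Instead the keepalive is deferred to the peer's
			 * workqueue task (WGP_TASK_SEND_KEEPALIVE_MESSAGE),
			 * which runs without the solock held.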
2741 */ 2742 wg_schedule_peer_task(wgp, 2743 WGP_TASK_SEND_KEEPALIVE_MESSAGE); 2744 } 2745 } 2746 out: 2747 wg_put_session(wgs, &psref); 2748 if (m != NULL) 2749 m_freem(m); 2750 if (free_encrypted_buf) 2751 kmem_intr_free(encrypted_buf, encrypted_len); 2752 } 2753 2754 static void __noinline 2755 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc) 2756 { 2757 struct wg_session *wgs; 2758 struct wg_peer *wgp; 2759 struct psref psref; 2760 int error; 2761 uint8_t key[WG_HASH_LEN]; 2762 uint8_t cookie[WG_COOKIE_LEN]; 2763 2764 WG_TRACE("cookie msg received"); 2765 2766 /* Find the putative session. */ 2767 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref); 2768 if (wgs == NULL) { 2769 WG_TRACE("No session found"); 2770 return; 2771 } 2772 2773 /* Lock the peer so we can update the cookie state. */ 2774 wgp = wgs->wgs_peer; 2775 mutex_enter(wgp->wgp_lock); 2776 2777 if (!wgp->wgp_last_sent_mac1_valid) { 2778 WG_TRACE("No valid mac1 sent (or expired)"); 2779 goto out; 2780 } 2781 2782 /* Decrypt the cookie and store it for later handshake retry. */ 2783 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey, 2784 sizeof(wgp->wgp_pubkey)); 2785 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key, 2786 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), 2787 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1), 2788 wgmc->wgmc_salt); 2789 if (error != 0) { 2790 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2791 "%s: peer %s: wg_algo_aead_dec for cookie failed: " 2792 "error=%d\n", if_name(&wg->wg_if), wgp->wgp_name, error); 2793 goto out; 2794 } 2795 /* 2796 * [W] 6.6: Interaction with Cookie Reply System 2797 * "it should simply store the decrypted cookie value from the cookie 2798 * reply message, and wait for the expiration of the REKEY-TIMEOUT 2799 * timer for retrying a handshake initiation message." 2800 */ 2801 wgp->wgp_latest_cookie_time = time_uptime; 2802 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie)); 2803 out: 2804 mutex_exit(wgp->wgp_lock); 2805 wg_put_session(wgs, &psref); 2806 } 2807 2808 static struct mbuf * 2809 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m) 2810 { 2811 struct wg_msg wgm; 2812 size_t mbuflen; 2813 size_t msglen; 2814 2815 /* 2816 * Get the mbuf chain length. It is already guaranteed, by 2817 * wg_overudp_cb, to be large enough for a struct wg_msg. 2818 */ 2819 mbuflen = m_length(m); 2820 KASSERT(mbuflen >= sizeof(struct wg_msg)); 2821 2822 /* 2823 * Copy the message header (32-bit message type) out -- we'll 2824 * worry about contiguity and alignment later. 2825 */ 2826 m_copydata(m, 0, sizeof(wgm), &wgm); 2827 switch (le32toh(wgm.wgm_type)) { 2828 case WG_MSG_TYPE_INIT: 2829 msglen = sizeof(struct wg_msg_init); 2830 break; 2831 case WG_MSG_TYPE_RESP: 2832 msglen = sizeof(struct wg_msg_resp); 2833 break; 2834 case WG_MSG_TYPE_COOKIE: 2835 msglen = sizeof(struct wg_msg_cookie); 2836 break; 2837 case WG_MSG_TYPE_DATA: 2838 msglen = sizeof(struct wg_msg_data); 2839 break; 2840 default: 2841 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG, 2842 "%s: Unexpected msg type: %u\n", if_name(&wg->wg_if), 2843 le32toh(wgm.wgm_type)); 2844 goto error; 2845 } 2846 2847 /* Verify the mbuf chain is long enough for this type of message. */ 2848 if (__predict_false(mbuflen < msglen)) { 2849 WG_DLOG("Invalid msg size: mbuflen=%lu type=%u\n", mbuflen, 2850 le32toh(wgm.wgm_type)); 2851 goto error; 2852 } 2853 2854 /* Make the message header contiguous if necessary. 
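	 * m_pullup() may replace the leading mbuf; on failure it frees the
	 * whole chain and returns NULL, so the old pointer must not be
	 * touched afterwards.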
*/ 2855 if (__predict_false(m->m_len < msglen)) { 2856 m = m_pullup(m, msglen); 2857 if (m == NULL) 2858 return NULL; 2859 } 2860 2861 return m; 2862 2863 error: 2864 m_freem(m); 2865 return NULL; 2866 } 2867 2868 static void 2869 wg_handle_packet(struct wg_softc *wg, struct mbuf *m, 2870 const struct sockaddr *src) 2871 { 2872 struct wg_msg *wgm; 2873 2874 m = wg_validate_msg_header(wg, m); 2875 if (__predict_false(m == NULL)) 2876 return; 2877 2878 KASSERT(m->m_len >= sizeof(struct wg_msg)); 2879 wgm = mtod(m, struct wg_msg *); 2880 switch (le32toh(wgm->wgm_type)) { 2881 case WG_MSG_TYPE_INIT: 2882 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src); 2883 break; 2884 case WG_MSG_TYPE_RESP: 2885 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src); 2886 break; 2887 case WG_MSG_TYPE_COOKIE: 2888 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm); 2889 break; 2890 case WG_MSG_TYPE_DATA: 2891 wg_handle_msg_data(wg, m, src); 2892 /* wg_handle_msg_data frees m for us */ 2893 return; 2894 default: 2895 panic("invalid message type: %d", le32toh(wgm->wgm_type)); 2896 } 2897 2898 m_freem(m); 2899 } 2900 2901 static void 2902 wg_receive_packets(struct wg_softc *wg, const int af) 2903 { 2904 2905 for (;;) { 2906 int error, flags; 2907 struct socket *so; 2908 struct mbuf *m = NULL; 2909 struct uio dummy_uio; 2910 struct mbuf *paddr = NULL; 2911 struct sockaddr *src; 2912 2913 so = wg_get_so_by_af(wg, af); 2914 flags = MSG_DONTWAIT; 2915 dummy_uio.uio_resid = 1000000000; 2916 2917 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL, 2918 &flags); 2919 if (error || m == NULL) { 2920 //if (error == EWOULDBLOCK) 2921 return; 2922 } 2923 2924 KASSERT(paddr != NULL); 2925 KASSERT(paddr->m_len >= sizeof(struct sockaddr)); 2926 src = mtod(paddr, struct sockaddr *); 2927 2928 wg_handle_packet(wg, m, src); 2929 } 2930 } 2931 2932 static void 2933 wg_get_peer(struct wg_peer *wgp, struct psref *psref) 2934 { 2935 2936 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class); 2937 } 2938 2939 static void 2940 wg_put_peer(struct wg_peer *wgp, struct psref *psref) 2941 { 2942 2943 psref_release(psref, &wgp->wgp_psref, wg_psref_class); 2944 } 2945 2946 static void 2947 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp) 2948 { 2949 struct wg_session *wgs; 2950 2951 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE"); 2952 2953 KASSERT(mutex_owned(wgp->wgp_lock)); 2954 2955 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) { 2956 WGLOG(LOG_DEBUG, "%s: No endpoint available\n", 2957 if_name(&wg->wg_if)); 2958 /* XXX should do something? */ 2959 return; 2960 } 2961 2962 wgs = wgp->wgp_session_stable; 2963 if (wgs->wgs_state == WGS_STATE_UNKNOWN) { 2964 /* XXX What if the unstable session is already INIT_ACTIVE? 
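		 * (wg_send_handshake_msg_init appears to cope with that case
		 * by returning EBUSY when the unstable session is already in
		 * INIT_ACTIVE.)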
*/ 2965 wg_send_handshake_msg_init(wg, wgp); 2966 } else { 2967 /* rekey */ 2968 wgs = wgp->wgp_session_unstable; 2969 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) 2970 wg_send_handshake_msg_init(wg, wgp); 2971 } 2972 } 2973 2974 static void 2975 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp) 2976 { 2977 struct wg_session *wgs; 2978 2979 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE"); 2980 2981 KASSERT(mutex_owned(wgp->wgp_lock)); 2982 KASSERT(wgp->wgp_handshake_start_time != 0); 2983 2984 wgs = wgp->wgp_session_unstable; 2985 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) 2986 return; 2987 2988 /* 2989 * XXX no real need to assign a new index here, but we do need 2990 * to transition to UNKNOWN temporarily 2991 */ 2992 wg_put_session_index(wg, wgs); 2993 2994 /* [W] 6.4 Handshake Initiation Retransmission */ 2995 if ((time_uptime - wgp->wgp_handshake_start_time) > 2996 wg_rekey_attempt_time) { 2997 /* Give up handshaking */ 2998 wgp->wgp_handshake_start_time = 0; 2999 WG_TRACE("give up"); 3000 3001 /* 3002 * If a new data packet comes, handshaking will be retried 3003 * and a new session would be established at that time, 3004 * however we don't want to send pending packets then. 3005 */ 3006 wg_purge_pending_packets(wgp); 3007 return; 3008 } 3009 3010 wg_task_send_init_message(wg, wgp); 3011 } 3012 3013 static void 3014 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp) 3015 { 3016 struct wg_session *wgs, *wgs_prev; 3017 struct mbuf *m; 3018 3019 KASSERT(mutex_owned(wgp->wgp_lock)); 3020 3021 wgs = wgp->wgp_session_unstable; 3022 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE) 3023 /* XXX Can this happen? */ 3024 return; 3025 3026 wgs->wgs_state = WGS_STATE_ESTABLISHED; 3027 wgs->wgs_time_established = time_uptime; 3028 wgs->wgs_time_last_data_sent = 0; 3029 wgs->wgs_is_initiator = false; 3030 WG_TRACE("WGS_STATE_ESTABLISHED"); 3031 3032 wg_swap_sessions(wgp); 3033 KASSERT(wgs == wgp->wgp_session_stable); 3034 wgs_prev = wgp->wgp_session_unstable; 3035 getnanotime(&wgp->wgp_last_handshake_time); 3036 wgp->wgp_handshake_start_time = 0; 3037 wgp->wgp_last_sent_mac1_valid = false; 3038 wgp->wgp_last_sent_cookie_valid = false; 3039 3040 /* If we had a data packet queued up, send it. */ 3041 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) { 3042 kpreempt_disable(); 3043 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 3044 M_SETCTX(m, wgp); 3045 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 3046 WGLOG(LOG_ERR, "%s: pktq full, dropping\n", 3047 if_name(&wg->wg_if)); 3048 m_freem(m); 3049 } 3050 kpreempt_enable(); 3051 } 3052 3053 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) { 3054 /* Wait for wg_get_stable_session to drain. */ 3055 pserialize_perform(wgp->wgp_psz); 3056 3057 /* Transition ESTABLISHED->DESTROYING. 
*/ 3058 wgs_prev->wgs_state = WGS_STATE_DESTROYING; 3059 3060 /* We can't destroy the old session immediately */ 3061 wg_schedule_session_dtor_timer(wgp); 3062 } else { 3063 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN, 3064 "state=%d", wgs_prev->wgs_state); 3065 wg_clear_states(wgs_prev); 3066 wgs_prev->wgs_state = WGS_STATE_UNKNOWN; 3067 } 3068 } 3069 3070 static void 3071 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp) 3072 { 3073 3074 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED"); 3075 3076 KASSERT(mutex_owned(wgp->wgp_lock)); 3077 3078 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) { 3079 pserialize_perform(wgp->wgp_psz); 3080 mutex_exit(wgp->wgp_lock); 3081 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, 3082 wg_psref_class); 3083 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, 3084 wg_psref_class); 3085 mutex_enter(wgp->wgp_lock); 3086 atomic_store_release(&wgp->wgp_endpoint_changing, 0); 3087 } 3088 } 3089 3090 static void 3091 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp) 3092 { 3093 struct wg_session *wgs; 3094 3095 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE"); 3096 3097 KASSERT(mutex_owned(wgp->wgp_lock)); 3098 3099 wgs = wgp->wgp_session_stable; 3100 if (wgs->wgs_state != WGS_STATE_ESTABLISHED) 3101 return; 3102 3103 wg_send_keepalive_msg(wgp, wgs); 3104 } 3105 3106 static void 3107 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp) 3108 { 3109 struct wg_session *wgs; 3110 3111 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION"); 3112 3113 KASSERT(mutex_owned(wgp->wgp_lock)); 3114 3115 wgs = wgp->wgp_session_unstable; 3116 if (wgs->wgs_state == WGS_STATE_DESTROYING) { 3117 wg_put_session_index(wg, wgs); 3118 } 3119 } 3120 3121 static void 3122 wg_peer_work(struct work *wk, void *cookie) 3123 { 3124 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work); 3125 struct wg_softc *wg = wgp->wgp_sc; 3126 unsigned int tasks; 3127 3128 mutex_enter(wgp->wgp_intr_lock); 3129 while ((tasks = wgp->wgp_tasks) != 0) { 3130 wgp->wgp_tasks = 0; 3131 mutex_exit(wgp->wgp_intr_lock); 3132 3133 mutex_enter(wgp->wgp_lock); 3134 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE)) 3135 wg_task_send_init_message(wg, wgp); 3136 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE)) 3137 wg_task_retry_handshake(wg, wgp); 3138 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION)) 3139 wg_task_establish_session(wg, wgp); 3140 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED)) 3141 wg_task_endpoint_changed(wg, wgp); 3142 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE)) 3143 wg_task_send_keepalive_message(wg, wgp); 3144 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION)) 3145 wg_task_destroy_prev_session(wg, wgp); 3146 mutex_exit(wgp->wgp_lock); 3147 3148 mutex_enter(wgp->wgp_intr_lock); 3149 } 3150 mutex_exit(wgp->wgp_intr_lock); 3151 } 3152 3153 static void 3154 wg_job(struct threadpool_job *job) 3155 { 3156 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job); 3157 int bound, upcalls; 3158 3159 mutex_enter(wg->wg_intr_lock); 3160 while ((upcalls = wg->wg_upcalls) != 0) { 3161 wg->wg_upcalls = 0; 3162 mutex_exit(wg->wg_intr_lock); 3163 bound = curlwp_bind(); 3164 if (ISSET(upcalls, WG_UPCALL_INET)) 3165 wg_receive_packets(wg, AF_INET); 3166 if (ISSET(upcalls, WG_UPCALL_INET6)) 3167 wg_receive_packets(wg, AF_INET6); 3168 curlwp_bindx(bound); 3169 mutex_enter(wg->wg_intr_lock); 3170 } 3171 threadpool_job_done(job); 3172 mutex_exit(wg->wg_intr_lock); 3173 } 3174 3175 static int 3176 wg_bind_port(struct wg_softc *wg, const uint16_t port) 3177 { 3178 
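	/*
	 * Bind the IPv4 (and, with INET6, IPv6) UDP sockets to the requested
	 * listen port.  Port 0 lets the stack pick an ephemeral port;
	 * re-binding to the port we already listen on is a no-op.
	 */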
int error; 3179 uint16_t old_port = wg->wg_listen_port; 3180 3181 if (port != 0 && old_port == port) 3182 return 0; 3183 3184 struct sockaddr_in _sin, *sin = &_sin; 3185 sin->sin_len = sizeof(*sin); 3186 sin->sin_family = AF_INET; 3187 sin->sin_addr.s_addr = INADDR_ANY; 3188 sin->sin_port = htons(port); 3189 3190 error = sobind(wg->wg_so4, sintosa(sin), curlwp); 3191 if (error != 0) 3192 return error; 3193 3194 #ifdef INET6 3195 struct sockaddr_in6 _sin6, *sin6 = &_sin6; 3196 sin6->sin6_len = sizeof(*sin6); 3197 sin6->sin6_family = AF_INET6; 3198 sin6->sin6_addr = in6addr_any; 3199 sin6->sin6_port = htons(port); 3200 3201 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp); 3202 if (error != 0) 3203 return error; 3204 #endif 3205 3206 wg->wg_listen_port = port; 3207 3208 return 0; 3209 } 3210 3211 static void 3212 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag) 3213 { 3214 struct wg_softc *wg = cookie; 3215 int reason; 3216 3217 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ? 3218 WG_UPCALL_INET : 3219 WG_UPCALL_INET6; 3220 3221 mutex_enter(wg->wg_intr_lock); 3222 wg->wg_upcalls |= reason; 3223 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job); 3224 mutex_exit(wg->wg_intr_lock); 3225 } 3226 3227 static int 3228 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so, 3229 struct sockaddr *src, void *arg) 3230 { 3231 struct wg_softc *wg = arg; 3232 struct wg_msg wgm; 3233 struct mbuf *m = *mp; 3234 3235 WG_TRACE("enter"); 3236 3237 /* Verify the mbuf chain is long enough to have a wg msg header. */ 3238 KASSERT(offset <= m_length(m)); 3239 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) { 3240 /* drop on the floor */ 3241 m_freem(m); 3242 return -1; 3243 } 3244 3245 /* 3246 * Copy the message header (32-bit message type) out -- we'll 3247 * worry about contiguity and alignment later. 3248 */ 3249 m_copydata(m, offset, sizeof(struct wg_msg), &wgm); 3250 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type)); 3251 3252 /* 3253 * Handle DATA packets promptly as they arrive. Other packets 3254 * may require expensive public-key crypto and are not as 3255 * sensitive to latency, so defer them to the worker thread. 
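	 * Handshake messages involve Curve25519 operations, so they are left
	 * in the socket buffer and picked up later by wg_receive_packets()
	 * running from the threadpool job, while data messages only need
	 * ChaCha20-Poly1305 and are handled right here in the UDP input path
	 * via wg_handle_msg_data().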
3256 */ 3257 switch (le32toh(wgm.wgm_type)) { 3258 case WG_MSG_TYPE_DATA: 3259 /* handle immediately */ 3260 m_adj(m, offset); 3261 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) { 3262 m = m_pullup(m, sizeof(struct wg_msg_data)); 3263 if (m == NULL) 3264 return -1; 3265 } 3266 wg_handle_msg_data(wg, m, src); 3267 *mp = NULL; 3268 return 1; 3269 case WG_MSG_TYPE_INIT: 3270 case WG_MSG_TYPE_RESP: 3271 case WG_MSG_TYPE_COOKIE: 3272 /* pass through to so_receive in wg_receive_packets */ 3273 return 0; 3274 default: 3275 /* drop on the floor */ 3276 m_freem(m); 3277 return -1; 3278 } 3279 } 3280 3281 static int 3282 wg_socreate(struct wg_softc *wg, int af, struct socket **sop) 3283 { 3284 int error; 3285 struct socket *so; 3286 3287 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL); 3288 if (error != 0) 3289 return error; 3290 3291 solock(so); 3292 so->so_upcallarg = wg; 3293 so->so_upcall = wg_so_upcall; 3294 so->so_rcv.sb_flags |= SB_UPCALL; 3295 inpcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg); 3296 sounlock(so); 3297 3298 *sop = so; 3299 3300 return 0; 3301 } 3302 3303 static bool 3304 wg_session_hit_limits(struct wg_session *wgs) 3305 { 3306 3307 /* 3308 * [W] 6.2: Transport Message Limits 3309 * "After REJECT-AFTER-MESSAGES transport data messages or after the 3310 * current secure session is REJECT-AFTER-TIME seconds old, whichever 3311 * comes first, WireGuard will refuse to send any more transport data 3312 * messages using the current secure session, ..." 3313 */ 3314 KASSERT(wgs->wgs_time_established != 0); 3315 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) { 3316 WG_DLOG("The session hits REJECT_AFTER_TIME\n"); 3317 return true; 3318 } else if (wg_session_get_send_counter(wgs) > 3319 wg_reject_after_messages) { 3320 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n"); 3321 return true; 3322 } 3323 3324 return false; 3325 } 3326 3327 static void 3328 wgintr(void *cookie) 3329 { 3330 struct wg_peer *wgp; 3331 struct wg_session *wgs; 3332 struct mbuf *m; 3333 struct psref psref; 3334 3335 while ((m = pktq_dequeue(wg_pktq)) != NULL) { 3336 wgp = M_GETCTX(m, struct wg_peer *); 3337 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) { 3338 WG_TRACE("no stable session"); 3339 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3340 goto next0; 3341 } 3342 if (__predict_false(wg_session_hit_limits(wgs))) { 3343 WG_TRACE("stable session hit limits"); 3344 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3345 goto next1; 3346 } 3347 wg_send_data_msg(wgp, wgs, m); 3348 m = NULL; /* consumed */ 3349 next1: wg_put_session(wgs, &psref); 3350 next0: if (m) 3351 m_freem(m); 3352 /* XXX Yield to avoid userland starvation? 
*/ 3353 } 3354 } 3355 3356 static void 3357 wg_rekey_timer(void *arg) 3358 { 3359 struct wg_peer *wgp = arg; 3360 3361 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3362 } 3363 3364 static void 3365 wg_purge_pending_packets(struct wg_peer *wgp) 3366 { 3367 struct mbuf *m; 3368 3369 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) 3370 m_freem(m); 3371 pktq_barrier(wg_pktq); 3372 } 3373 3374 static void 3375 wg_handshake_timeout_timer(void *arg) 3376 { 3377 struct wg_peer *wgp = arg; 3378 3379 WG_TRACE("enter"); 3380 3381 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE); 3382 } 3383 3384 static struct wg_peer * 3385 wg_alloc_peer(struct wg_softc *wg) 3386 { 3387 struct wg_peer *wgp; 3388 3389 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP); 3390 3391 wgp->wgp_sc = wg; 3392 callout_init(&wgp->wgp_rekey_timer, CALLOUT_MPSAFE); 3393 callout_setfunc(&wgp->wgp_rekey_timer, wg_rekey_timer, wgp); 3394 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE); 3395 callout_setfunc(&wgp->wgp_handshake_timeout_timer, 3396 wg_handshake_timeout_timer, wgp); 3397 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE); 3398 callout_setfunc(&wgp->wgp_session_dtor_timer, 3399 wg_session_dtor_timer, wgp); 3400 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry); 3401 wgp->wgp_endpoint_changing = false; 3402 wgp->wgp_endpoint_available = false; 3403 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 3404 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 3405 wgp->wgp_psz = pserialize_create(); 3406 psref_target_init(&wgp->wgp_psref, wg_psref_class); 3407 3408 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP); 3409 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP); 3410 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class); 3411 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class); 3412 3413 struct wg_session *wgs; 3414 wgp->wgp_session_stable = 3415 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP); 3416 wgp->wgp_session_unstable = 3417 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP); 3418 wgs = wgp->wgp_session_stable; 3419 wgs->wgs_peer = wgp; 3420 wgs->wgs_state = WGS_STATE_UNKNOWN; 3421 psref_target_init(&wgs->wgs_psref, wg_psref_class); 3422 #ifndef __HAVE_ATOMIC64_LOADSTORE 3423 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET); 3424 #endif 3425 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP); 3426 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET); 3427 3428 wgs = wgp->wgp_session_unstable; 3429 wgs->wgs_peer = wgp; 3430 wgs->wgs_state = WGS_STATE_UNKNOWN; 3431 psref_target_init(&wgs->wgs_psref, wg_psref_class); 3432 #ifndef __HAVE_ATOMIC64_LOADSTORE 3433 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET); 3434 #endif 3435 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP); 3436 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET); 3437 3438 return wgp; 3439 } 3440 3441 static void 3442 wg_destroy_peer(struct wg_peer *wgp) 3443 { 3444 struct wg_session *wgs; 3445 struct wg_softc *wg = wgp->wgp_sc; 3446 3447 /* Prevent new packets from this peer on any source address. 
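	 * That is, delete all of the peer's allowedips entries from the
	 * radix trees so that wg_pick_peer_by_sa() and wg_validate_route()
	 * no longer resolve to this peer.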
*/ 3448 rw_enter(wg->wg_rwlock, RW_WRITER); 3449 for (int i = 0; i < wgp->wgp_n_allowedips; i++) { 3450 struct wg_allowedip *wga = &wgp->wgp_allowedips[i]; 3451 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family); 3452 struct radix_node *rn; 3453 3454 KASSERT(rnh != NULL); 3455 rn = rnh->rnh_deladdr(&wga->wga_sa_addr, 3456 &wga->wga_sa_mask, rnh); 3457 if (rn == NULL) { 3458 char addrstr[128]; 3459 sockaddr_format(&wga->wga_sa_addr, addrstr, 3460 sizeof(addrstr)); 3461 WGLOG(LOG_WARNING, "%s: Couldn't delete %s", 3462 if_name(&wg->wg_if), addrstr); 3463 } 3464 } 3465 rw_exit(wg->wg_rwlock); 3466 3467 /* Purge pending packets. */ 3468 wg_purge_pending_packets(wgp); 3469 3470 /* Halt all packet processing and timeouts. */ 3471 callout_halt(&wgp->wgp_rekey_timer, NULL); 3472 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL); 3473 callout_halt(&wgp->wgp_session_dtor_timer, NULL); 3474 3475 /* Wait for any queued work to complete. */ 3476 workqueue_wait(wg_wq, &wgp->wgp_work); 3477 3478 wgs = wgp->wgp_session_unstable; 3479 if (wgs->wgs_state != WGS_STATE_UNKNOWN) { 3480 mutex_enter(wgp->wgp_lock); 3481 wg_destroy_session(wg, wgs); 3482 mutex_exit(wgp->wgp_lock); 3483 } 3484 mutex_destroy(&wgs->wgs_recvwin->lock); 3485 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin)); 3486 #ifndef __HAVE_ATOMIC64_LOADSTORE 3487 mutex_destroy(&wgs->wgs_send_counter_lock); 3488 #endif 3489 kmem_free(wgs, sizeof(*wgs)); 3490 3491 wgs = wgp->wgp_session_stable; 3492 if (wgs->wgs_state != WGS_STATE_UNKNOWN) { 3493 mutex_enter(wgp->wgp_lock); 3494 wg_destroy_session(wg, wgs); 3495 mutex_exit(wgp->wgp_lock); 3496 } 3497 mutex_destroy(&wgs->wgs_recvwin->lock); 3498 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin)); 3499 #ifndef __HAVE_ATOMIC64_LOADSTORE 3500 mutex_destroy(&wgs->wgs_send_counter_lock); 3501 #endif 3502 kmem_free(wgs, sizeof(*wgs)); 3503 3504 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class); 3505 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class); 3506 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint)); 3507 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0)); 3508 3509 pserialize_destroy(wgp->wgp_psz); 3510 mutex_obj_free(wgp->wgp_intr_lock); 3511 mutex_obj_free(wgp->wgp_lock); 3512 3513 kmem_free(wgp, sizeof(*wgp)); 3514 } 3515 3516 static void 3517 wg_destroy_all_peers(struct wg_softc *wg) 3518 { 3519 struct wg_peer *wgp, *wgp0 __diagused; 3520 void *garbage_byname, *garbage_bypubkey; 3521 3522 restart: 3523 garbage_byname = garbage_bypubkey = NULL; 3524 mutex_enter(wg->wg_lock); 3525 WG_PEER_WRITER_FOREACH(wgp, wg) { 3526 if (wgp->wgp_name[0]) { 3527 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name, 3528 strlen(wgp->wgp_name)); 3529 KASSERT(wgp0 == wgp); 3530 garbage_byname = thmap_stage_gc(wg->wg_peers_byname); 3531 } 3532 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 3533 sizeof(wgp->wgp_pubkey)); 3534 KASSERT(wgp0 == wgp); 3535 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey); 3536 WG_PEER_WRITER_REMOVE(wgp); 3537 wg->wg_npeers--; 3538 mutex_enter(wgp->wgp_lock); 3539 pserialize_perform(wgp->wgp_psz); 3540 mutex_exit(wgp->wgp_lock); 3541 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry); 3542 break; 3543 } 3544 mutex_exit(wg->wg_lock); 3545 3546 if (wgp == NULL) 3547 return; 3548 3549 psref_target_destroy(&wgp->wgp_psref, wg_psref_class); 3550 3551 wg_destroy_peer(wgp); 3552 thmap_gc(wg->wg_peers_byname, garbage_byname); 3553 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey); 3554 3555 goto restart; 3556 } 
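/*
 * wg_destroy_peer_name(wg, name)
 *
 *	Remove the peer named `name' from the byname/bypubkey maps and the
 *	peer list, wait for readers to drain, and destroy it.  Returns
 *	ENOENT if there is no such peer.
 */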
3557 3558 static int 3559 wg_destroy_peer_name(struct wg_softc *wg, const char *name) 3560 { 3561 struct wg_peer *wgp, *wgp0 __diagused; 3562 void *garbage_byname, *garbage_bypubkey; 3563 3564 mutex_enter(wg->wg_lock); 3565 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name)); 3566 if (wgp != NULL) { 3567 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 3568 sizeof(wgp->wgp_pubkey)); 3569 KASSERT(wgp0 == wgp); 3570 garbage_byname = thmap_stage_gc(wg->wg_peers_byname); 3571 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey); 3572 WG_PEER_WRITER_REMOVE(wgp); 3573 wg->wg_npeers--; 3574 if (wg->wg_npeers == 0) 3575 if_link_state_change(&wg->wg_if, LINK_STATE_DOWN); 3576 mutex_enter(wgp->wgp_lock); 3577 pserialize_perform(wgp->wgp_psz); 3578 mutex_exit(wgp->wgp_lock); 3579 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry); 3580 } 3581 mutex_exit(wg->wg_lock); 3582 3583 if (wgp == NULL) 3584 return ENOENT; 3585 3586 psref_target_destroy(&wgp->wgp_psref, wg_psref_class); 3587 3588 wg_destroy_peer(wgp); 3589 thmap_gc(wg->wg_peers_byname, garbage_byname); 3590 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey); 3591 3592 return 0; 3593 } 3594 3595 static int 3596 wg_if_attach(struct wg_softc *wg) 3597 { 3598 3599 wg->wg_if.if_addrlen = 0; 3600 wg->wg_if.if_mtu = WG_MTU; 3601 wg->wg_if.if_flags = IFF_MULTICAST; 3602 wg->wg_if.if_extflags = IFEF_MPSAFE; 3603 wg->wg_if.if_ioctl = wg_ioctl; 3604 wg->wg_if.if_output = wg_output; 3605 wg->wg_if.if_init = wg_init; 3606 #ifdef ALTQ 3607 wg->wg_if.if_start = wg_start; 3608 #endif 3609 wg->wg_if.if_stop = wg_stop; 3610 wg->wg_if.if_type = IFT_OTHER; 3611 wg->wg_if.if_dlt = DLT_NULL; 3612 wg->wg_if.if_softc = wg; 3613 #ifdef ALTQ 3614 IFQ_SET_READY(&wg->wg_if.if_snd); 3615 #endif 3616 if_initialize(&wg->wg_if); 3617 3618 wg->wg_if.if_link_state = LINK_STATE_DOWN; 3619 if_alloc_sadl(&wg->wg_if); 3620 if_register(&wg->wg_if); 3621 3622 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t)); 3623 3624 return 0; 3625 } 3626 3627 static void 3628 wg_if_detach(struct wg_softc *wg) 3629 { 3630 struct ifnet *ifp = &wg->wg_if; 3631 3632 bpf_detach(ifp); 3633 if_detach(ifp); 3634 } 3635 3636 static int 3637 wg_clone_create(struct if_clone *ifc, int unit) 3638 { 3639 struct wg_softc *wg; 3640 int error; 3641 3642 wg_guarantee_initialized(); 3643 3644 error = wg_count_inc(); 3645 if (error) 3646 return error; 3647 3648 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP); 3649 3650 if_initname(&wg->wg_if, ifc->ifc_name, unit); 3651 3652 PSLIST_INIT(&wg->wg_peers); 3653 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY); 3654 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY); 3655 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY); 3656 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 3657 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 3658 wg->wg_rwlock = rw_obj_alloc(); 3659 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock, 3660 "%s", if_name(&wg->wg_if)); 3661 wg->wg_ops = &wg_ops_rumpkernel; 3662 3663 error = threadpool_get(&wg->wg_threadpool, PRI_NONE); 3664 if (error) 3665 goto fail0; 3666 3667 #ifdef INET 3668 error = wg_socreate(wg, AF_INET, &wg->wg_so4); 3669 if (error) 3670 goto fail1; 3671 rn_inithead((void **)&wg->wg_rtable_ipv4, 3672 offsetof(struct sockaddr_in, sin_addr) * NBBY); 3673 #endif 3674 #ifdef INET6 3675 error = wg_socreate(wg, AF_INET6, &wg->wg_so6); 3676 if (error) 3677 goto fail2; 3678 rn_inithead((void **)&wg->wg_rtable_ipv6, 3679 offsetof(struct sockaddr_in6, sin6_addr) * NBBY); 3680 
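	/*
	 * The second argument to rn_inithead() is the key offset in bits
	 * (hence the multiplication by NBBY), so radix lookups compare only
	 * the address bytes of the sockaddr and skip its length/family
	 * header.
	 */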
#endif 3681 3682 error = wg_if_attach(wg); 3683 if (error) 3684 goto fail3; 3685 3686 return 0; 3687 3688 fail4: __unused 3689 wg_if_detach(wg); 3690 fail3: wg_destroy_all_peers(wg); 3691 #ifdef INET6 3692 solock(wg->wg_so6); 3693 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL; 3694 sounlock(wg->wg_so6); 3695 #endif 3696 #ifdef INET 3697 solock(wg->wg_so4); 3698 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL; 3699 sounlock(wg->wg_so4); 3700 #endif 3701 mutex_enter(wg->wg_intr_lock); 3702 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job); 3703 mutex_exit(wg->wg_intr_lock); 3704 #ifdef INET6 3705 if (wg->wg_rtable_ipv6 != NULL) 3706 free(wg->wg_rtable_ipv6, M_RTABLE); 3707 soclose(wg->wg_so6); 3708 fail2: 3709 #endif 3710 #ifdef INET 3711 if (wg->wg_rtable_ipv4 != NULL) 3712 free(wg->wg_rtable_ipv4, M_RTABLE); 3713 soclose(wg->wg_so4); 3714 fail1: 3715 #endif 3716 threadpool_put(wg->wg_threadpool, PRI_NONE); 3717 fail0: threadpool_job_destroy(&wg->wg_job); 3718 rw_obj_free(wg->wg_rwlock); 3719 mutex_obj_free(wg->wg_intr_lock); 3720 mutex_obj_free(wg->wg_lock); 3721 thmap_destroy(wg->wg_sessions_byindex); 3722 thmap_destroy(wg->wg_peers_byname); 3723 thmap_destroy(wg->wg_peers_bypubkey); 3724 PSLIST_DESTROY(&wg->wg_peers); 3725 kmem_free(wg, sizeof(*wg)); 3726 wg_count_dec(); 3727 return error; 3728 } 3729 3730 static int 3731 wg_clone_destroy(struct ifnet *ifp) 3732 { 3733 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if); 3734 3735 #ifdef WG_RUMPKERNEL 3736 if (wg_user_mode(wg)) { 3737 rumpuser_wg_destroy(wg->wg_user); 3738 wg->wg_user = NULL; 3739 } 3740 #endif 3741 3742 wg_if_detach(wg); 3743 wg_destroy_all_peers(wg); 3744 #ifdef INET6 3745 solock(wg->wg_so6); 3746 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL; 3747 sounlock(wg->wg_so6); 3748 #endif 3749 #ifdef INET 3750 solock(wg->wg_so4); 3751 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL; 3752 sounlock(wg->wg_so4); 3753 #endif 3754 mutex_enter(wg->wg_intr_lock); 3755 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job); 3756 mutex_exit(wg->wg_intr_lock); 3757 #ifdef INET6 3758 if (wg->wg_rtable_ipv6 != NULL) 3759 free(wg->wg_rtable_ipv6, M_RTABLE); 3760 soclose(wg->wg_so6); 3761 #endif 3762 #ifdef INET 3763 if (wg->wg_rtable_ipv4 != NULL) 3764 free(wg->wg_rtable_ipv4, M_RTABLE); 3765 soclose(wg->wg_so4); 3766 #endif 3767 threadpool_put(wg->wg_threadpool, PRI_NONE); 3768 threadpool_job_destroy(&wg->wg_job); 3769 rw_obj_free(wg->wg_rwlock); 3770 mutex_obj_free(wg->wg_intr_lock); 3771 mutex_obj_free(wg->wg_lock); 3772 thmap_destroy(wg->wg_sessions_byindex); 3773 thmap_destroy(wg->wg_peers_byname); 3774 thmap_destroy(wg->wg_peers_bypubkey); 3775 PSLIST_DESTROY(&wg->wg_peers); 3776 kmem_free(wg, sizeof(*wg)); 3777 wg_count_dec(); 3778 3779 return 0; 3780 } 3781 3782 static struct wg_peer * 3783 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa, 3784 struct psref *psref) 3785 { 3786 struct radix_node_head *rnh; 3787 struct radix_node *rn; 3788 struct wg_peer *wgp = NULL; 3789 struct wg_allowedip *wga; 3790 3791 #ifdef WG_DEBUG_LOG 3792 char addrstr[128]; 3793 sockaddr_format(sa, addrstr, sizeof(addrstr)); 3794 WG_DLOG("sa=%s\n", addrstr); 3795 #endif 3796 3797 rw_enter(wg->wg_rwlock, RW_READER); 3798 3799 rnh = wg_rnh(wg, sa->sa_family); 3800 if (rnh == NULL) 3801 goto out; 3802 3803 rn = rnh->rnh_matchaddr(sa, rnh); 3804 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0) 3805 goto out; 3806 3807 WG_TRACE("success"); 3808 3809 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]); 3810 wgp = wga->wga_peer; 3811 
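	/*
	 * Acquire a psref before dropping wg_rwlock so the peer cannot be
	 * destroyed while the caller is still using it.
	 */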
wg_get_peer(wgp, psref); 3812 3813 out: 3814 rw_exit(wg->wg_rwlock); 3815 return wgp; 3816 } 3817 3818 static void 3819 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp, 3820 struct wg_session *wgs, struct wg_msg_data *wgmd) 3821 { 3822 3823 memset(wgmd, 0, sizeof(*wgmd)); 3824 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA); 3825 wgmd->wgmd_receiver = wgs->wgs_remote_index; 3826 /* [W] 5.4.6: msg.counter := Nm^send */ 3827 /* [W] 5.4.6: Nm^send := Nm^send + 1 */ 3828 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs)); 3829 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter)); 3830 } 3831 3832 static int 3833 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 3834 const struct rtentry *rt) 3835 { 3836 struct wg_softc *wg = ifp->if_softc; 3837 struct wg_peer *wgp = NULL; 3838 struct wg_session *wgs = NULL; 3839 struct psref wgp_psref, wgs_psref; 3840 int bound; 3841 int error; 3842 3843 bound = curlwp_bind(); 3844 3845 /* TODO make the nest limit configurable via sysctl */ 3846 error = if_tunnel_check_nesting(ifp, m, 1); 3847 if (error) { 3848 WGLOG(LOG_ERR, 3849 "%s: tunneling loop detected and packet dropped\n", 3850 if_name(&wg->wg_if)); 3851 goto out0; 3852 } 3853 3854 #ifdef ALTQ 3855 bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags) 3856 & ALTQF_ENABLED; 3857 if (altq) 3858 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family); 3859 #endif 3860 3861 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT); 3862 3863 m->m_flags &= ~(M_BCAST|M_MCAST); 3864 3865 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref); 3866 if (wgp == NULL) { 3867 WG_TRACE("peer not found"); 3868 error = EHOSTUNREACH; 3869 goto out0; 3870 } 3871 3872 /* Clear checksum-offload flags. */ 3873 m->m_pkthdr.csum_flags = 0; 3874 m->m_pkthdr.csum_data = 0; 3875 3876 /* Check whether there's an established session. */ 3877 wgs = wg_get_stable_session(wgp, &wgs_psref); 3878 if (wgs == NULL) { 3879 /* 3880 * No established session. If we're the first to try 3881 * sending data, schedule a handshake and queue the 3882 * packet for when the handshake is done; otherwise 3883 * just drop the packet and let the ongoing handshake 3884 * attempt continue. We could queue more data packets 3885 * but it's not clear that's worthwhile. 3886 */ 3887 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) { 3888 m = NULL; /* consume */ 3889 WG_TRACE("queued first packet; init handshake"); 3890 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3891 } else { 3892 WG_TRACE("first packet already queued, dropping"); 3893 } 3894 goto out1; 3895 } 3896 3897 /* There's an established session. Toss it in the queue. 
*/ 3898 #ifdef ALTQ 3899 if (altq) { 3900 mutex_enter(ifp->if_snd.ifq_lock); 3901 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 3902 M_SETCTX(m, wgp); 3903 ALTQ_ENQUEUE(&ifp->if_snd, m, error); 3904 m = NULL; /* consume */ 3905 } 3906 mutex_exit(ifp->if_snd.ifq_lock); 3907 if (m == NULL) { 3908 wg_start(ifp); 3909 goto out2; 3910 } 3911 } 3912 #endif 3913 kpreempt_disable(); 3914 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 3915 M_SETCTX(m, wgp); 3916 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 3917 WGLOG(LOG_ERR, "%s: pktq full, dropping\n", 3918 if_name(&wg->wg_if)); 3919 error = ENOBUFS; 3920 goto out3; 3921 } 3922 m = NULL; /* consumed */ 3923 error = 0; 3924 out3: kpreempt_enable(); 3925 3926 #ifdef ALTQ 3927 out2: 3928 #endif 3929 wg_put_session(wgs, &wgs_psref); 3930 out1: wg_put_peer(wgp, &wgp_psref); 3931 out0: if (m) 3932 m_freem(m); 3933 curlwp_bindx(bound); 3934 return error; 3935 } 3936 3937 static int 3938 wg_send_udp(struct wg_peer *wgp, struct mbuf *m) 3939 { 3940 struct psref psref; 3941 struct wg_sockaddr *wgsa; 3942 int error; 3943 struct socket *so; 3944 3945 wgsa = wg_get_endpoint_sa(wgp, &psref); 3946 so = wg_get_so_by_peer(wgp, wgsa); 3947 solock(so); 3948 if (wgsatosa(wgsa)->sa_family == AF_INET) { 3949 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp); 3950 } else { 3951 #ifdef INET6 3952 error = udp6_output(sotoinpcb(so), m, wgsatosin6(wgsa), 3953 NULL, curlwp); 3954 #else 3955 m_freem(m); 3956 error = EPFNOSUPPORT; 3957 #endif 3958 } 3959 sounlock(so); 3960 wg_put_sa(wgp, wgsa, &psref); 3961 3962 return error; 3963 } 3964 3965 /* Inspired by pppoe_get_mbuf */ 3966 static struct mbuf * 3967 wg_get_mbuf(size_t leading_len, size_t len) 3968 { 3969 struct mbuf *m; 3970 3971 KASSERT(leading_len <= MCLBYTES); 3972 KASSERT(len <= MCLBYTES - leading_len); 3973 3974 m = m_gethdr(M_DONTWAIT, MT_DATA); 3975 if (m == NULL) 3976 return NULL; 3977 if (len + leading_len > MHLEN) { 3978 m_clget(m, M_DONTWAIT); 3979 if ((m->m_flags & M_EXT) == 0) { 3980 m_free(m); 3981 return NULL; 3982 } 3983 } 3984 m->m_data += leading_len; 3985 m->m_pkthdr.len = m->m_len = len; 3986 3987 return m; 3988 } 3989 3990 static int 3991 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs, 3992 struct mbuf *m) 3993 { 3994 struct wg_softc *wg = wgp->wgp_sc; 3995 int error; 3996 size_t inner_len, padded_len, encrypted_len; 3997 char *padded_buf = NULL; 3998 size_t mlen; 3999 struct wg_msg_data *wgmd; 4000 bool free_padded_buf = false; 4001 struct mbuf *n; 4002 size_t leading_len = max_hdr + sizeof(struct udphdr); 4003 4004 mlen = m_length(m); 4005 inner_len = mlen; 4006 padded_len = roundup(mlen, 16); 4007 encrypted_len = padded_len + WG_AUTHTAG_LEN; 4008 WG_DLOG("inner=%lu, padded=%lu, encrypted_len=%lu\n", 4009 inner_len, padded_len, encrypted_len); 4010 if (mlen != 0) { 4011 bool success; 4012 success = m_ensure_contig(&m, padded_len); 4013 if (success) { 4014 padded_buf = mtod(m, char *); 4015 } else { 4016 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP); 4017 if (padded_buf == NULL) { 4018 error = ENOBUFS; 4019 goto end; 4020 } 4021 free_padded_buf = true; 4022 m_copydata(m, 0, mlen, padded_buf); 4023 } 4024 memset(padded_buf + mlen, 0, padded_len - inner_len); 4025 } 4026 4027 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len); 4028 if (n == NULL) { 4029 error = ENOBUFS; 4030 goto end; 4031 } 4032 KASSERT(n->m_len >= sizeof(*wgmd)); 4033 wgmd = mtod(n, struct wg_msg_data *); 4034 wg_fill_msg_data(wg, wgp, wgs, wgmd); 4035 /* [W] 5.4.6: AEAD(Tm^send, 
Nm^send, P, e) */ 4036 wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len, 4037 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter), 4038 padded_buf, padded_len, 4039 NULL, 0); 4040 4041 error = wg->wg_ops->send_data_msg(wgp, n); 4042 if (error == 0) { 4043 struct ifnet *ifp = &wg->wg_if; 4044 if_statadd(ifp, if_obytes, mlen); 4045 if_statinc(ifp, if_opackets); 4046 if (wgs->wgs_is_initiator && 4047 wgs->wgs_time_last_data_sent == 0) { 4048 /* 4049 * [W] 6.2 Transport Message Limits 4050 * "if a peer is the initiator of a current secure 4051 * session, WireGuard will send a handshake initiation 4052 * message to begin a new secure session if, after 4053 * transmitting a transport data message, the current 4054 * secure session is REKEY-AFTER-TIME seconds old," 4055 */ 4056 wg_schedule_rekey_timer(wgp); 4057 } 4058 wgs->wgs_time_last_data_sent = time_uptime; 4059 if (wg_session_get_send_counter(wgs) >= 4060 wg_rekey_after_messages) { 4061 /* 4062 * [W] 6.2 Transport Message Limits 4063 * "WireGuard will try to create a new session, by 4064 * sending a handshake initiation message (section 4065 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES 4066 * transport data messages..." 4067 */ 4068 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 4069 } 4070 } 4071 end: 4072 m_freem(m); 4073 if (free_padded_buf) 4074 kmem_intr_free(padded_buf, padded_len); 4075 return error; 4076 } 4077 4078 static void 4079 wg_input(struct ifnet *ifp, struct mbuf *m, const int af) 4080 { 4081 pktqueue_t *pktq; 4082 size_t pktlen; 4083 4084 KASSERT(af == AF_INET || af == AF_INET6); 4085 4086 WG_TRACE(""); 4087 4088 m_set_rcvif(m, ifp); 4089 pktlen = m->m_pkthdr.len; 4090 4091 bpf_mtap_af(ifp, af, m, BPF_D_IN); 4092 4093 switch (af) { 4094 case AF_INET: 4095 pktq = ip_pktq; 4096 break; 4097 #ifdef INET6 4098 case AF_INET6: 4099 pktq = ip6_pktq; 4100 break; 4101 #endif 4102 default: 4103 panic("invalid af=%d", af); 4104 } 4105 4106 kpreempt_disable(); 4107 const u_int h = curcpu()->ci_index; 4108 if (__predict_true(pktq_enqueue(pktq, m, h))) { 4109 if_statadd(ifp, if_ibytes, pktlen); 4110 if_statinc(ifp, if_ipackets); 4111 } else { 4112 m_freem(m); 4113 } 4114 kpreempt_enable(); 4115 } 4116 4117 static void 4118 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN], 4119 const uint8_t privkey[WG_STATIC_KEY_LEN]) 4120 { 4121 4122 crypto_scalarmult_base(pubkey, privkey); 4123 } 4124 4125 static int 4126 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga) 4127 { 4128 struct radix_node_head *rnh; 4129 struct radix_node *rn; 4130 int error = 0; 4131 4132 rw_enter(wg->wg_rwlock, RW_WRITER); 4133 rnh = wg_rnh(wg, wga->wga_family); 4134 KASSERT(rnh != NULL); 4135 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh, 4136 wga->wga_nodes); 4137 rw_exit(wg->wg_rwlock); 4138 4139 if (rn == NULL) 4140 error = EEXIST; 4141 4142 return error; 4143 } 4144 4145 static int 4146 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer, 4147 struct wg_peer **wgpp) 4148 { 4149 int error = 0; 4150 const void *pubkey; 4151 size_t pubkey_len; 4152 const void *psk; 4153 size_t psk_len; 4154 const char *name = NULL; 4155 4156 if (prop_dictionary_get_string(peer, "name", &name)) { 4157 if (strlen(name) > WG_PEER_NAME_MAXLEN) { 4158 error = EINVAL; 4159 goto out; 4160 } 4161 } 4162 4163 if (!prop_dictionary_get_data(peer, "public_key", 4164 &pubkey, &pubkey_len)) { 4165 error = EINVAL; 4166 goto out; 4167 } 4168 #ifdef WG_DEBUG_DUMP 4169 { 4170 char *hex = gethexdump(pubkey, pubkey_len); 4171 
log(LOG_DEBUG, "pubkey=%p, pubkey_len=%lu\n%s\n", 4172 pubkey, pubkey_len, hex); 4173 puthexdump(hex, pubkey, pubkey_len); 4174 } 4175 #endif 4176 4177 struct wg_peer *wgp = wg_alloc_peer(wg); 4178 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey)); 4179 if (name != NULL) 4180 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name)); 4181 4182 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) { 4183 if (psk_len != sizeof(wgp->wgp_psk)) { 4184 error = EINVAL; 4185 goto out; 4186 } 4187 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk)); 4188 } 4189 4190 const void *addr; 4191 size_t addr_len; 4192 struct wg_sockaddr *wgsa = wgp->wgp_endpoint; 4193 4194 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len)) 4195 goto skip_endpoint; 4196 if (addr_len < sizeof(*wgsatosa(wgsa)) || 4197 addr_len > sizeof(*wgsatoss(wgsa))) { 4198 error = EINVAL; 4199 goto out; 4200 } 4201 memcpy(wgsatoss(wgsa), addr, addr_len); 4202 switch (wgsa_family(wgsa)) { 4203 case AF_INET: 4204 #ifdef INET6 4205 case AF_INET6: 4206 #endif 4207 break; 4208 default: 4209 error = EPFNOSUPPORT; 4210 goto out; 4211 } 4212 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) { 4213 error = EINVAL; 4214 goto out; 4215 } 4216 { 4217 char addrstr[128]; 4218 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr)); 4219 WG_DLOG("addr=%s\n", addrstr); 4220 } 4221 wgp->wgp_endpoint_available = true; 4222 4223 prop_array_t allowedips; 4224 skip_endpoint: 4225 allowedips = prop_dictionary_get(peer, "allowedips"); 4226 if (allowedips == NULL) 4227 goto skip; 4228 4229 prop_object_iterator_t _it = prop_array_iterator(allowedips); 4230 prop_dictionary_t prop_allowedip; 4231 int j = 0; 4232 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) { 4233 struct wg_allowedip *wga = &wgp->wgp_allowedips[j]; 4234 4235 if (!prop_dictionary_get_int(prop_allowedip, "family", 4236 &wga->wga_family)) 4237 continue; 4238 if (!prop_dictionary_get_data(prop_allowedip, "ip", 4239 &addr, &addr_len)) 4240 continue; 4241 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr", 4242 &wga->wga_cidr)) 4243 continue; 4244 4245 switch (wga->wga_family) { 4246 case AF_INET: { 4247 struct sockaddr_in sin; 4248 char addrstr[128]; 4249 struct in_addr mask; 4250 struct sockaddr_in sin_mask; 4251 4252 if (addr_len != sizeof(struct in_addr)) 4253 return EINVAL; 4254 memcpy(&wga->wga_addr4, addr, addr_len); 4255 4256 sockaddr_in_init(&sin, (const struct in_addr *)addr, 4257 0); 4258 sockaddr_copy(&wga->wga_sa_addr, 4259 sizeof(sin), sintosa(&sin)); 4260 4261 sockaddr_format(sintosa(&sin), 4262 addrstr, sizeof(addrstr)); 4263 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr); 4264 4265 in_len2mask(&mask, wga->wga_cidr); 4266 sockaddr_in_init(&sin_mask, &mask, 0); 4267 sockaddr_copy(&wga->wga_sa_mask, 4268 sizeof(sin_mask), sintosa(&sin_mask)); 4269 4270 break; 4271 } 4272 #ifdef INET6 4273 case AF_INET6: { 4274 struct sockaddr_in6 sin6; 4275 char addrstr[128]; 4276 struct in6_addr mask; 4277 struct sockaddr_in6 sin6_mask; 4278 4279 if (addr_len != sizeof(struct in6_addr)) 4280 return EINVAL; 4281 memcpy(&wga->wga_addr6, addr, addr_len); 4282 4283 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr, 4284 0, 0, 0); 4285 sockaddr_copy(&wga->wga_sa_addr, 4286 sizeof(sin6), sin6tosa(&sin6)); 4287 4288 sockaddr_format(sin6tosa(&sin6), 4289 addrstr, sizeof(addrstr)); 4290 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr); 4291 4292 in6_prefixlen2mask(&mask, wga->wga_cidr); 4293 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0); 4294 
sockaddr_copy(&wga->wga_sa_mask, 4295 sizeof(sin6_mask), sin6tosa(&sin6_mask)); 4296 4297 break; 4298 } 4299 #endif 4300 default: 4301 error = EINVAL; 4302 goto out; 4303 } 4304 wga->wga_peer = wgp; 4305 4306 error = wg_rtable_add_route(wg, wga); 4307 if (error != 0) 4308 goto out; 4309 4310 j++; 4311 } 4312 wgp->wgp_n_allowedips = j; 4313 skip: 4314 *wgpp = wgp; 4315 out: 4316 return error; 4317 } 4318 4319 static int 4320 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd) 4321 { 4322 int error; 4323 char *buf; 4324 4325 WG_DLOG("buf=%p, len=%lu\n", ifd->ifd_data, ifd->ifd_len); 4326 if (ifd->ifd_len >= WG_MAX_PROPLEN) 4327 return E2BIG; 4328 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP); 4329 error = copyin(ifd->ifd_data, buf, ifd->ifd_len); 4330 if (error != 0) 4331 return error; 4332 buf[ifd->ifd_len] = '\0'; 4333 #ifdef WG_DEBUG_DUMP 4334 log(LOG_DEBUG, "%.*s\n", 4335 (int)MIN(INT_MAX, ifd->ifd_len), 4336 (const char *)buf); 4337 #endif 4338 *_buf = buf; 4339 return 0; 4340 } 4341 4342 static int 4343 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd) 4344 { 4345 int error; 4346 prop_dictionary_t prop_dict; 4347 char *buf = NULL; 4348 const void *privkey; 4349 size_t privkey_len; 4350 4351 error = wg_alloc_prop_buf(&buf, ifd); 4352 if (error != 0) 4353 return error; 4354 error = EINVAL; 4355 prop_dict = prop_dictionary_internalize(buf); 4356 if (prop_dict == NULL) 4357 goto out; 4358 if (!prop_dictionary_get_data(prop_dict, "private_key", 4359 &privkey, &privkey_len)) 4360 goto out; 4361 #ifdef WG_DEBUG_DUMP 4362 { 4363 char *hex = gethexdump(privkey, privkey_len); 4364 log(LOG_DEBUG, "privkey=%p, privkey_len=%lu\n%s\n", 4365 privkey, privkey_len, hex); 4366 puthexdump(hex, privkey, privkey_len); 4367 } 4368 #endif 4369 if (privkey_len != WG_STATIC_KEY_LEN) 4370 goto out; 4371 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN); 4372 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey); 4373 error = 0; 4374 4375 out: 4376 kmem_free(buf, ifd->ifd_len + 1); 4377 return error; 4378 } 4379 4380 static int 4381 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd) 4382 { 4383 int error; 4384 prop_dictionary_t prop_dict; 4385 char *buf = NULL; 4386 uint16_t port; 4387 4388 error = wg_alloc_prop_buf(&buf, ifd); 4389 if (error != 0) 4390 return error; 4391 error = EINVAL; 4392 prop_dict = prop_dictionary_internalize(buf); 4393 if (prop_dict == NULL) 4394 goto out; 4395 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port)) 4396 goto out; 4397 4398 error = wg->wg_ops->bind_port(wg, (uint16_t)port); 4399 4400 out: 4401 kmem_free(buf, ifd->ifd_len + 1); 4402 return error; 4403 } 4404 4405 static int 4406 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd) 4407 { 4408 int error; 4409 prop_dictionary_t prop_dict; 4410 char *buf = NULL; 4411 struct wg_peer *wgp = NULL, *wgp0 __diagused; 4412 4413 error = wg_alloc_prop_buf(&buf, ifd); 4414 if (error != 0) 4415 return error; 4416 error = EINVAL; 4417 prop_dict = prop_dictionary_internalize(buf); 4418 if (prop_dict == NULL) 4419 goto out; 4420 4421 error = wg_handle_prop_peer(wg, prop_dict, &wgp); 4422 if (error != 0) 4423 goto out; 4424 4425 mutex_enter(wg->wg_lock); 4426 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 4427 sizeof(wgp->wgp_pubkey)) != NULL || 4428 (wgp->wgp_name[0] && 4429 thmap_get(wg->wg_peers_byname, wgp->wgp_name, 4430 strlen(wgp->wgp_name)) != NULL)) { 4431 mutex_exit(wg->wg_lock); 4432 wg_destroy_peer(wgp); 4433 error = EEXIST; 4434 goto out; 4435 } 4436 wgp0 = 
thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 4437 sizeof(wgp->wgp_pubkey), wgp); 4438 KASSERT(wgp0 == wgp); 4439 if (wgp->wgp_name[0]) { 4440 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name, 4441 strlen(wgp->wgp_name), wgp); 4442 KASSERT(wgp0 == wgp); 4443 } 4444 WG_PEER_WRITER_INSERT_HEAD(wgp, wg); 4445 wg->wg_npeers++; 4446 mutex_exit(wg->wg_lock); 4447 4448 if_link_state_change(&wg->wg_if, LINK_STATE_UP); 4449 4450 out: 4451 kmem_free(buf, ifd->ifd_len + 1); 4452 return error; 4453 } 4454 4455 static int 4456 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd) 4457 { 4458 int error; 4459 prop_dictionary_t prop_dict; 4460 char *buf = NULL; 4461 const char *name; 4462 4463 error = wg_alloc_prop_buf(&buf, ifd); 4464 if (error != 0) 4465 return error; 4466 error = EINVAL; 4467 prop_dict = prop_dictionary_internalize(buf); 4468 if (prop_dict == NULL) 4469 goto out; 4470 4471 if (!prop_dictionary_get_string(prop_dict, "name", &name)) 4472 goto out; 4473 if (strlen(name) > WG_PEER_NAME_MAXLEN) 4474 goto out; 4475 4476 error = wg_destroy_peer_name(wg, name); 4477 out: 4478 kmem_free(buf, ifd->ifd_len + 1); 4479 return error; 4480 } 4481 4482 static bool 4483 wg_is_authorized(struct wg_softc *wg, u_long cmd) 4484 { 4485 int au = cmd == SIOCGDRVSPEC ? 4486 KAUTH_REQ_NETWORK_INTERFACE_WG_GETPRIV : 4487 KAUTH_REQ_NETWORK_INTERFACE_WG_SETPRIV; 4488 return kauth_authorize_network(kauth_cred_get(), 4489 KAUTH_NETWORK_INTERFACE_WG, au, &wg->wg_if, 4490 (void *)cmd, NULL) == 0; 4491 } 4492 4493 static int 4494 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd) 4495 { 4496 int error = ENOMEM; 4497 prop_dictionary_t prop_dict; 4498 prop_array_t peers = NULL; 4499 char *buf; 4500 struct wg_peer *wgp; 4501 int s, i; 4502 4503 prop_dict = prop_dictionary_create(); 4504 if (prop_dict == NULL) 4505 goto error; 4506 4507 if (wg_is_authorized(wg, SIOCGDRVSPEC)) { 4508 if (!prop_dictionary_set_data(prop_dict, "private_key", 4509 wg->wg_privkey, WG_STATIC_KEY_LEN)) 4510 goto error; 4511 } 4512 4513 if (wg->wg_listen_port != 0) { 4514 if (!prop_dictionary_set_uint16(prop_dict, "listen_port", 4515 wg->wg_listen_port)) 4516 goto error; 4517 } 4518 4519 if (wg->wg_npeers == 0) 4520 goto skip_peers; 4521 4522 peers = prop_array_create(); 4523 if (peers == NULL) 4524 goto error; 4525 4526 s = pserialize_read_enter(); 4527 i = 0; 4528 WG_PEER_READER_FOREACH(wgp, wg) { 4529 struct wg_sockaddr *wgsa; 4530 struct psref wgp_psref, wgsa_psref; 4531 prop_dictionary_t prop_peer; 4532 4533 wg_get_peer(wgp, &wgp_psref); 4534 pserialize_read_exit(s); 4535 4536 prop_peer = prop_dictionary_create(); 4537 if (prop_peer == NULL) 4538 goto next; 4539 4540 if (strlen(wgp->wgp_name) > 0) { 4541 if (!prop_dictionary_set_string(prop_peer, "name", 4542 wgp->wgp_name)) 4543 goto next; 4544 } 4545 4546 if (!prop_dictionary_set_data(prop_peer, "public_key", 4547 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey))) 4548 goto next; 4549 4550 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0}; 4551 if (!consttime_memequal(wgp->wgp_psk, psk_zero, 4552 sizeof(wgp->wgp_psk))) { 4553 if (wg_is_authorized(wg, SIOCGDRVSPEC)) { 4554 if (!prop_dictionary_set_data(prop_peer, 4555 "preshared_key", 4556 wgp->wgp_psk, sizeof(wgp->wgp_psk))) 4557 goto next; 4558 } 4559 } 4560 4561 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref); 4562 CTASSERT(AF_UNSPEC == 0); 4563 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ && 4564 !prop_dictionary_set_data(prop_peer, "endpoint", 4565 wgsatoss(wgsa), 4566 sockaddr_getsize_by_family(wgsa_family(wgsa)))) { 4567 wg_put_sa(wgp, 
wgsa, &wgsa_psref); 4568 goto next; 4569 } 4570 wg_put_sa(wgp, wgsa, &wgsa_psref); 4571 4572 const struct timespec *t = &wgp->wgp_last_handshake_time; 4573 4574 if (!prop_dictionary_set_uint64(prop_peer, 4575 "last_handshake_time_sec", (uint64_t)t->tv_sec)) 4576 goto next; 4577 if (!prop_dictionary_set_uint32(prop_peer, 4578 "last_handshake_time_nsec", (uint32_t)t->tv_nsec)) 4579 goto next; 4580 4581 if (wgp->wgp_n_allowedips == 0) 4582 goto skip_allowedips; 4583 4584 prop_array_t allowedips = prop_array_create(); 4585 if (allowedips == NULL) 4586 goto next; 4587 for (int j = 0; j < wgp->wgp_n_allowedips; j++) { 4588 struct wg_allowedip *wga = &wgp->wgp_allowedips[j]; 4589 prop_dictionary_t prop_allowedip; 4590 4591 prop_allowedip = prop_dictionary_create(); 4592 if (prop_allowedip == NULL) 4593 break; 4594 4595 if (!prop_dictionary_set_int(prop_allowedip, "family", 4596 wga->wga_family)) 4597 goto _next; 4598 if (!prop_dictionary_set_uint8(prop_allowedip, "cidr", 4599 wga->wga_cidr)) 4600 goto _next; 4601 4602 switch (wga->wga_family) { 4603 case AF_INET: 4604 if (!prop_dictionary_set_data(prop_allowedip, 4605 "ip", &wga->wga_addr4, 4606 sizeof(wga->wga_addr4))) 4607 goto _next; 4608 break; 4609 #ifdef INET6 4610 case AF_INET6: 4611 if (!prop_dictionary_set_data(prop_allowedip, 4612 "ip", &wga->wga_addr6, 4613 sizeof(wga->wga_addr6))) 4614 goto _next; 4615 break; 4616 #endif 4617 default: 4618 break; 4619 } 4620 prop_array_set(allowedips, j, prop_allowedip); 4621 _next: 4622 prop_object_release(prop_allowedip); 4623 } 4624 prop_dictionary_set(prop_peer, "allowedips", allowedips); 4625 prop_object_release(allowedips); 4626 4627 skip_allowedips: 4628 4629 prop_array_set(peers, i, prop_peer); 4630 next: 4631 if (prop_peer) 4632 prop_object_release(prop_peer); 4633 i++; 4634 4635 s = pserialize_read_enter(); 4636 wg_put_peer(wgp, &wgp_psref); 4637 } 4638 pserialize_read_exit(s); 4639 4640 prop_dictionary_set(prop_dict, "peers", peers); 4641 prop_object_release(peers); 4642 peers = NULL; 4643 4644 skip_peers: 4645 buf = prop_dictionary_externalize(prop_dict); 4646 if (buf == NULL) 4647 goto error; 4648 if (ifd->ifd_len < (strlen(buf) + 1)) { 4649 error = EINVAL; 4650 goto error; 4651 } 4652 error = copyout(buf, ifd->ifd_data, strlen(buf) + 1); 4653 4654 free(buf, 0); 4655 error: 4656 if (peers != NULL) 4657 prop_object_release(peers); 4658 if (prop_dict != NULL) 4659 prop_object_release(prop_dict); 4660 4661 return error; 4662 } 4663 4664 static int 4665 wg_ioctl(struct ifnet *ifp, u_long cmd, void *data) 4666 { 4667 struct wg_softc *wg = ifp->if_softc; 4668 struct ifreq *ifr = data; 4669 struct ifaddr *ifa = data; 4670 struct ifdrv *ifd = data; 4671 int error = 0; 4672 4673 switch (cmd) { 4674 case SIOCINITIFADDR: 4675 if (ifa->ifa_addr->sa_family != AF_LINK && 4676 (ifp->if_flags & (IFF_UP | IFF_RUNNING)) != 4677 (IFF_UP | IFF_RUNNING)) { 4678 ifp->if_flags |= IFF_UP; 4679 error = if_init(ifp); 4680 } 4681 return error; 4682 case SIOCADDMULTI: 4683 case SIOCDELMULTI: 4684 switch (ifr->ifr_addr.sa_family) { 4685 case AF_INET: /* IP supports Multicast */ 4686 break; 4687 #ifdef INET6 4688 case AF_INET6: /* IP6 supports Multicast */ 4689 break; 4690 #endif 4691 default: /* Other protocols doesn't support Multicast */ 4692 error = EAFNOSUPPORT; 4693 break; 4694 } 4695 return error; 4696 case SIOCSDRVSPEC: 4697 if (!wg_is_authorized(wg, cmd)) { 4698 return EPERM; 4699 } 4700 switch (ifd->ifd_cmd) { 4701 case WG_IOCTL_SET_PRIVATE_KEY: 4702 error = wg_ioctl_set_private_key(wg, ifd); 4703 break; 
4704			case WG_IOCTL_SET_LISTEN_PORT:
4705				error = wg_ioctl_set_listen_port(wg, ifd);
4706				break;
4707			case WG_IOCTL_ADD_PEER:
4708				error = wg_ioctl_add_peer(wg, ifd);
4709				break;
4710			case WG_IOCTL_DELETE_PEER:
4711				error = wg_ioctl_delete_peer(wg, ifd);
4712				break;
4713			default:
4714				error = EINVAL;
4715				break;
4716			}
4717			return error;
4718		case SIOCGDRVSPEC:
4719			return wg_ioctl_get(wg, ifd);
4720		case SIOCSIFFLAGS:
4721			if ((error = ifioctl_common(ifp, cmd, data)) != 0)
4722				break;
4723			switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
4724			case IFF_RUNNING:
4725				/*
4726				 * If the interface is marked down while it is
4727				 * running, stop and disable it.
4728				 */
4729				if_stop(ifp, 1);
4730				break;
4731			case IFF_UP:
4732				/*
4733				 * If the interface is marked up while it is
4734				 * stopped, start it.
4735				 */
4736				error = if_init(ifp);
4737				break;
4738			default:
4739				break;
4740			}
4741			return error;
4742	#ifdef WG_RUMPKERNEL
4743		case SIOCSLINKSTR:
4744			error = wg_ioctl_linkstr(wg, ifd);
4745			if (error == 0)
4746				wg->wg_ops = &wg_ops_rumpuser;
4747			return error;
4748	#endif
4749		default:
4750			break;
4751		}
4752	
4753		error = ifioctl_common(ifp, cmd, data);
4754	
4755	#ifdef WG_RUMPKERNEL
4756		if (!wg_user_mode(wg))
4757			return error;
4758	
4759		/* Do the same to the corresponding tun device on the host */
4760		/*
4761		 * XXX Actually the command has not been handled yet.  It
4762		 * will be handled via pr_ioctl from doifioctl later.
4763		 */
4764		switch (cmd) {
4765		case SIOCAIFADDR:
4766		case SIOCDIFADDR: {
4767			struct in_aliasreq _ifra = *(const struct in_aliasreq *)data;
4768			struct in_aliasreq *ifra = &_ifra;
4769			KASSERT(error == ENOTTY);
4770			strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4771			    IFNAMSIZ);
4772			error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET);
4773			if (error == 0)
4774				error = ENOTTY;
4775			break;
4776		}
4777	#ifdef INET6
4778		case SIOCAIFADDR_IN6:
4779		case SIOCDIFADDR_IN6: {
4780			struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data;
4781			struct in6_aliasreq *ifra = &_ifra;
4782			KASSERT(error == ENOTTY);
4783			strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user),
4784			    IFNAMSIZ);
4785			error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6);
4786			if (error == 0)
4787				error = ENOTTY;
4788			break;
4789		}
4790	#endif
4791		}
4792	#endif /* WG_RUMPKERNEL */
4793	
4794		return error;
4795	}
4796	
4797	static int
4798	wg_init(struct ifnet *ifp)
4799	{
4800	
4801		ifp->if_flags |= IFF_RUNNING;
4802	
4803		/* TODO flush pending packets. */
4804		return 0;
4805	}
4806	
4807	#ifdef ALTQ
4808	static void
4809	wg_start(struct ifnet *ifp)
4810	{
4811		struct mbuf *m;
4812	
4813		for (;;) {
4814			IFQ_DEQUEUE(&ifp->if_snd, m);
4815			if (m == NULL)
4816				break;
4817	
4818			kpreempt_disable();
4819			const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m)
4820			if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) {
4821				WGLOG(LOG_ERR, "%s: pktq full, dropping\n",
4822				    if_name(ifp));
4823				m_freem(m);
4824			}
4825			kpreempt_enable();
4826		}
4827	}
4828	#endif
4829	
4830	static void
4831	wg_stop(struct ifnet *ifp, int disable)
4832	{
4833	
4834		KASSERT((ifp->if_flags & IFF_RUNNING) != 0);
4835		ifp->if_flags &= ~IFF_RUNNING;
4836	
4837		/* Need to do something? */
4838	}
4839	
4840	#ifdef WG_DEBUG_PARAMS
4841	SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup")
4842	{
4843		const struct sysctlnode *node = NULL;
4844	
4845		sysctl_createv(clog, 0, NULL, &node,
4846		    CTLFLAG_PERMANENT,
4847		    CTLTYPE_NODE, "wg",
4848		    SYSCTL_DESCR("wg(4)"),
4849		    NULL, 0, NULL, 0,
4850		    CTL_NET, CTL_CREATE, CTL_EOL);
4851		sysctl_createv(clog, 0, &node, NULL,
4852		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4853		    CTLTYPE_QUAD, "rekey_after_messages",
4854		    SYSCTL_DESCR("session lifetime in messages"),
4855		    NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL);
4856		sysctl_createv(clog, 0, &node, NULL,
4857		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4858		    CTLTYPE_INT, "rekey_after_time",
4859		    SYSCTL_DESCR("session lifetime"),
4860		    NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL);
4861		sysctl_createv(clog, 0, &node, NULL,
4862		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4863		    CTLTYPE_INT, "rekey_timeout",
4864		    SYSCTL_DESCR("session handshake retry time"),
4865		    NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL);
4866		sysctl_createv(clog, 0, &node, NULL,
4867		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4868		    CTLTYPE_INT, "rekey_attempt_time",
4869		    SYSCTL_DESCR("session handshake timeout"),
4870		    NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL);
4871		sysctl_createv(clog, 0, &node, NULL,
4872		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4873		    CTLTYPE_INT, "keepalive_timeout",
4874		    SYSCTL_DESCR("keepalive timeout"),
4875		    NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL);
4876		sysctl_createv(clog, 0, &node, NULL,
4877		    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
4878		    CTLTYPE_BOOL, "force_underload",
4879		    SYSCTL_DESCR("force to act as if under load"),
4880		    NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL);
4881	}
4882	#endif
4883	
4884	#ifdef WG_RUMPKERNEL
4885	static bool
4886	wg_user_mode(struct wg_softc *wg)
4887	{
4888	
4889		return wg->wg_user != NULL;
4890	}
4891	
4892	static int
4893	wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd)
4894	{
4895		struct ifnet *ifp = &wg->wg_if;
4896		int error;
4897	
4898		if (ifp->if_flags & IFF_UP)
4899			return EBUSY;
4900	
4901		if (ifd->ifd_cmd == IFLINKSTR_UNSET) {
4902			/* XXX do nothing */
4903			return 0;
4904		} else if (ifd->ifd_cmd != 0) {
4905			return EINVAL;
4906		} else if (wg->wg_user != NULL) {
4907			return EBUSY;
4908		}
4909	
4910		/* Assume \0 included */
4911		if (ifd->ifd_len > IFNAMSIZ) {
4912			return E2BIG;
4913		} else if (ifd->ifd_len < 1) {
4914			return EINVAL;
4915		}
4916	
4917		char tun_name[IFNAMSIZ];
4918		error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL);
4919		if (error != 0)
4920			return error;
4921	
4922		if (strncmp(tun_name, "tun", 3) != 0)
4923			return EINVAL;
4924	
4925		error = rumpuser_wg_create(tun_name, wg, &wg->wg_user);
4926	
4927		return error;
4928	}
4929	
4930	static int
4931	wg_send_user(struct wg_peer *wgp, struct mbuf *m)
4932	{
4933		int error;
4934		struct psref psref;
4935		struct wg_sockaddr *wgsa;
4936		struct wg_softc *wg = wgp->wgp_sc;
4937		struct iovec iov[1];
4938	
4939		wgsa = wg_get_endpoint_sa(wgp, &psref);
4940	
4941		iov[0].iov_base = mtod(m, void *);
4942		iov[0].iov_len = m->m_len;
4943	
4944		/* Send messages to a peer via an ordinary socket.
*/ 4945 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1); 4946 4947 wg_put_sa(wgp, wgsa, &psref); 4948 4949 m_freem(m); 4950 4951 return error; 4952 } 4953 4954 static void 4955 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af) 4956 { 4957 struct wg_softc *wg = ifp->if_softc; 4958 struct iovec iov[2]; 4959 struct sockaddr_storage ss; 4960 4961 KASSERT(af == AF_INET || af == AF_INET6); 4962 4963 WG_TRACE(""); 4964 4965 if (af == AF_INET) { 4966 struct sockaddr_in *sin = (struct sockaddr_in *)&ss; 4967 struct ip *ip; 4968 4969 KASSERT(m->m_len >= sizeof(struct ip)); 4970 ip = mtod(m, struct ip *); 4971 sockaddr_in_init(sin, &ip->ip_dst, 0); 4972 } else { 4973 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss; 4974 struct ip6_hdr *ip6; 4975 4976 KASSERT(m->m_len >= sizeof(struct ip6_hdr)); 4977 ip6 = mtod(m, struct ip6_hdr *); 4978 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0); 4979 } 4980 4981 iov[0].iov_base = &ss; 4982 iov[0].iov_len = ss.ss_len; 4983 iov[1].iov_base = mtod(m, void *); 4984 iov[1].iov_len = m->m_len; 4985 4986 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 4987 4988 /* Send decrypted packets to users via a tun. */ 4989 rumpuser_wg_send_user(wg->wg_user, iov, 2); 4990 4991 m_freem(m); 4992 } 4993 4994 static int 4995 wg_bind_port_user(struct wg_softc *wg, const uint16_t port) 4996 { 4997 int error; 4998 uint16_t old_port = wg->wg_listen_port; 4999 5000 if (port != 0 && old_port == port) 5001 return 0; 5002 5003 error = rumpuser_wg_sock_bind(wg->wg_user, port); 5004 if (error == 0) 5005 wg->wg_listen_port = port; 5006 return error; 5007 } 5008 5009 /* 5010 * Receive user packets. 5011 */ 5012 void 5013 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen) 5014 { 5015 struct ifnet *ifp = &wg->wg_if; 5016 struct mbuf *m; 5017 const struct sockaddr *dst; 5018 5019 WG_TRACE(""); 5020 5021 dst = iov[0].iov_base; 5022 5023 m = m_gethdr(M_DONTWAIT, MT_DATA); 5024 if (m == NULL) 5025 return; 5026 m->m_len = m->m_pkthdr.len = 0; 5027 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base); 5028 5029 WG_DLOG("iov_len=%lu\n", iov[1].iov_len); 5030 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 5031 5032 (void)wg_output(ifp, m, dst, NULL); 5033 } 5034 5035 /* 5036 * Receive packets from a peer. 5037 */ 5038 void 5039 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen) 5040 { 5041 struct mbuf *m; 5042 const struct sockaddr *src; 5043 5044 WG_TRACE(""); 5045 5046 src = iov[0].iov_base; 5047 5048 m = m_gethdr(M_DONTWAIT, MT_DATA); 5049 if (m == NULL) 5050 return; 5051 m->m_len = m->m_pkthdr.len = 0; 5052 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base); 5053 5054 WG_DLOG("iov_len=%lu\n", iov[1].iov_len); 5055 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 5056 5057 wg_handle_packet(wg, m, src); 5058 } 5059 #endif /* WG_RUMPKERNEL */ 5060 5061 /* 5062 * Module infrastructure 5063 */ 5064 #include "if_module.h" 5065 5066 IF_MODULE(MODULE_CLASS_DRIVER, wg, "sodium,blake2s") 5067
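
/*
 * Example (added, illustrative sketch only; not compiled): how a
 * userland tool in the spirit of wgconfig(8) might add a peer through
 * the SIOCSDRVSPEC/WG_IOCTL_ADD_PEER path handled above.  The
 * dictionary keys mirror what wg_handle_prop_peer() parses
 * ("public_key", "endpoint", "allowedips" with "family"/"ip"/"cidr");
 * the function and variable names in the sketch are hypothetical, and
 * only the keys, the ioctl command, and the WG_MAX_PROPLEN limit come
 * from the code above.
 */
#if 0
#include <sys/ioctl.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_wg.h>
#include <netinet/in.h>

#include <prop/proplib.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int
example_add_peer(int sock, const char *ifname,
    const void *pubkey, size_t pubkey_len, const struct sockaddr_in *endpoint)
{
	prop_dictionary_t peer = prop_dictionary_create();
	prop_dictionary_t aip = prop_dictionary_create();
	prop_array_t allowedips = prop_array_create();
	struct in_addr any = { .s_addr = htonl(INADDR_ANY) };
	struct ifdrv ifd;
	char *xml;
	int error;

	/* Keys must match wg_handle_prop_peer(). */
	prop_dictionary_set_data(peer, "public_key", pubkey, pubkey_len);
	prop_dictionary_set_data(peer, "endpoint", endpoint,
	    sizeof(*endpoint));

	/* Route everything (0.0.0.0/0) over this peer. */
	prop_dictionary_set_int(aip, "family", AF_INET);
	prop_dictionary_set_data(aip, "ip", &any, sizeof(any));
	prop_dictionary_set_uint8(aip, "cidr", 0);
	prop_array_add(allowedips, aip);
	prop_dictionary_set(peer, "allowedips", allowedips);

	/* Externalize to XML and hand it to the kernel. */
	xml = prop_dictionary_externalize(peer);

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = WG_IOCTL_ADD_PEER;
	ifd.ifd_data = xml;
	ifd.ifd_len = strlen(xml) + 1;	/* must stay below WG_MAX_PROPLEN */

	error = ioctl(sock, SIOCSDRVSPEC, &ifd) == -1 ? errno : 0;

	free(xml);
	prop_object_release(aip);
	prop_object_release(allowedips);
	prop_object_release(peer);
	return error;
}
#endif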