1 /* $NetBSD: if_wg.c,v 1.60 2020/09/14 04:57:20 riastradh Exp $ */
2
3 /*
4  * Copyright (C) Ryota Ozaki <ozaki.ryota@gmail.com>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the project nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 /*
33 * This network interface aims to implement the WireGuard protocol.
34 * The implementation is based on the WireGuard paper as of
35 * 2018-06-30 [1]. The paper is referred to in the source code with the
36 * label [W]. Also, the specification of the Noise protocol framework as of
37 * 2018-07-11 [2] is referred to with the label [N].
38 *
39 * [1] https://www.wireguard.com/papers/wireguard.pdf
40 * [2] http://noiseprotocol.org/noise.pdf
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: if_wg.c,v 1.60 2020/09/14 04:57:20 riastradh Exp $");
45
46 #ifdef _KERNEL_OPT
47 #include "opt_altq_enabled.h"
48 #include "opt_inet.h"
49 #endif
50
51 #include <sys/param.h>
52 #include <sys/types.h>
53
54 #include <sys/atomic.h>
55 #include <sys/callout.h>
56 #include <sys/cprng.h>
57 #include <sys/cpu.h>
58 #include <sys/device.h>
59 #include <sys/domain.h>
60 #include <sys/errno.h>
61 #include <sys/intr.h>
62 #include <sys/ioctl.h>
63 #include <sys/kernel.h>
64 #include <sys/kmem.h>
65 #include <sys/mbuf.h>
66 #include <sys/module.h>
67 #include <sys/mutex.h>
68 #include <sys/once.h>
69 #include <sys/percpu.h>
70 #include <sys/pserialize.h>
71 #include <sys/psref.h>
72 #include <sys/queue.h>
73 #include <sys/rwlock.h>
74 #include <sys/socket.h>
75 #include <sys/socketvar.h>
76 #include <sys/sockio.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/systm.h>
80 #include <sys/thmap.h>
81 #include <sys/threadpool.h>
82 #include <sys/time.h>
83 #include <sys/timespec.h>
84 #include <sys/workqueue.h>
85
86 #include <net/bpf.h>
87 #include <net/if.h>
88 #include <net/if_types.h>
89 #include <net/if_wg.h>
90 #include <net/pktqueue.h>
91 #include <net/route.h>
92
93 #include <netinet/in.h>
94 #include <netinet/in_pcb.h>
95 #include <netinet/in_var.h>
96 #include <netinet/ip.h>
97 #include <netinet/ip_var.h>
98 #include <netinet/udp.h>
99 #include <netinet/udp_var.h>
100
101 #ifdef INET6
102 #include <netinet/ip6.h>
103 #include <netinet6/in6_pcb.h>
104 #include <netinet6/in6_var.h>
105 #include <netinet6/ip6_var.h>
106 #include <netinet6/udp6_var.h>
107 #endif /* INET6 */
108
109 #include <prop/proplib.h>
110
111 #include <crypto/blake2/blake2s.h>
112 #include <crypto/sodium/crypto_aead_chacha20poly1305.h>
113 #include <crypto/sodium/crypto_aead_xchacha20poly1305.h>
114 #include <crypto/sodium/crypto_scalarmult.h>
115
116 #include "ioconf.h"
117
118 #ifdef WG_RUMPKERNEL
119 #include "wg_user.h"
120 #endif
121
122 /*
123 * Data structures
124 * - struct wg_softc is an instance of a wg interface
125 *   - It has a list of peers (struct wg_peer)
126 *   - It has a threadpool job that sends/receives handshake messages and
127 *     runs event handlers
128 *   - It has its own two routing tables: one for IPv4 and the other for IPv6
129 * - struct wg_peer represents a peer
130 *   - It has a struct work to handle handshakes and timer tasks
131 *   - It has a pair of session instances (struct wg_session)
132 *   - It has a pair of endpoint instances (struct wg_sockaddr)
133 *     - Normally one endpoint is used; the second one is used only on
134 *       a peer migration (a change of the peer's IP address)
135 *   - It has a list of IP addresses and subnetworks called allowedips
136 *     (struct wg_allowedip)
137 *     - A packet sent over a session is allowed if its destination matches
138 *       any IP address or subnetwork in the list
139 * - struct wg_session represents a session of a secure tunnel with a peer
140 *   - Two session instances belong to a peer: a stable session and an
141 *     unstable session
142 *   - A handshake for a session always starts on the unstable instance
143 *   - Once a session is established, its instance becomes stable and the
144 *     other becomes unstable instead
145 *   - Data messages are always sent via the stable session
146 *
147 * Locking notes:
148 * - Each wg has a mutex(9) wg_lock, and a
rwlock(9) wg_rwlock 149 * - Changes to the peer list are serialized by wg_lock 150 * - The peer list may be read with pserialize(9) and psref(9) 151 * - The rwlock (wg_rwlock) protects the routing tables (wg_rtable_ipv[46]) 152 * => XXX replace by pserialize when routing table is psz-safe 153 * - Each peer (struct wg_peer, wgp) has a mutex wgp_lock, which can be taken 154 * only in thread context and serializes: 155 * - the stable and unstable session pointers 156 * - all unstable session state 157 * - Packet processing may be done in softint context: 158 * - The stable session can be read under pserialize(9) or psref(9) 159 * - The stable session is always ESTABLISHED 160 * - On a session swap, we must wait for all readers to release a 161 * reference to a stable session before changing wgs_state and 162 * session states 163 * - Lock order: wg_lock -> wgp_lock 164 */ 165 166 167 #define WGLOG(level, fmt, args...) \ 168 log(level, "%s: " fmt, __func__, ##args) 169 170 /* Debug options */ 171 #ifdef WG_DEBUG 172 /* Output debug logs */ 173 #ifndef WG_DEBUG_LOG 174 #define WG_DEBUG_LOG 175 #endif 176 /* Output trace logs */ 177 #ifndef WG_DEBUG_TRACE 178 #define WG_DEBUG_TRACE 179 #endif 180 /* Output hash values, etc. */ 181 #ifndef WG_DEBUG_DUMP 182 #define WG_DEBUG_DUMP 183 #endif 184 /* Make some internal parameters configurable for testing and debugging */ 185 #ifndef WG_DEBUG_PARAMS 186 #define WG_DEBUG_PARAMS 187 #endif 188 #endif 189 190 #ifdef WG_DEBUG_TRACE 191 #define WG_TRACE(msg) \ 192 log(LOG_DEBUG, "%s:%d: %s\n", __func__, __LINE__, (msg)) 193 #else 194 #define WG_TRACE(msg) __nothing 195 #endif 196 197 #ifdef WG_DEBUG_LOG 198 #define WG_DLOG(fmt, args...) log(LOG_DEBUG, "%s: " fmt, __func__, ##args) 199 #else 200 #define WG_DLOG(fmt, args...) __nothing 201 #endif 202 203 #define WG_LOG_RATECHECK(wgprc, level, fmt, args...) do { \ 204 if (ppsratecheck(&(wgprc)->wgprc_lasttime, \ 205 &(wgprc)->wgprc_curpps, 1)) { \ 206 log(level, fmt, ##args); \ 207 } \ 208 } while (0) 209 210 #ifdef WG_DEBUG_PARAMS 211 static bool wg_force_underload = false; 212 #endif 213 214 #ifdef WG_DEBUG_DUMP 215 216 static char * 217 gethexdump(const char *p, size_t n) 218 { 219 char *buf; 220 size_t i; 221 222 if (n > SIZE_MAX/3 - 1) 223 return NULL; 224 buf = kmem_alloc(3*n + 1, KM_NOSLEEP); 225 if (buf == NULL) 226 return NULL; 227 for (i = 0; i < n; i++) 228 snprintf(buf + 3*i, 3 + 1, " %02hhx", p[i]); 229 return buf; 230 } 231 232 static void 233 puthexdump(char *buf, const void *p, size_t n) 234 { 235 236 if (buf == NULL) 237 return; 238 kmem_free(buf, 3*n + 1); 239 } 240 241 #ifdef WG_RUMPKERNEL 242 static void 243 wg_dump_buf(const char *func, const char *buf, const size_t size) 244 { 245 char *hex = gethexdump(buf, size); 246 247 log(LOG_DEBUG, "%s: %s\n", func, hex ? hex : "(enomem)"); 248 puthexdump(hex, buf, size); 249 } 250 #endif 251 252 static void 253 wg_dump_hash(const uint8_t *func, const uint8_t *name, const uint8_t *hash, 254 const size_t size) 255 { 256 char *hex = gethexdump(hash, size); 257 258 log(LOG_DEBUG, "%s: %s: %s\n", func, name, hex ? 
hex : "(enomem)"); 259 puthexdump(hex, hash, size); 260 } 261 262 #define WG_DUMP_HASH(name, hash) \ 263 wg_dump_hash(__func__, name, hash, WG_HASH_LEN) 264 #define WG_DUMP_HASH48(name, hash) \ 265 wg_dump_hash(__func__, name, hash, 48) 266 #define WG_DUMP_BUF(buf, size) \ 267 wg_dump_buf(__func__, buf, size) 268 #else 269 #define WG_DUMP_HASH(name, hash) __nothing 270 #define WG_DUMP_HASH48(name, hash) __nothing 271 #define WG_DUMP_BUF(buf, size) __nothing 272 #endif /* WG_DEBUG_DUMP */ 273 274 #define WG_MTU 1420 275 #define WG_ALLOWEDIPS 16 276 277 #define CURVE25519_KEY_LEN 32 278 #define TAI64N_LEN sizeof(uint32_t) * 3 279 #define POLY1305_AUTHTAG_LEN 16 280 #define HMAC_BLOCK_LEN 64 281 282 /* [N] 4.1: "DHLEN must be 32 or greater." WireGuard chooses 32. */ 283 /* [N] 4.3: Hash functions */ 284 #define NOISE_DHLEN 32 285 /* [N] 4.3: "Must be 32 or 64." WireGuard chooses 32. */ 286 #define NOISE_HASHLEN 32 287 #define NOISE_BLOCKLEN 64 288 #define NOISE_HKDF_OUTPUT_LEN NOISE_HASHLEN 289 /* [N] 5.1: "k" */ 290 #define NOISE_CIPHER_KEY_LEN 32 291 /* 292 * [N] 9.2: "psk" 293 * "... psk is a 32-byte secret value provided by the application." 294 */ 295 #define NOISE_PRESHARED_KEY_LEN 32 296 297 #define WG_STATIC_KEY_LEN CURVE25519_KEY_LEN 298 #define WG_TIMESTAMP_LEN TAI64N_LEN 299 300 #define WG_PRESHARED_KEY_LEN NOISE_PRESHARED_KEY_LEN 301 302 #define WG_COOKIE_LEN 16 303 #define WG_MAC_LEN 16 304 #define WG_RANDVAL_LEN 24 305 306 #define WG_EPHEMERAL_KEY_LEN CURVE25519_KEY_LEN 307 /* [N] 5.2: "ck: A chaining key of HASHLEN bytes" */ 308 #define WG_CHAINING_KEY_LEN NOISE_HASHLEN 309 /* [N] 5.2: "h: A hash output of HASHLEN bytes" */ 310 #define WG_HASH_LEN NOISE_HASHLEN 311 #define WG_CIPHER_KEY_LEN NOISE_CIPHER_KEY_LEN 312 #define WG_DH_OUTPUT_LEN NOISE_DHLEN 313 #define WG_KDF_OUTPUT_LEN NOISE_HKDF_OUTPUT_LEN 314 #define WG_AUTHTAG_LEN POLY1305_AUTHTAG_LEN 315 #define WG_DATA_KEY_LEN 32 316 #define WG_SALT_LEN 24 317 318 /* 319 * The protocol messages 320 */ 321 struct wg_msg { 322 uint32_t wgm_type; 323 } __packed; 324 325 /* [W] 5.4.2 First Message: Initiator to Responder */ 326 struct wg_msg_init { 327 uint32_t wgmi_type; 328 uint32_t wgmi_sender; 329 uint8_t wgmi_ephemeral[WG_EPHEMERAL_KEY_LEN]; 330 uint8_t wgmi_static[WG_STATIC_KEY_LEN + WG_AUTHTAG_LEN]; 331 uint8_t wgmi_timestamp[WG_TIMESTAMP_LEN + WG_AUTHTAG_LEN]; 332 uint8_t wgmi_mac1[WG_MAC_LEN]; 333 uint8_t wgmi_mac2[WG_MAC_LEN]; 334 } __packed; 335 336 /* [W] 5.4.3 Second Message: Responder to Initiator */ 337 struct wg_msg_resp { 338 uint32_t wgmr_type; 339 uint32_t wgmr_sender; 340 uint32_t wgmr_receiver; 341 uint8_t wgmr_ephemeral[WG_EPHEMERAL_KEY_LEN]; 342 uint8_t wgmr_empty[0 + WG_AUTHTAG_LEN]; 343 uint8_t wgmr_mac1[WG_MAC_LEN]; 344 uint8_t wgmr_mac2[WG_MAC_LEN]; 345 } __packed; 346 347 /* [W] 5.4.6 Subsequent Messages: Transport Data Messages */ 348 struct wg_msg_data { 349 uint32_t wgmd_type; 350 uint32_t wgmd_receiver; 351 uint64_t wgmd_counter; 352 uint32_t wgmd_packet[0]; 353 } __packed; 354 355 /* [W] 5.4.7 Under Load: Cookie Reply Message */ 356 struct wg_msg_cookie { 357 uint32_t wgmc_type; 358 uint32_t wgmc_receiver; 359 uint8_t wgmc_salt[WG_SALT_LEN]; 360 uint8_t wgmc_cookie[WG_COOKIE_LEN + WG_AUTHTAG_LEN]; 361 } __packed; 362 363 #define WG_MSG_TYPE_INIT 1 364 #define WG_MSG_TYPE_RESP 2 365 #define WG_MSG_TYPE_COOKIE 3 366 #define WG_MSG_TYPE_DATA 4 367 #define WG_MSG_TYPE_MAX WG_MSG_TYPE_DATA 368 369 /* Sliding windows */ 370 371 #define SLIWIN_BITS 2048u 372 #define SLIWIN_TYPE uint32_t 373 #define 
SLIWIN_BPW NBBY*sizeof(SLIWIN_TYPE) 374 #define SLIWIN_WORDS howmany(SLIWIN_BITS, SLIWIN_BPW) 375 #define SLIWIN_NPKT (SLIWIN_BITS - NBBY*sizeof(SLIWIN_TYPE)) 376 377 struct sliwin { 378 SLIWIN_TYPE B[SLIWIN_WORDS]; 379 uint64_t T; 380 }; 381 382 static void 383 sliwin_reset(struct sliwin *W) 384 { 385 386 memset(W, 0, sizeof(*W)); 387 } 388 389 static int 390 sliwin_check_fast(const volatile struct sliwin *W, uint64_t S) 391 { 392 393 /* 394 * If it's more than one window older than the highest sequence 395 * number we've seen, reject. 396 */ 397 #ifdef __HAVE_ATOMIC64_LOADSTORE 398 if (S + SLIWIN_NPKT < atomic_load_relaxed(&W->T)) 399 return EAUTH; 400 #endif 401 402 /* 403 * Otherwise, we need to take the lock to decide, so don't 404 * reject just yet. Caller must serialize a call to 405 * sliwin_update in this case. 406 */ 407 return 0; 408 } 409 410 static int 411 sliwin_update(struct sliwin *W, uint64_t S) 412 { 413 unsigned word, bit; 414 415 /* 416 * If it's more than one window older than the highest sequence 417 * number we've seen, reject. 418 */ 419 if (S + SLIWIN_NPKT < W->T) 420 return EAUTH; 421 422 /* 423 * If it's higher than the highest sequence number we've seen, 424 * advance the window. 425 */ 426 if (S > W->T) { 427 uint64_t i = W->T / SLIWIN_BPW; 428 uint64_t j = S / SLIWIN_BPW; 429 unsigned k; 430 431 for (k = 0; k < MIN(j - i, SLIWIN_WORDS); k++) 432 W->B[(i + k + 1) % SLIWIN_WORDS] = 0; 433 #ifdef __HAVE_ATOMIC64_LOADSTORE 434 atomic_store_relaxed(&W->T, S); 435 #else 436 W->T = S; 437 #endif 438 } 439 440 /* Test and set the bit -- if already set, reject. */ 441 word = (S / SLIWIN_BPW) % SLIWIN_WORDS; 442 bit = S % SLIWIN_BPW; 443 if (W->B[word] & (1UL << bit)) 444 return EAUTH; 445 W->B[word] |= 1UL << bit; 446 447 /* Accept! 
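	 * Sequence number S is within the window and not yet seen:
	 * remember it in the bitmap and tell the caller to accept.
	 *
	 * Illustrative numbers (an editorial sketch, not from [W]): with
	 * SLIWIN_BITS = 2048 and 32-bit words, SLIWIN_NPKT = 2048 - 32 =
	 * 2016.  If the highest sequence number seen so far is T = 10000,
	 * then any S <= 7983 satisfies S + SLIWIN_NPKT < T above and is
	 * rejected as too old; an S > 10000 advances T and zeroes the
	 * bitmap words that are recycled to cover the new range; and a
	 * replay of an already-accepted S in (7983, 10000] finds its bit
	 * set and is rejected.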
*/ 448 return 0; 449 } 450 451 struct wg_session { 452 struct wg_peer *wgs_peer; 453 struct psref_target 454 wgs_psref; 455 456 int wgs_state; 457 #define WGS_STATE_UNKNOWN 0 458 #define WGS_STATE_INIT_ACTIVE 1 459 #define WGS_STATE_INIT_PASSIVE 2 460 #define WGS_STATE_ESTABLISHED 3 461 #define WGS_STATE_DESTROYING 4 462 463 time_t wgs_time_established; 464 time_t wgs_time_last_data_sent; 465 bool wgs_is_initiator; 466 467 uint32_t wgs_local_index; 468 uint32_t wgs_remote_index; 469 #ifdef __HAVE_ATOMIC64_LOADSTORE 470 volatile uint64_t 471 wgs_send_counter; 472 #else 473 kmutex_t wgs_send_counter_lock; 474 uint64_t wgs_send_counter; 475 #endif 476 477 struct { 478 kmutex_t lock; 479 struct sliwin window; 480 } *wgs_recvwin; 481 482 uint8_t wgs_handshake_hash[WG_HASH_LEN]; 483 uint8_t wgs_chaining_key[WG_CHAINING_KEY_LEN]; 484 uint8_t wgs_ephemeral_key_pub[WG_EPHEMERAL_KEY_LEN]; 485 uint8_t wgs_ephemeral_key_priv[WG_EPHEMERAL_KEY_LEN]; 486 uint8_t wgs_ephemeral_key_peer[WG_EPHEMERAL_KEY_LEN]; 487 uint8_t wgs_tkey_send[WG_DATA_KEY_LEN]; 488 uint8_t wgs_tkey_recv[WG_DATA_KEY_LEN]; 489 }; 490 491 struct wg_sockaddr { 492 union { 493 struct sockaddr_storage _ss; 494 struct sockaddr _sa; 495 struct sockaddr_in _sin; 496 struct sockaddr_in6 _sin6; 497 }; 498 struct psref_target wgsa_psref; 499 }; 500 501 #define wgsatoss(wgsa) (&(wgsa)->_ss) 502 #define wgsatosa(wgsa) (&(wgsa)->_sa) 503 #define wgsatosin(wgsa) (&(wgsa)->_sin) 504 #define wgsatosin6(wgsa) (&(wgsa)->_sin6) 505 506 #define wgsa_family(wgsa) (wgsatosa(wgsa)->sa_family) 507 508 struct wg_peer; 509 struct wg_allowedip { 510 struct radix_node wga_nodes[2]; 511 struct wg_sockaddr _wga_sa_addr; 512 struct wg_sockaddr _wga_sa_mask; 513 #define wga_sa_addr _wga_sa_addr._sa 514 #define wga_sa_mask _wga_sa_mask._sa 515 516 int wga_family; 517 uint8_t wga_cidr; 518 union { 519 struct in_addr _ip4; 520 struct in6_addr _ip6; 521 } wga_addr; 522 #define wga_addr4 wga_addr._ip4 523 #define wga_addr6 wga_addr._ip6 524 525 struct wg_peer *wga_peer; 526 }; 527 528 typedef uint8_t wg_timestamp_t[WG_TIMESTAMP_LEN]; 529 530 struct wg_ppsratecheck { 531 struct timeval wgprc_lasttime; 532 int wgprc_curpps; 533 }; 534 535 struct wg_softc; 536 struct wg_peer { 537 struct wg_softc *wgp_sc; 538 char wgp_name[WG_PEER_NAME_MAXLEN + 1]; 539 struct pslist_entry wgp_peerlist_entry; 540 pserialize_t wgp_psz; 541 struct psref_target wgp_psref; 542 kmutex_t *wgp_lock; 543 kmutex_t *wgp_intr_lock; 544 545 uint8_t wgp_pubkey[WG_STATIC_KEY_LEN]; 546 struct wg_sockaddr *wgp_endpoint; 547 struct wg_sockaddr *wgp_endpoint0; 548 volatile unsigned wgp_endpoint_changing; 549 bool wgp_endpoint_available; 550 551 /* The preshared key (optional) */ 552 uint8_t wgp_psk[WG_PRESHARED_KEY_LEN]; 553 554 struct wg_session *wgp_session_stable; 555 struct wg_session *wgp_session_unstable; 556 557 /* first outgoing packet awaiting session initiation */ 558 struct mbuf *wgp_pending; 559 560 /* timestamp in big-endian */ 561 wg_timestamp_t wgp_timestamp_latest_init; 562 563 struct timespec wgp_last_handshake_time; 564 565 callout_t wgp_rekey_timer; 566 callout_t wgp_handshake_timeout_timer; 567 callout_t wgp_session_dtor_timer; 568 569 time_t wgp_handshake_start_time; 570 571 int wgp_n_allowedips; 572 struct wg_allowedip wgp_allowedips[WG_ALLOWEDIPS]; 573 574 time_t wgp_latest_cookie_time; 575 uint8_t wgp_latest_cookie[WG_COOKIE_LEN]; 576 uint8_t wgp_last_sent_mac1[WG_MAC_LEN]; 577 bool wgp_last_sent_mac1_valid; 578 uint8_t wgp_last_sent_cookie[WG_COOKIE_LEN]; 579 bool 
wgp_last_sent_cookie_valid; 580 581 time_t wgp_last_msg_received_time[WG_MSG_TYPE_MAX]; 582 583 time_t wgp_last_genrandval_time; 584 uint32_t wgp_randval; 585 586 struct wg_ppsratecheck wgp_ppsratecheck; 587 588 struct work wgp_work; 589 unsigned int wgp_tasks; 590 #define WGP_TASK_SEND_INIT_MESSAGE __BIT(0) 591 #define WGP_TASK_RETRY_HANDSHAKE __BIT(1) 592 #define WGP_TASK_ESTABLISH_SESSION __BIT(2) 593 #define WGP_TASK_ENDPOINT_CHANGED __BIT(3) 594 #define WGP_TASK_SEND_KEEPALIVE_MESSAGE __BIT(4) 595 #define WGP_TASK_DESTROY_PREV_SESSION __BIT(5) 596 }; 597 598 struct wg_ops; 599 600 struct wg_softc { 601 struct ifnet wg_if; 602 LIST_ENTRY(wg_softc) wg_list; 603 kmutex_t *wg_lock; 604 kmutex_t *wg_intr_lock; 605 krwlock_t *wg_rwlock; 606 607 uint8_t wg_privkey[WG_STATIC_KEY_LEN]; 608 uint8_t wg_pubkey[WG_STATIC_KEY_LEN]; 609 610 int wg_npeers; 611 struct pslist_head wg_peers; 612 struct thmap *wg_peers_bypubkey; 613 struct thmap *wg_peers_byname; 614 struct thmap *wg_sessions_byindex; 615 uint16_t wg_listen_port; 616 617 struct threadpool *wg_threadpool; 618 619 struct threadpool_job wg_job; 620 int wg_upcalls; 621 #define WG_UPCALL_INET __BIT(0) 622 #define WG_UPCALL_INET6 __BIT(1) 623 624 #ifdef INET 625 struct socket *wg_so4; 626 struct radix_node_head *wg_rtable_ipv4; 627 #endif 628 #ifdef INET6 629 struct socket *wg_so6; 630 struct radix_node_head *wg_rtable_ipv6; 631 #endif 632 633 struct wg_ppsratecheck wg_ppsratecheck; 634 635 struct wg_ops *wg_ops; 636 637 #ifdef WG_RUMPKERNEL 638 struct wg_user *wg_user; 639 #endif 640 }; 641 642 /* [W] 6.1 Preliminaries */ 643 #define WG_REKEY_AFTER_MESSAGES (1ULL << 60) 644 #define WG_REJECT_AFTER_MESSAGES (UINT64_MAX - (1 << 13)) 645 #define WG_REKEY_AFTER_TIME 120 646 #define WG_REJECT_AFTER_TIME 180 647 #define WG_REKEY_ATTEMPT_TIME 90 648 #define WG_REKEY_TIMEOUT 5 649 #define WG_KEEPALIVE_TIMEOUT 10 650 651 #define WG_COOKIE_TIME 120 652 #define WG_RANDVAL_TIME (2 * 60) 653 654 static uint64_t wg_rekey_after_messages = WG_REKEY_AFTER_MESSAGES; 655 static uint64_t wg_reject_after_messages = WG_REJECT_AFTER_MESSAGES; 656 static unsigned wg_rekey_after_time = WG_REKEY_AFTER_TIME; 657 static unsigned wg_reject_after_time = WG_REJECT_AFTER_TIME; 658 static unsigned wg_rekey_attempt_time = WG_REKEY_ATTEMPT_TIME; 659 static unsigned wg_rekey_timeout = WG_REKEY_TIMEOUT; 660 static unsigned wg_keepalive_timeout = WG_KEEPALIVE_TIMEOUT; 661 662 static struct mbuf * 663 wg_get_mbuf(size_t, size_t); 664 665 static int wg_send_data_msg(struct wg_peer *, struct wg_session *, 666 struct mbuf *); 667 static int wg_send_cookie_msg(struct wg_softc *, struct wg_peer *, 668 const uint32_t, const uint8_t [], const struct sockaddr *); 669 static int wg_send_handshake_msg_resp(struct wg_softc *, struct wg_peer *, 670 struct wg_session *, const struct wg_msg_init *); 671 static void wg_send_keepalive_msg(struct wg_peer *, struct wg_session *); 672 673 static struct wg_peer * 674 wg_pick_peer_by_sa(struct wg_softc *, const struct sockaddr *, 675 struct psref *); 676 static struct wg_peer * 677 wg_lookup_peer_by_pubkey(struct wg_softc *, 678 const uint8_t [], struct psref *); 679 680 static struct wg_session * 681 wg_lookup_session_by_index(struct wg_softc *, 682 const uint32_t, struct psref *); 683 684 static void wg_update_endpoint_if_necessary(struct wg_peer *, 685 const struct sockaddr *); 686 687 static void wg_schedule_rekey_timer(struct wg_peer *); 688 static void wg_schedule_session_dtor_timer(struct wg_peer *); 689 690 static bool wg_is_underload(struct 
wg_softc *, struct wg_peer *, int); 691 static void wg_calculate_keys(struct wg_session *, const bool); 692 693 static void wg_clear_states(struct wg_session *); 694 695 static void wg_get_peer(struct wg_peer *, struct psref *); 696 static void wg_put_peer(struct wg_peer *, struct psref *); 697 698 static int wg_send_so(struct wg_peer *, struct mbuf *); 699 static int wg_send_udp(struct wg_peer *, struct mbuf *); 700 static int wg_output(struct ifnet *, struct mbuf *, 701 const struct sockaddr *, const struct rtentry *); 702 static void wg_input(struct ifnet *, struct mbuf *, const int); 703 static int wg_ioctl(struct ifnet *, u_long, void *); 704 static int wg_bind_port(struct wg_softc *, const uint16_t); 705 static int wg_init(struct ifnet *); 706 #ifdef ALTQ 707 static void wg_start(struct ifnet *); 708 #endif 709 static void wg_stop(struct ifnet *, int); 710 711 static void wg_peer_work(struct work *, void *); 712 static void wg_job(struct threadpool_job *); 713 static void wgintr(void *); 714 static void wg_purge_pending_packets(struct wg_peer *); 715 716 static int wg_clone_create(struct if_clone *, int); 717 static int wg_clone_destroy(struct ifnet *); 718 719 struct wg_ops { 720 int (*send_hs_msg)(struct wg_peer *, struct mbuf *); 721 int (*send_data_msg)(struct wg_peer *, struct mbuf *); 722 void (*input)(struct ifnet *, struct mbuf *, const int); 723 int (*bind_port)(struct wg_softc *, const uint16_t); 724 }; 725 726 struct wg_ops wg_ops_rumpkernel = { 727 .send_hs_msg = wg_send_so, 728 .send_data_msg = wg_send_udp, 729 .input = wg_input, 730 .bind_port = wg_bind_port, 731 }; 732 733 #ifdef WG_RUMPKERNEL 734 static bool wg_user_mode(struct wg_softc *); 735 static int wg_ioctl_linkstr(struct wg_softc *, struct ifdrv *); 736 737 static int wg_send_user(struct wg_peer *, struct mbuf *); 738 static void wg_input_user(struct ifnet *, struct mbuf *, const int); 739 static int wg_bind_port_user(struct wg_softc *, const uint16_t); 740 741 struct wg_ops wg_ops_rumpuser = { 742 .send_hs_msg = wg_send_user, 743 .send_data_msg = wg_send_user, 744 .input = wg_input_user, 745 .bind_port = wg_bind_port_user, 746 }; 747 #endif 748 749 #define WG_PEER_READER_FOREACH(wgp, wg) \ 750 PSLIST_READER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \ 751 wgp_peerlist_entry) 752 #define WG_PEER_WRITER_FOREACH(wgp, wg) \ 753 PSLIST_WRITER_FOREACH((wgp), &(wg)->wg_peers, struct wg_peer, \ 754 wgp_peerlist_entry) 755 #define WG_PEER_WRITER_INSERT_HEAD(wgp, wg) \ 756 PSLIST_WRITER_INSERT_HEAD(&(wg)->wg_peers, (wgp), wgp_peerlist_entry) 757 #define WG_PEER_WRITER_REMOVE(wgp) \ 758 PSLIST_WRITER_REMOVE((wgp), wgp_peerlist_entry) 759 760 struct wg_route { 761 struct radix_node wgr_nodes[2]; 762 struct wg_peer *wgr_peer; 763 }; 764 765 static struct radix_node_head * 766 wg_rnh(struct wg_softc *wg, const int family) 767 { 768 769 switch (family) { 770 case AF_INET: 771 return wg->wg_rtable_ipv4; 772 #ifdef INET6 773 case AF_INET6: 774 return wg->wg_rtable_ipv6; 775 #endif 776 default: 777 return NULL; 778 } 779 } 780 781 782 /* 783 * Global variables 784 */ 785 static volatile unsigned wg_count __cacheline_aligned; 786 787 struct psref_class *wg_psref_class __read_mostly; 788 789 static struct if_clone wg_cloner = 790 IF_CLONE_INITIALIZER("wg", wg_clone_create, wg_clone_destroy); 791 792 static struct pktqueue *wg_pktq __read_mostly; 793 static struct workqueue *wg_wq __read_mostly; 794 795 void wgattach(int); 796 /* ARGSUSED */ 797 void 798 wgattach(int count) 799 { 800 /* 801 * Nothing to do here, 
initialization is handled by the 802 * module initialization code in wginit() below). 803 */ 804 } 805 806 static void 807 wginit(void) 808 { 809 810 wg_psref_class = psref_class_create("wg", IPL_SOFTNET); 811 812 if_clone_attach(&wg_cloner); 813 } 814 815 /* 816 * XXX Kludge: This should just happen in wginit, but workqueue_create 817 * cannot be run until after CPUs have been detected, and wginit runs 818 * before configure. 819 */ 820 static int 821 wginitqueues(void) 822 { 823 int error __diagused; 824 825 wg_pktq = pktq_create(IFQ_MAXLEN, wgintr, NULL); 826 KASSERT(wg_pktq != NULL); 827 828 error = workqueue_create(&wg_wq, "wgpeer", wg_peer_work, NULL, 829 PRI_NONE, IPL_SOFTNET, WQ_MPSAFE|WQ_PERCPU); 830 KASSERT(error == 0); 831 832 return 0; 833 } 834 835 static void 836 wg_guarantee_initialized(void) 837 { 838 static ONCE_DECL(init); 839 int error __diagused; 840 841 error = RUN_ONCE(&init, wginitqueues); 842 KASSERT(error == 0); 843 } 844 845 static int 846 wg_count_inc(void) 847 { 848 unsigned o, n; 849 850 do { 851 o = atomic_load_relaxed(&wg_count); 852 if (o == UINT_MAX) 853 return ENFILE; 854 n = o + 1; 855 } while (atomic_cas_uint(&wg_count, o, n) != o); 856 857 return 0; 858 } 859 860 static void 861 wg_count_dec(void) 862 { 863 unsigned c __diagused; 864 865 c = atomic_dec_uint_nv(&wg_count); 866 KASSERT(c != UINT_MAX); 867 } 868 869 static int 870 wgdetach(void) 871 { 872 873 /* Prevent new interface creation. */ 874 if_clone_detach(&wg_cloner); 875 876 /* Check whether there are any existing interfaces. */ 877 if (atomic_load_relaxed(&wg_count)) { 878 /* Back out -- reattach the cloner. */ 879 if_clone_attach(&wg_cloner); 880 return EBUSY; 881 } 882 883 /* No interfaces left. Nuke it. */ 884 workqueue_destroy(wg_wq); 885 pktq_destroy(wg_pktq); 886 psref_class_destroy(wg_psref_class); 887 888 return 0; 889 } 890 891 static void 892 wg_init_key_and_hash(uint8_t ckey[WG_CHAINING_KEY_LEN], 893 uint8_t hash[WG_HASH_LEN]) 894 { 895 /* [W] 5.4: CONSTRUCTION */ 896 const char *signature = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; 897 /* [W] 5.4: IDENTIFIER */ 898 const char *id = "WireGuard v1 zx2c4 Jason@zx2c4.com"; 899 struct blake2s state; 900 901 blake2s(ckey, WG_CHAINING_KEY_LEN, NULL, 0, 902 signature, strlen(signature)); 903 904 CTASSERT(WG_HASH_LEN == WG_CHAINING_KEY_LEN); 905 memcpy(hash, ckey, WG_CHAINING_KEY_LEN); 906 907 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 908 blake2s_update(&state, ckey, WG_CHAINING_KEY_LEN); 909 blake2s_update(&state, id, strlen(id)); 910 blake2s_final(&state, hash); 911 912 WG_DUMP_HASH("ckey", ckey); 913 WG_DUMP_HASH("hash", hash); 914 } 915 916 static void 917 wg_algo_hash(uint8_t hash[WG_HASH_LEN], const uint8_t input[], 918 const size_t inputsize) 919 { 920 struct blake2s state; 921 922 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 923 blake2s_update(&state, hash, WG_HASH_LEN); 924 blake2s_update(&state, input, inputsize); 925 blake2s_final(&state, hash); 926 } 927 928 static void 929 wg_algo_mac(uint8_t out[], const size_t outsize, 930 const uint8_t key[], const size_t keylen, 931 const uint8_t input1[], const size_t input1len, 932 const uint8_t input2[], const size_t input2len) 933 { 934 struct blake2s state; 935 936 blake2s_init(&state, outsize, key, keylen); 937 938 blake2s_update(&state, input1, input1len); 939 if (input2 != NULL) 940 blake2s_update(&state, input2, input2len); 941 blake2s_final(&state, out); 942 } 943 944 static void 945 wg_algo_mac_mac1(uint8_t out[], const size_t outsize, 946 const uint8_t input1[], const size_t 
input1len, 947 const uint8_t input2[], const size_t input2len) 948 { 949 struct blake2s state; 950 /* [W] 5.4: LABEL-MAC1 */ 951 const char *label = "mac1----"; 952 uint8_t key[WG_HASH_LEN]; 953 954 blake2s_init(&state, sizeof(key), NULL, 0); 955 blake2s_update(&state, label, strlen(label)); 956 blake2s_update(&state, input1, input1len); 957 blake2s_final(&state, key); 958 959 blake2s_init(&state, outsize, key, sizeof(key)); 960 if (input2 != NULL) 961 blake2s_update(&state, input2, input2len); 962 blake2s_final(&state, out); 963 } 964 965 static void 966 wg_algo_mac_cookie(uint8_t out[], const size_t outsize, 967 const uint8_t input1[], const size_t input1len) 968 { 969 struct blake2s state; 970 /* [W] 5.4: LABEL-COOKIE */ 971 const char *label = "cookie--"; 972 973 blake2s_init(&state, outsize, NULL, 0); 974 blake2s_update(&state, label, strlen(label)); 975 blake2s_update(&state, input1, input1len); 976 blake2s_final(&state, out); 977 } 978 979 static void 980 wg_algo_generate_keypair(uint8_t pubkey[WG_EPHEMERAL_KEY_LEN], 981 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]) 982 { 983 984 CTASSERT(WG_EPHEMERAL_KEY_LEN == crypto_scalarmult_curve25519_BYTES); 985 986 cprng_strong(kern_cprng, privkey, WG_EPHEMERAL_KEY_LEN, 0); 987 crypto_scalarmult_base(pubkey, privkey); 988 } 989 990 static void 991 wg_algo_dh(uint8_t out[WG_DH_OUTPUT_LEN], 992 const uint8_t privkey[WG_STATIC_KEY_LEN], 993 const uint8_t pubkey[WG_STATIC_KEY_LEN]) 994 { 995 996 CTASSERT(WG_STATIC_KEY_LEN == crypto_scalarmult_curve25519_BYTES); 997 998 int ret __diagused = crypto_scalarmult(out, privkey, pubkey); 999 KASSERT(ret == 0); 1000 } 1001 1002 static void 1003 wg_algo_hmac(uint8_t out[], const size_t outlen, 1004 const uint8_t key[], const size_t keylen, 1005 const uint8_t in[], const size_t inlen) 1006 { 1007 #define IPAD 0x36 1008 #define OPAD 0x5c 1009 uint8_t hmackey[HMAC_BLOCK_LEN] = {0}; 1010 uint8_t ipad[HMAC_BLOCK_LEN]; 1011 uint8_t opad[HMAC_BLOCK_LEN]; 1012 int i; 1013 struct blake2s state; 1014 1015 KASSERT(outlen == WG_HASH_LEN); 1016 KASSERT(keylen <= HMAC_BLOCK_LEN); 1017 1018 memcpy(hmackey, key, keylen); 1019 1020 for (i = 0; i < sizeof(hmackey); i++) { 1021 ipad[i] = hmackey[i] ^ IPAD; 1022 opad[i] = hmackey[i] ^ OPAD; 1023 } 1024 1025 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 1026 blake2s_update(&state, ipad, sizeof(ipad)); 1027 blake2s_update(&state, in, inlen); 1028 blake2s_final(&state, out); 1029 1030 blake2s_init(&state, WG_HASH_LEN, NULL, 0); 1031 blake2s_update(&state, opad, sizeof(opad)); 1032 blake2s_update(&state, out, WG_HASH_LEN); 1033 blake2s_final(&state, out); 1034 #undef IPAD 1035 #undef OPAD 1036 } 1037 1038 static void 1039 wg_algo_kdf(uint8_t out1[WG_KDF_OUTPUT_LEN], uint8_t out2[WG_KDF_OUTPUT_LEN], 1040 uint8_t out3[WG_KDF_OUTPUT_LEN], const uint8_t ckey[WG_CHAINING_KEY_LEN], 1041 const uint8_t input[], const size_t inputlen) 1042 { 1043 uint8_t tmp1[WG_KDF_OUTPUT_LEN], tmp2[WG_KDF_OUTPUT_LEN + 1]; 1044 uint8_t one[1]; 1045 1046 /* 1047 * [N] 4.3: "an input_key_material byte sequence with length 1048 * either zero bytes, 32 bytes, or DHLEN bytes." 
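	 *
	 * The code below is HKDF from [N] 4.3, instantiated with the
	 * BLAKE2s-based HMAC above (a summary of what follows, not an
	 * extra step):
	 *
	 *	tmp1 = HMAC(ckey, input)
	 *	out1 = HMAC(tmp1, 0x1)
	 *	out2 = HMAC(tmp1, out1 || 0x2)
	 *	out3 = HMAC(tmp1, out2 || 0x3)
	 *
	 * out2 and out3 are computed only when the caller passes non-NULL
	 * buffers for them.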
1049 */ 1050 KASSERT(inputlen == 0 || inputlen == 32 || inputlen == NOISE_DHLEN); 1051 1052 WG_DUMP_HASH("ckey", ckey); 1053 if (input != NULL) 1054 WG_DUMP_HASH("input", input); 1055 wg_algo_hmac(tmp1, sizeof(tmp1), ckey, WG_CHAINING_KEY_LEN, 1056 input, inputlen); 1057 WG_DUMP_HASH("tmp1", tmp1); 1058 one[0] = 1; 1059 wg_algo_hmac(out1, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1060 one, sizeof(one)); 1061 WG_DUMP_HASH("out1", out1); 1062 if (out2 == NULL) 1063 return; 1064 memcpy(tmp2, out1, WG_KDF_OUTPUT_LEN); 1065 tmp2[WG_KDF_OUTPUT_LEN] = 2; 1066 wg_algo_hmac(out2, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1067 tmp2, sizeof(tmp2)); 1068 WG_DUMP_HASH("out2", out2); 1069 if (out3 == NULL) 1070 return; 1071 memcpy(tmp2, out2, WG_KDF_OUTPUT_LEN); 1072 tmp2[WG_KDF_OUTPUT_LEN] = 3; 1073 wg_algo_hmac(out3, WG_KDF_OUTPUT_LEN, tmp1, sizeof(tmp1), 1074 tmp2, sizeof(tmp2)); 1075 WG_DUMP_HASH("out3", out3); 1076 } 1077 1078 static void 1079 wg_algo_dh_kdf(uint8_t ckey[WG_CHAINING_KEY_LEN], 1080 uint8_t cipher_key[WG_CIPHER_KEY_LEN], 1081 const uint8_t local_key[WG_STATIC_KEY_LEN], 1082 const uint8_t remote_key[WG_STATIC_KEY_LEN]) 1083 { 1084 uint8_t dhout[WG_DH_OUTPUT_LEN]; 1085 1086 wg_algo_dh(dhout, local_key, remote_key); 1087 wg_algo_kdf(ckey, cipher_key, NULL, ckey, dhout, sizeof(dhout)); 1088 1089 WG_DUMP_HASH("dhout", dhout); 1090 WG_DUMP_HASH("ckey", ckey); 1091 if (cipher_key != NULL) 1092 WG_DUMP_HASH("cipher_key", cipher_key); 1093 } 1094 1095 static void 1096 wg_algo_aead_enc(uint8_t out[], size_t expected_outsize, const uint8_t key[], 1097 const uint64_t counter, const uint8_t plain[], const size_t plainsize, 1098 const uint8_t auth[], size_t authlen) 1099 { 1100 uint8_t nonce[(32 + 64) / 8] = {0}; 1101 long long unsigned int outsize; 1102 int error __diagused; 1103 1104 le64enc(&nonce[4], counter); 1105 1106 error = crypto_aead_chacha20poly1305_ietf_encrypt(out, &outsize, plain, 1107 plainsize, auth, authlen, NULL, nonce, key); 1108 KASSERT(error == 0); 1109 KASSERT(outsize == expected_outsize); 1110 } 1111 1112 static int 1113 wg_algo_aead_dec(uint8_t out[], size_t expected_outsize, const uint8_t key[], 1114 const uint64_t counter, const uint8_t encrypted[], 1115 const size_t encryptedsize, const uint8_t auth[], size_t authlen) 1116 { 1117 uint8_t nonce[(32 + 64) / 8] = {0}; 1118 long long unsigned int outsize; 1119 int error; 1120 1121 le64enc(&nonce[4], counter); 1122 1123 error = crypto_aead_chacha20poly1305_ietf_decrypt(out, &outsize, NULL, 1124 encrypted, encryptedsize, auth, authlen, nonce, key); 1125 if (error == 0) 1126 KASSERT(outsize == expected_outsize); 1127 return error; 1128 } 1129 1130 static void 1131 wg_algo_xaead_enc(uint8_t out[], const size_t expected_outsize, 1132 const uint8_t key[], const uint8_t plain[], const size_t plainsize, 1133 const uint8_t auth[], size_t authlen, 1134 const uint8_t nonce[WG_SALT_LEN]) 1135 { 1136 long long unsigned int outsize; 1137 int error __diagused; 1138 1139 CTASSERT(WG_SALT_LEN == crypto_aead_xchacha20poly1305_ietf_NPUBBYTES); 1140 error = crypto_aead_xchacha20poly1305_ietf_encrypt(out, &outsize, 1141 plain, plainsize, auth, authlen, NULL, nonce, key); 1142 KASSERT(error == 0); 1143 KASSERT(outsize == expected_outsize); 1144 } 1145 1146 static int 1147 wg_algo_xaead_dec(uint8_t out[], const size_t expected_outsize, 1148 const uint8_t key[], const uint8_t encrypted[], const size_t encryptedsize, 1149 const uint8_t auth[], size_t authlen, 1150 const uint8_t nonce[WG_SALT_LEN]) 1151 { 1152 long long unsigned int outsize; 1153 int 
error; 1154 1155 error = crypto_aead_xchacha20poly1305_ietf_decrypt(out, &outsize, NULL, 1156 encrypted, encryptedsize, auth, authlen, nonce, key); 1157 if (error == 0) 1158 KASSERT(outsize == expected_outsize); 1159 return error; 1160 } 1161 1162 static void 1163 wg_algo_tai64n(wg_timestamp_t timestamp) 1164 { 1165 struct timespec ts; 1166 1167 /* FIXME strict TAI64N (https://cr.yp.to/libtai/tai64.html) */ 1168 getnanotime(&ts); 1169 /* TAI64 label in external TAI64 format */ 1170 be32enc(timestamp, 0x40000000UL + (ts.tv_sec >> 32)); 1171 /* second beginning from 1970 TAI */ 1172 be32enc(timestamp + 4, ts.tv_sec & 0xffffffffU); 1173 /* nanosecond in big-endian format */ 1174 be32enc(timestamp + 8, ts.tv_nsec); 1175 } 1176 1177 /* 1178 * wg_get_stable_session(wgp, psref) 1179 * 1180 * Get a passive reference to the current stable session, or 1181 * return NULL if there is no current stable session. 1182 * 1183 * The pointer is always there but the session is not necessarily 1184 * ESTABLISHED; if it is not ESTABLISHED, return NULL. However, 1185 * the session may transition from ESTABLISHED to DESTROYING while 1186 * holding the passive reference. 1187 */ 1188 static struct wg_session * 1189 wg_get_stable_session(struct wg_peer *wgp, struct psref *psref) 1190 { 1191 int s; 1192 struct wg_session *wgs; 1193 1194 s = pserialize_read_enter(); 1195 wgs = atomic_load_consume(&wgp->wgp_session_stable); 1196 if (__predict_false(wgs->wgs_state != WGS_STATE_ESTABLISHED)) 1197 wgs = NULL; 1198 else 1199 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class); 1200 pserialize_read_exit(s); 1201 1202 return wgs; 1203 } 1204 1205 static void 1206 wg_put_session(struct wg_session *wgs, struct psref *psref) 1207 { 1208 1209 psref_release(psref, &wgs->wgs_psref, wg_psref_class); 1210 } 1211 1212 static void 1213 wg_destroy_session(struct wg_softc *wg, struct wg_session *wgs) 1214 { 1215 struct wg_peer *wgp = wgs->wgs_peer; 1216 struct wg_session *wgs0 __diagused; 1217 void *garbage; 1218 1219 KASSERT(mutex_owned(wgp->wgp_lock)); 1220 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN); 1221 1222 /* Remove the session from the table. */ 1223 wgs0 = thmap_del(wg->wg_sessions_byindex, 1224 &wgs->wgs_local_index, sizeof(wgs->wgs_local_index)); 1225 KASSERT(wgs0 == wgs); 1226 garbage = thmap_stage_gc(wg->wg_sessions_byindex); 1227 1228 /* Wait for passive references to drain. */ 1229 pserialize_perform(wgp->wgp_psz); 1230 psref_target_destroy(&wgs->wgs_psref, wg_psref_class); 1231 1232 /* Free memory, zero state, and transition to UNKNOWN. */ 1233 thmap_gc(wg->wg_sessions_byindex, garbage); 1234 wg_clear_states(wgs); 1235 wgs->wgs_state = WGS_STATE_UNKNOWN; 1236 } 1237 1238 /* 1239 * wg_get_session_index(wg, wgs) 1240 * 1241 * Choose a session index for wgs->wgs_local_index, and store it 1242 * in wg's table of sessions by index. 1243 * 1244 * wgs must be the unstable session of its peer, and must be 1245 * transitioning out of the UNKNOWN state. 1246 */ 1247 static void 1248 wg_get_session_index(struct wg_softc *wg, struct wg_session *wgs) 1249 { 1250 struct wg_peer *wgp __diagused = wgs->wgs_peer; 1251 struct wg_session *wgs0; 1252 uint32_t index; 1253 1254 KASSERT(mutex_owned(wgp->wgp_lock)); 1255 KASSERT(wgs == wgp->wgp_session_unstable); 1256 KASSERT(wgs->wgs_state == WGS_STATE_UNKNOWN); 1257 1258 do { 1259 /* Pick a uniform random index. */ 1260 index = cprng_strong32(); 1261 1262 /* Try to take it. 
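	 * thmap_put(9) returns the value already stored under the key when
	 * the key is present, so a collision with a live session shows up
	 * as wgs0 != wgs below and we retry with a fresh random index.
	 * With 2^32 possible indices and only a handful of sessions per
	 * interface, retries should be vanishingly rare.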
*/ 1263 wgs->wgs_local_index = index; 1264 wgs0 = thmap_put(wg->wg_sessions_byindex, 1265 &wgs->wgs_local_index, sizeof wgs->wgs_local_index, wgs); 1266 1267 /* If someone else beat us, start over. */ 1268 } while (__predict_false(wgs0 != wgs)); 1269 } 1270 1271 /* 1272 * wg_put_session_index(wg, wgs) 1273 * 1274 * Remove wgs from the table of sessions by index, wait for any 1275 * passive references to drain, and transition the session to the 1276 * UNKNOWN state. 1277 * 1278 * wgs must be the unstable session of its peer, and must not be 1279 * UNKNOWN or ESTABLISHED. 1280 */ 1281 static void 1282 wg_put_session_index(struct wg_softc *wg, struct wg_session *wgs) 1283 { 1284 struct wg_peer *wgp __diagused = wgs->wgs_peer; 1285 1286 KASSERT(mutex_owned(wgp->wgp_lock)); 1287 KASSERT(wgs == wgp->wgp_session_unstable); 1288 KASSERT(wgs->wgs_state != WGS_STATE_UNKNOWN); 1289 KASSERT(wgs->wgs_state != WGS_STATE_ESTABLISHED); 1290 1291 wg_destroy_session(wg, wgs); 1292 psref_target_init(&wgs->wgs_psref, wg_psref_class); 1293 } 1294 1295 /* 1296 * Handshake patterns 1297 * 1298 * [W] 5: "These messages use the "IK" pattern from Noise" 1299 * [N] 7.5. Interactive handshake patterns (fundamental) 1300 * "The first character refers to the initiator’s static key:" 1301 * "I = Static key for initiator Immediately transmitted to responder, 1302 * despite reduced or absent identity hiding" 1303 * "The second character refers to the responder’s static key:" 1304 * "K = Static key for responder Known to initiator" 1305 * "IK: 1306 * <- s 1307 * ... 1308 * -> e, es, s, ss 1309 * <- e, ee, se" 1310 * [N] 9.4. Pattern modifiers 1311 * "IKpsk2: 1312 * <- s 1313 * ... 1314 * -> e, es, s, ss 1315 * <- e, ee, se, psk" 1316 */ 1317 static void 1318 wg_fill_msg_init(struct wg_softc *wg, struct wg_peer *wgp, 1319 struct wg_session *wgs, struct wg_msg_init *wgmi) 1320 { 1321 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */ 1322 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */ 1323 uint8_t cipher_key[WG_CIPHER_KEY_LEN]; 1324 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN]; 1325 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]; 1326 1327 KASSERT(mutex_owned(wgp->wgp_lock)); 1328 KASSERT(wgs == wgp->wgp_session_unstable); 1329 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE); 1330 1331 wgmi->wgmi_type = htole32(WG_MSG_TYPE_INIT); 1332 wgmi->wgmi_sender = wgs->wgs_local_index; 1333 1334 /* [W] 5.4.2: First Message: Initiator to Responder */ 1335 1336 /* Ci := HASH(CONSTRUCTION) */ 1337 /* Hi := HASH(Ci || IDENTIFIER) */ 1338 wg_init_key_and_hash(ckey, hash); 1339 /* Hi := HASH(Hi || Sr^pub) */ 1340 wg_algo_hash(hash, wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey)); 1341 1342 WG_DUMP_HASH("hash", hash); 1343 1344 /* [N] 2.2: "e" */ 1345 /* Ei^priv, Ei^pub := DH-GENERATE() */ 1346 wg_algo_generate_keypair(pubkey, privkey); 1347 /* Ci := KDF1(Ci, Ei^pub) */ 1348 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey)); 1349 /* msg.ephemeral := Ei^pub */ 1350 memcpy(wgmi->wgmi_ephemeral, pubkey, sizeof(wgmi->wgmi_ephemeral)); 1351 /* Hi := HASH(Hi || msg.ephemeral) */ 1352 wg_algo_hash(hash, pubkey, sizeof(pubkey)); 1353 1354 WG_DUMP_HASH("ckey", ckey); 1355 WG_DUMP_HASH("hash", hash); 1356 1357 /* [N] 2.2: "es" */ 1358 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */ 1359 wg_algo_dh_kdf(ckey, cipher_key, privkey, wgp->wgp_pubkey); 1360 1361 /* [N] 2.2: "s" */ 1362 /* msg.static := AEAD(k, 0, Si^pub, Hi) */ 1363 wg_algo_aead_enc(wgmi->wgmi_static, sizeof(wgmi->wgmi_static), 1364 cipher_key, 0, wg->wg_pubkey, sizeof(wg->wg_pubkey), 1365 hash, 
sizeof(hash)); 1366 /* Hi := HASH(Hi || msg.static) */ 1367 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static)); 1368 1369 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static); 1370 1371 /* [N] 2.2: "ss" */ 1372 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */ 1373 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey); 1374 1375 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */ 1376 wg_timestamp_t timestamp; 1377 wg_algo_tai64n(timestamp); 1378 wg_algo_aead_enc(wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp), 1379 cipher_key, 0, timestamp, sizeof(timestamp), hash, sizeof(hash)); 1380 /* Hi := HASH(Hi || msg.timestamp) */ 1381 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp)); 1382 1383 /* [W] 5.4.4 Cookie MACs */ 1384 wg_algo_mac_mac1(wgmi->wgmi_mac1, sizeof(wgmi->wgmi_mac1), 1385 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey), 1386 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1)); 1387 /* Need mac1 to decrypt a cookie from a cookie message */ 1388 memcpy(wgp->wgp_last_sent_mac1, wgmi->wgmi_mac1, 1389 sizeof(wgp->wgp_last_sent_mac1)); 1390 wgp->wgp_last_sent_mac1_valid = true; 1391 1392 if (wgp->wgp_latest_cookie_time == 0 || 1393 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) 1394 memset(wgmi->wgmi_mac2, 0, sizeof(wgmi->wgmi_mac2)); 1395 else { 1396 wg_algo_mac(wgmi->wgmi_mac2, sizeof(wgmi->wgmi_mac2), 1397 wgp->wgp_latest_cookie, WG_COOKIE_LEN, 1398 (const uint8_t *)wgmi, 1399 offsetof(struct wg_msg_init, wgmi_mac2), 1400 NULL, 0); 1401 } 1402 1403 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey)); 1404 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey)); 1405 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1406 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1407 WG_DLOG("%s: sender=%x\n", __func__, wgs->wgs_local_index); 1408 } 1409 1410 static void 1411 wg_handle_msg_init(struct wg_softc *wg, const struct wg_msg_init *wgmi, 1412 const struct sockaddr *src) 1413 { 1414 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.2: Ci */ 1415 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.2: Hi */ 1416 uint8_t cipher_key[WG_CIPHER_KEY_LEN]; 1417 uint8_t peer_pubkey[WG_STATIC_KEY_LEN]; 1418 struct wg_peer *wgp; 1419 struct wg_session *wgs; 1420 int error, ret; 1421 struct psref psref_peer; 1422 uint8_t mac1[WG_MAC_LEN]; 1423 1424 WG_TRACE("init msg received"); 1425 1426 wg_algo_mac_mac1(mac1, sizeof(mac1), 1427 wg->wg_pubkey, sizeof(wg->wg_pubkey), 1428 (const uint8_t *)wgmi, offsetof(struct wg_msg_init, wgmi_mac1)); 1429 1430 /* 1431 * [W] 5.3: Denial of Service Mitigation & Cookies 1432 * "the responder, ..., must always reject messages with an invalid 1433 * msg.mac1" 1434 */ 1435 if (!consttime_memequal(mac1, wgmi->wgmi_mac1, sizeof(mac1))) { 1436 WG_DLOG("mac1 is invalid\n"); 1437 return; 1438 } 1439 1440 /* 1441 * [W] 5.4.2: First Message: Initiator to Responder 1442 * "When the responder receives this message, it does the same 1443 * operations so that its final state variables are identical, 1444 * replacing the operands of the DH function to produce equivalent 1445 * values." 1446 * Note that the following comments of operations are just copies of 1447 * the initiator's ones. 
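	 * Concretely, where the initiator computed DH(Ei^priv, Sr^pub) and
	 * DH(Si^priv, Sr^pub), the responder below computes
	 * DH(Sr^priv, Ei^pub) and DH(Sr^priv, Si^pub); by the symmetry of
	 * Curve25519 DH the shared secrets are the same, so Ci, Hi and the
	 * derived keys come out identical on both sides.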
1448 */ 1449 1450 /* Ci := HASH(CONSTRUCTION) */ 1451 /* Hi := HASH(Ci || IDENTIFIER) */ 1452 wg_init_key_and_hash(ckey, hash); 1453 /* Hi := HASH(Hi || Sr^pub) */ 1454 wg_algo_hash(hash, wg->wg_pubkey, sizeof(wg->wg_pubkey)); 1455 1456 /* [N] 2.2: "e" */ 1457 /* Ci := KDF1(Ci, Ei^pub) */ 1458 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmi->wgmi_ephemeral, 1459 sizeof(wgmi->wgmi_ephemeral)); 1460 /* Hi := HASH(Hi || msg.ephemeral) */ 1461 wg_algo_hash(hash, wgmi->wgmi_ephemeral, sizeof(wgmi->wgmi_ephemeral)); 1462 1463 WG_DUMP_HASH("ckey", ckey); 1464 1465 /* [N] 2.2: "es" */ 1466 /* Ci, k := KDF2(Ci, DH(Ei^priv, Sr^pub)) */ 1467 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgmi->wgmi_ephemeral); 1468 1469 WG_DUMP_HASH48("wgmi_static", wgmi->wgmi_static); 1470 1471 /* [N] 2.2: "s" */ 1472 /* msg.static := AEAD(k, 0, Si^pub, Hi) */ 1473 error = wg_algo_aead_dec(peer_pubkey, WG_STATIC_KEY_LEN, cipher_key, 0, 1474 wgmi->wgmi_static, sizeof(wgmi->wgmi_static), hash, sizeof(hash)); 1475 if (error != 0) { 1476 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG, 1477 "wg_algo_aead_dec for secret key failed\n"); 1478 return; 1479 } 1480 /* Hi := HASH(Hi || msg.static) */ 1481 wg_algo_hash(hash, wgmi->wgmi_static, sizeof(wgmi->wgmi_static)); 1482 1483 wgp = wg_lookup_peer_by_pubkey(wg, peer_pubkey, &psref_peer); 1484 if (wgp == NULL) { 1485 WG_DLOG("peer not found\n"); 1486 return; 1487 } 1488 1489 /* 1490 * Lock the peer to serialize access to cookie state. 1491 * 1492 * XXX Can we safely avoid holding the lock across DH? Take it 1493 * just to verify mac2 and then unlock/DH/lock? 1494 */ 1495 mutex_enter(wgp->wgp_lock); 1496 1497 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_INIT))) { 1498 WG_TRACE("under load"); 1499 /* 1500 * [W] 5.3: Denial of Service Mitigation & Cookies 1501 * "the responder, ..., and when under load may reject messages 1502 * with an invalid msg.mac2. 
If the responder receives a 1503 * message with a valid msg.mac1 yet with an invalid msg.mac2, 1504 * and is under load, it may respond with a cookie reply 1505 * message" 1506 */ 1507 uint8_t zero[WG_MAC_LEN] = {0}; 1508 if (consttime_memequal(wgmi->wgmi_mac2, zero, sizeof(zero))) { 1509 WG_TRACE("sending a cookie message: no cookie included"); 1510 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender, 1511 wgmi->wgmi_mac1, src); 1512 goto out; 1513 } 1514 if (!wgp->wgp_last_sent_cookie_valid) { 1515 WG_TRACE("sending a cookie message: no cookie sent ever"); 1516 (void)wg_send_cookie_msg(wg, wgp, wgmi->wgmi_sender, 1517 wgmi->wgmi_mac1, src); 1518 goto out; 1519 } 1520 uint8_t mac2[WG_MAC_LEN]; 1521 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie, 1522 WG_COOKIE_LEN, (const uint8_t *)wgmi, 1523 offsetof(struct wg_msg_init, wgmi_mac2), NULL, 0); 1524 if (!consttime_memequal(mac2, wgmi->wgmi_mac2, sizeof(mac2))) { 1525 WG_DLOG("mac2 is invalid\n"); 1526 goto out; 1527 } 1528 WG_TRACE("under load, but continue to sending"); 1529 } 1530 1531 /* [N] 2.2: "ss" */ 1532 /* Ci, k := KDF2(Ci, DH(Si^priv, Sr^pub)) */ 1533 wg_algo_dh_kdf(ckey, cipher_key, wg->wg_privkey, wgp->wgp_pubkey); 1534 1535 /* msg.timestamp := AEAD(k, TIMESTAMP(), Hi) */ 1536 wg_timestamp_t timestamp; 1537 error = wg_algo_aead_dec(timestamp, sizeof(timestamp), cipher_key, 0, 1538 wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp), 1539 hash, sizeof(hash)); 1540 if (error != 0) { 1541 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1542 "wg_algo_aead_dec for timestamp failed\n"); 1543 goto out; 1544 } 1545 /* Hi := HASH(Hi || msg.timestamp) */ 1546 wg_algo_hash(hash, wgmi->wgmi_timestamp, sizeof(wgmi->wgmi_timestamp)); 1547 1548 /* 1549 * [W] 5.1 "The responder keeps track of the greatest timestamp 1550 * received per peer and discards packets containing 1551 * timestamps less than or equal to it." 1552 */ 1553 ret = memcmp(timestamp, wgp->wgp_timestamp_latest_init, 1554 sizeof(timestamp)); 1555 if (ret <= 0) { 1556 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1557 "invalid init msg: timestamp is old\n"); 1558 goto out; 1559 } 1560 memcpy(wgp->wgp_timestamp_latest_init, timestamp, sizeof(timestamp)); 1561 1562 /* 1563 * Message is good -- we're committing to handle it now, unless 1564 * we were already initiating a session. 
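	 *
	 * The switch below dispatches on the unstable session's state:
	 * UNKNOWN means the peer is starting a fresh handshake, so allocate
	 * a local index; INIT_ACTIVE means we are mid-handshake as the
	 * initiator, so ignore the peer's init message; INIT_PASSIVE and
	 * DESTROYING mean the peer is retrying or rekeying, so clear the
	 * half-built state but keep the already-published index.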
1565 */ 1566 wgs = wgp->wgp_session_unstable; 1567 switch (wgs->wgs_state) { 1568 case WGS_STATE_UNKNOWN: /* new session initiated by peer */ 1569 wg_get_session_index(wg, wgs); 1570 break; 1571 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, drop */ 1572 WG_TRACE("Session already initializing, ignoring the message"); 1573 goto out; 1574 case WGS_STATE_INIT_PASSIVE: /* peer is retrying, start over */ 1575 WG_TRACE("Session already initializing, destroying old states"); 1576 wg_clear_states(wgs); 1577 /* keep session index */ 1578 break; 1579 case WGS_STATE_ESTABLISHED: /* can't happen */ 1580 panic("unstable session can't be established"); 1581 break; 1582 case WGS_STATE_DESTROYING: /* rekey initiated by peer */ 1583 WG_TRACE("Session destroying, but force to clear"); 1584 callout_stop(&wgp->wgp_session_dtor_timer); 1585 wg_clear_states(wgs); 1586 /* keep session index */ 1587 break; 1588 default: 1589 panic("invalid session state: %d", wgs->wgs_state); 1590 } 1591 wgs->wgs_state = WGS_STATE_INIT_PASSIVE; 1592 1593 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1594 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1595 memcpy(wgs->wgs_ephemeral_key_peer, wgmi->wgmi_ephemeral, 1596 sizeof(wgmi->wgmi_ephemeral)); 1597 1598 wg_update_endpoint_if_necessary(wgp, src); 1599 1600 (void)wg_send_handshake_msg_resp(wg, wgp, wgs, wgmi); 1601 1602 wg_calculate_keys(wgs, false); 1603 wg_clear_states(wgs); 1604 1605 out: 1606 mutex_exit(wgp->wgp_lock); 1607 wg_put_peer(wgp, &psref_peer); 1608 } 1609 1610 static struct socket * 1611 wg_get_so_by_af(struct wg_softc *wg, const int af) 1612 { 1613 1614 return (af == AF_INET) ? wg->wg_so4 : wg->wg_so6; 1615 } 1616 1617 static struct socket * 1618 wg_get_so_by_peer(struct wg_peer *wgp, struct wg_sockaddr *wgsa) 1619 { 1620 1621 return wg_get_so_by_af(wgp->wgp_sc, wgsa_family(wgsa)); 1622 } 1623 1624 static struct wg_sockaddr * 1625 wg_get_endpoint_sa(struct wg_peer *wgp, struct psref *psref) 1626 { 1627 struct wg_sockaddr *wgsa; 1628 int s; 1629 1630 s = pserialize_read_enter(); 1631 wgsa = atomic_load_consume(&wgp->wgp_endpoint); 1632 psref_acquire(psref, &wgsa->wgsa_psref, wg_psref_class); 1633 pserialize_read_exit(s); 1634 1635 return wgsa; 1636 } 1637 1638 static void 1639 wg_put_sa(struct wg_peer *wgp, struct wg_sockaddr *wgsa, struct psref *psref) 1640 { 1641 1642 psref_release(psref, &wgsa->wgsa_psref, wg_psref_class); 1643 } 1644 1645 static int 1646 wg_send_so(struct wg_peer *wgp, struct mbuf *m) 1647 { 1648 int error; 1649 struct socket *so; 1650 struct psref psref; 1651 struct wg_sockaddr *wgsa; 1652 1653 wgsa = wg_get_endpoint_sa(wgp, &psref); 1654 so = wg_get_so_by_peer(wgp, wgsa); 1655 error = sosend(so, wgsatosa(wgsa), NULL, m, NULL, 0, curlwp); 1656 wg_put_sa(wgp, wgsa, &psref); 1657 1658 return error; 1659 } 1660 1661 static int 1662 wg_send_handshake_msg_init(struct wg_softc *wg, struct wg_peer *wgp) 1663 { 1664 int error; 1665 struct mbuf *m; 1666 struct wg_msg_init *wgmi; 1667 struct wg_session *wgs; 1668 1669 KASSERT(mutex_owned(wgp->wgp_lock)); 1670 1671 wgs = wgp->wgp_session_unstable; 1672 /* XXX pull dispatch out into wg_task_send_init_message */ 1673 switch (wgs->wgs_state) { 1674 case WGS_STATE_UNKNOWN: /* new session initiated by us */ 1675 wg_get_session_index(wg, wgs); 1676 break; 1677 case WGS_STATE_INIT_ACTIVE: /* we're already initiating, stop */ 1678 WG_TRACE("Session already initializing, skip starting new one"); 1679 return EBUSY; 1680 case WGS_STATE_INIT_PASSIVE: /* peer was trying -- XXX what now? 
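	 * For now the code below abandons the half-completed passive
	 * handshake and reuses its index to initiate our own handshake;
	 * whether that is the right resolution is left open here.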
*/ 1681 WG_TRACE("Session already initializing, destroying old states"); 1682 wg_clear_states(wgs); 1683 /* keep session index */ 1684 break; 1685 case WGS_STATE_ESTABLISHED: /* can't happen */ 1686 panic("unstable session can't be established"); 1687 break; 1688 case WGS_STATE_DESTROYING: /* rekey initiated by us too early */ 1689 WG_TRACE("Session destroying"); 1690 /* XXX should wait? */ 1691 return EBUSY; 1692 } 1693 wgs->wgs_state = WGS_STATE_INIT_ACTIVE; 1694 1695 m = m_gethdr(M_WAIT, MT_DATA); 1696 m->m_pkthdr.len = m->m_len = sizeof(*wgmi); 1697 wgmi = mtod(m, struct wg_msg_init *); 1698 wg_fill_msg_init(wg, wgp, wgs, wgmi); 1699 1700 error = wg->wg_ops->send_hs_msg(wgp, m); 1701 if (error == 0) { 1702 WG_TRACE("init msg sent"); 1703 1704 if (wgp->wgp_handshake_start_time == 0) 1705 wgp->wgp_handshake_start_time = time_uptime; 1706 callout_schedule(&wgp->wgp_handshake_timeout_timer, 1707 MIN(wg_rekey_timeout, INT_MAX/hz) * hz); 1708 } else { 1709 wg_put_session_index(wg, wgs); 1710 /* Initiation failed; toss packet waiting for it if any. */ 1711 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) 1712 m_freem(m); 1713 } 1714 1715 return error; 1716 } 1717 1718 static void 1719 wg_fill_msg_resp(struct wg_softc *wg, struct wg_peer *wgp, 1720 struct wg_session *wgs, struct wg_msg_resp *wgmr, 1721 const struct wg_msg_init *wgmi) 1722 { 1723 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */ 1724 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Hr */ 1725 uint8_t cipher_key[WG_KDF_OUTPUT_LEN]; 1726 uint8_t pubkey[WG_EPHEMERAL_KEY_LEN]; 1727 uint8_t privkey[WG_EPHEMERAL_KEY_LEN]; 1728 1729 KASSERT(mutex_owned(wgp->wgp_lock)); 1730 KASSERT(wgs == wgp->wgp_session_unstable); 1731 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE); 1732 1733 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash)); 1734 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey)); 1735 1736 wgmr->wgmr_type = htole32(WG_MSG_TYPE_RESP); 1737 wgmr->wgmr_sender = wgs->wgs_local_index; 1738 wgmr->wgmr_receiver = wgmi->wgmi_sender; 1739 1740 /* [W] 5.4.3 Second Message: Responder to Initiator */ 1741 1742 /* [N] 2.2: "e" */ 1743 /* Er^priv, Er^pub := DH-GENERATE() */ 1744 wg_algo_generate_keypair(pubkey, privkey); 1745 /* Cr := KDF1(Cr, Er^pub) */ 1746 wg_algo_kdf(ckey, NULL, NULL, ckey, pubkey, sizeof(pubkey)); 1747 /* msg.ephemeral := Er^pub */ 1748 memcpy(wgmr->wgmr_ephemeral, pubkey, sizeof(wgmr->wgmr_ephemeral)); 1749 /* Hr := HASH(Hr || msg.ephemeral) */ 1750 wg_algo_hash(hash, pubkey, sizeof(pubkey)); 1751 1752 WG_DUMP_HASH("ckey", ckey); 1753 WG_DUMP_HASH("hash", hash); 1754 1755 /* [N] 2.2: "ee" */ 1756 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */ 1757 wg_algo_dh_kdf(ckey, NULL, privkey, wgs->wgs_ephemeral_key_peer); 1758 1759 /* [N] 2.2: "se" */ 1760 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */ 1761 wg_algo_dh_kdf(ckey, NULL, privkey, wgp->wgp_pubkey); 1762 1763 /* [N] 9.2: "psk" */ 1764 { 1765 uint8_t kdfout[WG_KDF_OUTPUT_LEN]; 1766 /* Cr, r, k := KDF3(Cr, Q) */ 1767 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk, 1768 sizeof(wgp->wgp_psk)); 1769 /* Hr := HASH(Hr || r) */ 1770 wg_algo_hash(hash, kdfout, sizeof(kdfout)); 1771 } 1772 1773 /* msg.empty := AEAD(k, 0, e, Hr) */ 1774 wg_algo_aead_enc(wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty), 1775 cipher_key, 0, NULL, 0, hash, sizeof(hash)); 1776 /* Hr := HASH(Hr || msg.empty) */ 1777 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty)); 1778 1779 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty); 1780 1781 /* [W] 5.4.4: Cookie MACs */ 1782 /* msg.mac1 := 
MAC(HASH(LABEL-MAC1 || Sm'^pub), msg_a) */ 1783 wg_algo_mac_mac1(wgmr->wgmr_mac1, sizeof(wgmi->wgmi_mac1), 1784 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey), 1785 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1)); 1786 /* Need mac1 to decrypt a cookie from a cookie message */ 1787 memcpy(wgp->wgp_last_sent_mac1, wgmr->wgmr_mac1, 1788 sizeof(wgp->wgp_last_sent_mac1)); 1789 wgp->wgp_last_sent_mac1_valid = true; 1790 1791 if (wgp->wgp_latest_cookie_time == 0 || 1792 (time_uptime - wgp->wgp_latest_cookie_time) >= WG_COOKIE_TIME) 1793 /* msg.mac2 := 0^16 */ 1794 memset(wgmr->wgmr_mac2, 0, sizeof(wgmr->wgmr_mac2)); 1795 else { 1796 /* msg.mac2 := MAC(Lm, msg_b) */ 1797 wg_algo_mac(wgmr->wgmr_mac2, sizeof(wgmi->wgmi_mac2), 1798 wgp->wgp_latest_cookie, WG_COOKIE_LEN, 1799 (const uint8_t *)wgmr, 1800 offsetof(struct wg_msg_resp, wgmr_mac2), 1801 NULL, 0); 1802 } 1803 1804 memcpy(wgs->wgs_handshake_hash, hash, sizeof(hash)); 1805 memcpy(wgs->wgs_chaining_key, ckey, sizeof(ckey)); 1806 memcpy(wgs->wgs_ephemeral_key_pub, pubkey, sizeof(pubkey)); 1807 memcpy(wgs->wgs_ephemeral_key_priv, privkey, sizeof(privkey)); 1808 wgs->wgs_remote_index = wgmi->wgmi_sender; 1809 WG_DLOG("sender=%x\n", wgs->wgs_local_index); 1810 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index); 1811 } 1812 1813 static void 1814 wg_swap_sessions(struct wg_peer *wgp) 1815 { 1816 struct wg_session *wgs, *wgs_prev; 1817 1818 KASSERT(mutex_owned(wgp->wgp_lock)); 1819 1820 wgs = wgp->wgp_session_unstable; 1821 KASSERT(wgs->wgs_state == WGS_STATE_ESTABLISHED); 1822 1823 wgs_prev = wgp->wgp_session_stable; 1824 KASSERT(wgs_prev->wgs_state == WGS_STATE_ESTABLISHED || 1825 wgs_prev->wgs_state == WGS_STATE_UNKNOWN); 1826 atomic_store_release(&wgp->wgp_session_stable, wgs); 1827 wgp->wgp_session_unstable = wgs_prev; 1828 } 1829 1830 static void 1831 wg_handle_msg_resp(struct wg_softc *wg, const struct wg_msg_resp *wgmr, 1832 const struct sockaddr *src) 1833 { 1834 uint8_t ckey[WG_CHAINING_KEY_LEN]; /* [W] 5.4.3: Cr */ 1835 uint8_t hash[WG_HASH_LEN]; /* [W] 5.4.3: Kr */ 1836 uint8_t cipher_key[WG_KDF_OUTPUT_LEN]; 1837 struct wg_peer *wgp; 1838 struct wg_session *wgs; 1839 struct psref psref; 1840 int error; 1841 uint8_t mac1[WG_MAC_LEN]; 1842 struct wg_session *wgs_prev; 1843 struct mbuf *m; 1844 1845 wg_algo_mac_mac1(mac1, sizeof(mac1), 1846 wg->wg_pubkey, sizeof(wg->wg_pubkey), 1847 (const uint8_t *)wgmr, offsetof(struct wg_msg_resp, wgmr_mac1)); 1848 1849 /* 1850 * [W] 5.3: Denial of Service Mitigation & Cookies 1851 * "the responder, ..., must always reject messages with an invalid 1852 * msg.mac1" 1853 */ 1854 if (!consttime_memequal(mac1, wgmr->wgmr_mac1, sizeof(mac1))) { 1855 WG_DLOG("mac1 is invalid\n"); 1856 return; 1857 } 1858 1859 WG_TRACE("resp msg received"); 1860 wgs = wg_lookup_session_by_index(wg, wgmr->wgmr_receiver, &psref); 1861 if (wgs == NULL) { 1862 WG_TRACE("No session found"); 1863 return; 1864 } 1865 1866 wgp = wgs->wgs_peer; 1867 1868 mutex_enter(wgp->wgp_lock); 1869 1870 /* If we weren't waiting for a handshake response, drop it. */ 1871 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) { 1872 WG_TRACE("peer sent spurious handshake response, ignoring"); 1873 goto out; 1874 } 1875 1876 if (__predict_false(wg_is_underload(wg, wgp, WG_MSG_TYPE_RESP))) { 1877 WG_TRACE("under load"); 1878 /* 1879 * [W] 5.3: Denial of Service Mitigation & Cookies 1880 * "the responder, ..., and when under load may reject messages 1881 * with an invalid msg.mac2. 
If the responder receives a 1882 * message with a valid msg.mac1 yet with an invalid msg.mac2, 1883 * and is under load, it may respond with a cookie reply 1884 * message" 1885 */ 1886 uint8_t zero[WG_MAC_LEN] = {0}; 1887 if (consttime_memequal(wgmr->wgmr_mac2, zero, sizeof(zero))) { 1888 WG_TRACE("sending a cookie message: no cookie included"); 1889 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender, 1890 wgmr->wgmr_mac1, src); 1891 goto out; 1892 } 1893 if (!wgp->wgp_last_sent_cookie_valid) { 1894 WG_TRACE("sending a cookie message: no cookie sent ever"); 1895 (void)wg_send_cookie_msg(wg, wgp, wgmr->wgmr_sender, 1896 wgmr->wgmr_mac1, src); 1897 goto out; 1898 } 1899 uint8_t mac2[WG_MAC_LEN]; 1900 wg_algo_mac(mac2, sizeof(mac2), wgp->wgp_last_sent_cookie, 1901 WG_COOKIE_LEN, (const uint8_t *)wgmr, 1902 offsetof(struct wg_msg_resp, wgmr_mac2), NULL, 0); 1903 if (!consttime_memequal(mac2, wgmr->wgmr_mac2, sizeof(mac2))) { 1904 WG_DLOG("mac2 is invalid\n"); 1905 goto out; 1906 } 1907 WG_TRACE("under load, but continue to sending"); 1908 } 1909 1910 memcpy(hash, wgs->wgs_handshake_hash, sizeof(hash)); 1911 memcpy(ckey, wgs->wgs_chaining_key, sizeof(ckey)); 1912 1913 /* 1914 * [W] 5.4.3 Second Message: Responder to Initiator 1915 * "When the initiator receives this message, it does the same 1916 * operations so that its final state variables are identical, 1917 * replacing the operands of the DH function to produce equivalent 1918 * values." 1919 * Note that the following comments of operations are just copies of 1920 * the initiator's ones. 1921 */ 1922 1923 /* [N] 2.2: "e" */ 1924 /* Cr := KDF1(Cr, Er^pub) */ 1925 wg_algo_kdf(ckey, NULL, NULL, ckey, wgmr->wgmr_ephemeral, 1926 sizeof(wgmr->wgmr_ephemeral)); 1927 /* Hr := HASH(Hr || msg.ephemeral) */ 1928 wg_algo_hash(hash, wgmr->wgmr_ephemeral, sizeof(wgmr->wgmr_ephemeral)); 1929 1930 WG_DUMP_HASH("ckey", ckey); 1931 WG_DUMP_HASH("hash", hash); 1932 1933 /* [N] 2.2: "ee" */ 1934 /* Cr := KDF1(Cr, DH(Er^priv, Ei^pub)) */ 1935 wg_algo_dh_kdf(ckey, NULL, wgs->wgs_ephemeral_key_priv, 1936 wgmr->wgmr_ephemeral); 1937 1938 /* [N] 2.2: "se" */ 1939 /* Cr := KDF1(Cr, DH(Er^priv, Si^pub)) */ 1940 wg_algo_dh_kdf(ckey, NULL, wg->wg_privkey, wgmr->wgmr_ephemeral); 1941 1942 /* [N] 9.2: "psk" */ 1943 { 1944 uint8_t kdfout[WG_KDF_OUTPUT_LEN]; 1945 /* Cr, r, k := KDF3(Cr, Q) */ 1946 wg_algo_kdf(ckey, kdfout, cipher_key, ckey, wgp->wgp_psk, 1947 sizeof(wgp->wgp_psk)); 1948 /* Hr := HASH(Hr || r) */ 1949 wg_algo_hash(hash, kdfout, sizeof(kdfout)); 1950 } 1951 1952 { 1953 uint8_t out[sizeof(wgmr->wgmr_empty)]; /* for safety */ 1954 /* msg.empty := AEAD(k, 0, e, Hr) */ 1955 error = wg_algo_aead_dec(out, 0, cipher_key, 0, wgmr->wgmr_empty, 1956 sizeof(wgmr->wgmr_empty), hash, sizeof(hash)); 1957 WG_DUMP_HASH("wgmr_empty", wgmr->wgmr_empty); 1958 if (error != 0) { 1959 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 1960 "wg_algo_aead_dec for empty message failed\n"); 1961 goto out; 1962 } 1963 /* Hr := HASH(Hr || msg.empty) */ 1964 wg_algo_hash(hash, wgmr->wgmr_empty, sizeof(wgmr->wgmr_empty)); 1965 } 1966 1967 memcpy(wgs->wgs_handshake_hash, hash, sizeof(wgs->wgs_handshake_hash)); 1968 memcpy(wgs->wgs_chaining_key, ckey, sizeof(wgs->wgs_chaining_key)); 1969 wgs->wgs_remote_index = wgmr->wgmr_sender; 1970 WG_DLOG("receiver=%x\n", wgs->wgs_remote_index); 1971 1972 KASSERT(wgs->wgs_state == WGS_STATE_INIT_ACTIVE); 1973 wgs->wgs_state = WGS_STATE_ESTABLISHED; 1974 wgs->wgs_time_established = time_uptime; 1975 wgs->wgs_time_last_data_sent = 0; 1976 
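	/*
	 * Illustrative note: from here the initiator derives the transport
	 * keys from the final chaining key and only then wipes the
	 * handshake state, so wg_calculate_keys() must run before
	 * wg_clear_states() below.  Per [W] 5.4.5 the two directions are
	 * simply the two outputs of KDF2 over the chaining key, mirrored
	 * between the two ends.  A minimal sketch of the initiator side,
	 * using wg_algo_kdf() as it is used elsewhere in this file:
	 *
	 *	// (Ti^send, Ti^recv) := KDF2(Ci, e)
	 *	wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL,
	 *	    wgs->wgs_chaining_key, NULL, 0);
	 *
	 * The responder computes the same KDF2 but swaps the two outputs;
	 * see wg_calculate_keys() further below.
	 */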
wgs->wgs_is_initiator = true; 1977 wg_calculate_keys(wgs, true); 1978 wg_clear_states(wgs); 1979 WG_TRACE("WGS_STATE_ESTABLISHED"); 1980 1981 callout_stop(&wgp->wgp_handshake_timeout_timer); 1982 1983 wg_swap_sessions(wgp); 1984 KASSERT(wgs == wgp->wgp_session_stable); 1985 wgs_prev = wgp->wgp_session_unstable; 1986 getnanotime(&wgp->wgp_last_handshake_time); 1987 wgp->wgp_handshake_start_time = 0; 1988 wgp->wgp_last_sent_mac1_valid = false; 1989 wgp->wgp_last_sent_cookie_valid = false; 1990 1991 wg_schedule_rekey_timer(wgp); 1992 1993 wg_update_endpoint_if_necessary(wgp, src); 1994 1995 /* 1996 * If we had a data packet queued up, send it; otherwise send a 1997 * keepalive message -- either way we have to send something 1998 * immediately or else the responder will never answer. 1999 */ 2000 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) { 2001 kpreempt_disable(); 2002 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 2003 M_SETCTX(m, wgp); 2004 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 2005 WGLOG(LOG_ERR, "pktq full, dropping\n"); 2006 m_freem(m); 2007 } 2008 kpreempt_enable(); 2009 } else { 2010 wg_send_keepalive_msg(wgp, wgs); 2011 } 2012 2013 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) { 2014 /* Wait for wg_get_stable_session to drain. */ 2015 pserialize_perform(wgp->wgp_psz); 2016 2017 /* Transition ESTABLISHED->DESTROYING. */ 2018 wgs_prev->wgs_state = WGS_STATE_DESTROYING; 2019 2020 /* We can't destroy the old session immediately */ 2021 wg_schedule_session_dtor_timer(wgp); 2022 } else { 2023 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN, 2024 "state=%d", wgs_prev->wgs_state); 2025 } 2026 2027 out: 2028 mutex_exit(wgp->wgp_lock); 2029 wg_put_session(wgs, &psref); 2030 } 2031 2032 static int 2033 wg_send_handshake_msg_resp(struct wg_softc *wg, struct wg_peer *wgp, 2034 struct wg_session *wgs, const struct wg_msg_init *wgmi) 2035 { 2036 int error; 2037 struct mbuf *m; 2038 struct wg_msg_resp *wgmr; 2039 2040 KASSERT(mutex_owned(wgp->wgp_lock)); 2041 KASSERT(wgs == wgp->wgp_session_unstable); 2042 KASSERT(wgs->wgs_state == WGS_STATE_INIT_PASSIVE); 2043 2044 m = m_gethdr(M_WAIT, MT_DATA); 2045 m->m_pkthdr.len = m->m_len = sizeof(*wgmr); 2046 wgmr = mtod(m, struct wg_msg_resp *); 2047 wg_fill_msg_resp(wg, wgp, wgs, wgmr, wgmi); 2048 2049 error = wg->wg_ops->send_hs_msg(wgp, m); 2050 if (error == 0) 2051 WG_TRACE("resp msg sent"); 2052 return error; 2053 } 2054 2055 static struct wg_peer * 2056 wg_lookup_peer_by_pubkey(struct wg_softc *wg, 2057 const uint8_t pubkey[WG_STATIC_KEY_LEN], struct psref *psref) 2058 { 2059 struct wg_peer *wgp; 2060 2061 int s = pserialize_read_enter(); 2062 wgp = thmap_get(wg->wg_peers_bypubkey, pubkey, WG_STATIC_KEY_LEN); 2063 if (wgp != NULL) 2064 wg_get_peer(wgp, psref); 2065 pserialize_read_exit(s); 2066 2067 return wgp; 2068 } 2069 2070 static void 2071 wg_fill_msg_cookie(struct wg_softc *wg, struct wg_peer *wgp, 2072 struct wg_msg_cookie *wgmc, const uint32_t sender, 2073 const uint8_t mac1[WG_MAC_LEN], const struct sockaddr *src) 2074 { 2075 uint8_t cookie[WG_COOKIE_LEN]; 2076 uint8_t key[WG_HASH_LEN]; 2077 uint8_t addr[sizeof(struct in6_addr)]; 2078 size_t addrlen; 2079 uint16_t uh_sport; /* be */ 2080 2081 KASSERT(mutex_owned(wgp->wgp_lock)); 2082 2083 wgmc->wgmc_type = htole32(WG_MSG_TYPE_COOKIE); 2084 wgmc->wgmc_receiver = sender; 2085 cprng_fast(wgmc->wgmc_salt, sizeof(wgmc->wgmc_salt)); 2086 2087 /* 2088 * [W] 5.4.7: Under Load: Cookie Reply Message 2089 * "The secret variable, Rm, changes every two 
minutes to a 2090 * random value" 2091 */ 2092 if ((time_uptime - wgp->wgp_last_genrandval_time) > WG_RANDVAL_TIME) { 2093 wgp->wgp_randval = cprng_strong32(); 2094 wgp->wgp_last_genrandval_time = time_uptime; 2095 } 2096 2097 switch (src->sa_family) { 2098 case AF_INET: { 2099 const struct sockaddr_in *sin = satocsin(src); 2100 addrlen = sizeof(sin->sin_addr); 2101 memcpy(addr, &sin->sin_addr, addrlen); 2102 uh_sport = sin->sin_port; 2103 break; 2104 } 2105 #ifdef INET6 2106 case AF_INET6: { 2107 const struct sockaddr_in6 *sin6 = satocsin6(src); 2108 addrlen = sizeof(sin6->sin6_addr); 2109 memcpy(addr, &sin6->sin6_addr, addrlen); 2110 uh_sport = sin6->sin6_port; 2111 break; 2112 } 2113 #endif 2114 default: 2115 panic("invalid af=%d", src->sa_family); 2116 } 2117 2118 wg_algo_mac(cookie, sizeof(cookie), 2119 (const uint8_t *)&wgp->wgp_randval, sizeof(wgp->wgp_randval), 2120 addr, addrlen, (const uint8_t *)&uh_sport, sizeof(uh_sport)); 2121 wg_algo_mac_cookie(key, sizeof(key), wg->wg_pubkey, 2122 sizeof(wg->wg_pubkey)); 2123 wg_algo_xaead_enc(wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), key, 2124 cookie, sizeof(cookie), mac1, WG_MAC_LEN, wgmc->wgmc_salt); 2125 2126 /* Need to store the cookie to compute mac2 later */ 2127 memcpy(wgp->wgp_last_sent_cookie, cookie, sizeof(cookie)); 2128 wgp->wgp_last_sent_cookie_valid = true; 2129 } 2130 2131 static int 2132 wg_send_cookie_msg(struct wg_softc *wg, struct wg_peer *wgp, 2133 const uint32_t sender, const uint8_t mac1[WG_MAC_LEN], 2134 const struct sockaddr *src) 2135 { 2136 int error; 2137 struct mbuf *m; 2138 struct wg_msg_cookie *wgmc; 2139 2140 KASSERT(mutex_owned(wgp->wgp_lock)); 2141 2142 m = m_gethdr(M_WAIT, MT_DATA); 2143 m->m_pkthdr.len = m->m_len = sizeof(*wgmc); 2144 wgmc = mtod(m, struct wg_msg_cookie *); 2145 wg_fill_msg_cookie(wg, wgp, wgmc, sender, mac1, src); 2146 2147 error = wg->wg_ops->send_hs_msg(wgp, m); 2148 if (error == 0) 2149 WG_TRACE("cookie msg sent"); 2150 return error; 2151 } 2152 2153 static bool 2154 wg_is_underload(struct wg_softc *wg, struct wg_peer *wgp, int msgtype) 2155 { 2156 #ifdef WG_DEBUG_PARAMS 2157 if (wg_force_underload) 2158 return true; 2159 #endif 2160 2161 /* 2162 * XXX We don't have a means of load estimation. The purpose of 2163 * this mechanism is DoS mitigation, so we treat frequent handshake 2164 * messages as (a kind of) load; if a message of the same type arrives 2165 * for a peer within one second of the previous one, we consider ourselves under load. 
2166 */ 2167 time_t last = wgp->wgp_last_msg_received_time[msgtype]; 2168 wgp->wgp_last_msg_received_time[msgtype] = time_uptime; 2169 return (time_uptime - last) == 0; 2170 } 2171 2172 static void 2173 wg_calculate_keys(struct wg_session *wgs, const bool initiator) 2174 { 2175 2176 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock)); 2177 2178 /* 2179 * [W] 5.4.5: Ti^send = Tr^recv, Ti^recv = Tr^send := KDF2(Ci = Cr, e) 2180 */ 2181 if (initiator) { 2182 wg_algo_kdf(wgs->wgs_tkey_send, wgs->wgs_tkey_recv, NULL, 2183 wgs->wgs_chaining_key, NULL, 0); 2184 } else { 2185 wg_algo_kdf(wgs->wgs_tkey_recv, wgs->wgs_tkey_send, NULL, 2186 wgs->wgs_chaining_key, NULL, 0); 2187 } 2188 WG_DUMP_HASH("wgs_tkey_send", wgs->wgs_tkey_send); 2189 WG_DUMP_HASH("wgs_tkey_recv", wgs->wgs_tkey_recv); 2190 } 2191 2192 static uint64_t 2193 wg_session_get_send_counter(struct wg_session *wgs) 2194 { 2195 #ifdef __HAVE_ATOMIC64_LOADSTORE 2196 return atomic_load_relaxed(&wgs->wgs_send_counter); 2197 #else 2198 uint64_t send_counter; 2199 2200 mutex_enter(&wgs->wgs_send_counter_lock); 2201 send_counter = wgs->wgs_send_counter; 2202 mutex_exit(&wgs->wgs_send_counter_lock); 2203 2204 return send_counter; 2205 #endif 2206 } 2207 2208 static uint64_t 2209 wg_session_inc_send_counter(struct wg_session *wgs) 2210 { 2211 #ifdef __HAVE_ATOMIC64_LOADSTORE 2212 return atomic_inc_64_nv(&wgs->wgs_send_counter) - 1; 2213 #else 2214 uint64_t send_counter; 2215 2216 mutex_enter(&wgs->wgs_send_counter_lock); 2217 send_counter = wgs->wgs_send_counter++; 2218 mutex_exit(&wgs->wgs_send_counter_lock); 2219 2220 return send_counter; 2221 #endif 2222 } 2223 2224 static void 2225 wg_clear_states(struct wg_session *wgs) 2226 { 2227 2228 KASSERT(mutex_owned(wgs->wgs_peer->wgp_lock)); 2229 2230 wgs->wgs_send_counter = 0; 2231 sliwin_reset(&wgs->wgs_recvwin->window); 2232 2233 #define wgs_clear(v) explicit_memset(wgs->wgs_##v, 0, sizeof(wgs->wgs_##v)) 2234 wgs_clear(handshake_hash); 2235 wgs_clear(chaining_key); 2236 wgs_clear(ephemeral_key_pub); 2237 wgs_clear(ephemeral_key_priv); 2238 wgs_clear(ephemeral_key_peer); 2239 #undef wgs_clear 2240 } 2241 2242 static struct wg_session * 2243 wg_lookup_session_by_index(struct wg_softc *wg, const uint32_t index, 2244 struct psref *psref) 2245 { 2246 struct wg_session *wgs; 2247 2248 int s = pserialize_read_enter(); 2249 wgs = thmap_get(wg->wg_sessions_byindex, &index, sizeof index); 2250 if (wgs != NULL) { 2251 KASSERT(atomic_load_relaxed(&wgs->wgs_state) != 2252 WGS_STATE_UNKNOWN); 2253 psref_acquire(psref, &wgs->wgs_psref, wg_psref_class); 2254 } 2255 pserialize_read_exit(s); 2256 2257 return wgs; 2258 } 2259 2260 static void 2261 wg_schedule_rekey_timer(struct wg_peer *wgp) 2262 { 2263 int timeout = MIN(wg_rekey_after_time, INT_MAX/hz); 2264 2265 callout_schedule(&wgp->wgp_rekey_timer, timeout * hz); 2266 } 2267 2268 static void 2269 wg_send_keepalive_msg(struct wg_peer *wgp, struct wg_session *wgs) 2270 { 2271 struct mbuf *m; 2272 2273 /* 2274 * [W] 6.5 Passive Keepalive 2275 * "A keepalive message is simply a transport data message with 2276 * a zero-length encapsulated encrypted inner-packet." 2277 */ 2278 m = m_gethdr(M_WAIT, MT_DATA); 2279 wg_send_data_msg(wgp, wgs, m); 2280 } 2281 2282 static bool 2283 wg_need_to_send_init_message(struct wg_session *wgs) 2284 { 2285 /* 2286 * [W] 6.2 Transport Message Limits 2287 * "if a peer is the initiator of a current secure session, 2288 * WireGuard will send a handshake initiation message to begin 2289 * a new secure session ... 
if after receiving a transport data 2290 * message, the current secure session is (REJECT-AFTER-TIME − 2291 * KEEPALIVE-TIMEOUT − REKEY-TIMEOUT) seconds old and it has 2292 * not yet acted upon this event." 2293 */ 2294 return wgs->wgs_is_initiator && wgs->wgs_time_last_data_sent == 0 && 2295 (time_uptime - wgs->wgs_time_established) >= 2296 (wg_reject_after_time - wg_keepalive_timeout - wg_rekey_timeout); 2297 } 2298 2299 static void 2300 wg_schedule_peer_task(struct wg_peer *wgp, int task) 2301 { 2302 2303 mutex_enter(wgp->wgp_intr_lock); 2304 WG_DLOG("tasks=%d, task=%d\n", wgp->wgp_tasks, task); 2305 if (wgp->wgp_tasks == 0) 2306 /* 2307 * XXX If the current CPU is already loaded -- e.g., if 2308 * there's already a bunch of handshakes queued up -- 2309 * consider tossing this over to another CPU to 2310 * distribute the load. 2311 */ 2312 workqueue_enqueue(wg_wq, &wgp->wgp_work, NULL); 2313 wgp->wgp_tasks |= task; 2314 mutex_exit(wgp->wgp_intr_lock); 2315 } 2316 2317 static void 2318 wg_change_endpoint(struct wg_peer *wgp, const struct sockaddr *new) 2319 { 2320 struct wg_sockaddr *wgsa_prev; 2321 2322 WG_TRACE("Changing endpoint"); 2323 2324 memcpy(wgp->wgp_endpoint0, new, new->sa_len); 2325 wgsa_prev = wgp->wgp_endpoint; 2326 atomic_store_release(&wgp->wgp_endpoint, wgp->wgp_endpoint0); 2327 wgp->wgp_endpoint0 = wgsa_prev; 2328 atomic_store_release(&wgp->wgp_endpoint_available, true); 2329 2330 wg_schedule_peer_task(wgp, WGP_TASK_ENDPOINT_CHANGED); 2331 } 2332 2333 static bool 2334 wg_validate_inner_packet(const char *packet, size_t decrypted_len, int *af) 2335 { 2336 uint16_t packet_len; 2337 const struct ip *ip; 2338 2339 if (__predict_false(decrypted_len < sizeof(struct ip))) 2340 return false; 2341 2342 ip = (const struct ip *)packet; 2343 if (ip->ip_v == 4) 2344 *af = AF_INET; 2345 else if (ip->ip_v == 6) 2346 *af = AF_INET6; 2347 else 2348 return false; 2349 2350 WG_DLOG("af=%d\n", *af); 2351 2352 if (*af == AF_INET) { 2353 packet_len = ntohs(ip->ip_len); 2354 } else { 2355 const struct ip6_hdr *ip6; 2356 2357 if (__predict_false(decrypted_len < sizeof(struct ip6_hdr))) 2358 return false; 2359 2360 ip6 = (const struct ip6_hdr *)packet; 2361 packet_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen); 2362 } 2363 2364 WG_DLOG("packet_len=%u\n", packet_len); 2365 if (packet_len > decrypted_len) 2366 return false; 2367 2368 return true; 2369 } 2370 2371 static bool 2372 wg_validate_route(struct wg_softc *wg, struct wg_peer *wgp_expected, 2373 int af, char *packet) 2374 { 2375 struct sockaddr_storage ss; 2376 struct sockaddr *sa; 2377 struct psref psref; 2378 struct wg_peer *wgp; 2379 bool ok; 2380 2381 /* 2382 * II CRYPTOKEY ROUTING 2383 * "it will only accept it if its source IP resolves in the 2384 * table to the public key used in the secure session for 2385 * decrypting it." 
2386 */ 2387 2388 if (af == AF_INET) { 2389 const struct ip *ip = (const struct ip *)packet; 2390 struct sockaddr_in *sin = (struct sockaddr_in *)&ss; 2391 sockaddr_in_init(sin, &ip->ip_src, 0); 2392 sa = sintosa(sin); 2393 #ifdef INET6 2394 } else { 2395 const struct ip6_hdr *ip6 = (const struct ip6_hdr *)packet; 2396 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss; 2397 sockaddr_in6_init(sin6, &ip6->ip6_src, 0, 0, 0); 2398 sa = sin6tosa(sin6); 2399 #endif 2400 } 2401 2402 wgp = wg_pick_peer_by_sa(wg, sa, &psref); 2403 ok = (wgp == wgp_expected); 2404 if (wgp != NULL) 2405 wg_put_peer(wgp, &psref); 2406 2407 return ok; 2408 } 2409 2410 static void 2411 wg_session_dtor_timer(void *arg) 2412 { 2413 struct wg_peer *wgp = arg; 2414 2415 WG_TRACE("enter"); 2416 2417 wg_schedule_peer_task(wgp, WGP_TASK_DESTROY_PREV_SESSION); 2418 } 2419 2420 static void 2421 wg_schedule_session_dtor_timer(struct wg_peer *wgp) 2422 { 2423 2424 /* 1 second grace period */ 2425 callout_schedule(&wgp->wgp_session_dtor_timer, hz); 2426 } 2427 2428 static bool 2429 sockaddr_port_match(const struct sockaddr *sa1, const struct sockaddr *sa2) 2430 { 2431 if (sa1->sa_family != sa2->sa_family) 2432 return false; 2433 2434 switch (sa1->sa_family) { 2435 case AF_INET: 2436 return satocsin(sa1)->sin_port == satocsin(sa2)->sin_port; 2437 case AF_INET6: 2438 return satocsin6(sa1)->sin6_port == satocsin6(sa2)->sin6_port; 2439 default: 2440 return true; 2441 } 2442 } 2443 2444 static void 2445 wg_update_endpoint_if_necessary(struct wg_peer *wgp, 2446 const struct sockaddr *src) 2447 { 2448 struct wg_sockaddr *wgsa; 2449 struct psref psref; 2450 2451 wgsa = wg_get_endpoint_sa(wgp, &psref); 2452 2453 #ifdef WG_DEBUG_LOG 2454 char oldaddr[128], newaddr[128]; 2455 sockaddr_format(wgsatosa(wgsa), oldaddr, sizeof(oldaddr)); 2456 sockaddr_format(src, newaddr, sizeof(newaddr)); 2457 WG_DLOG("old=%s, new=%s\n", oldaddr, newaddr); 2458 #endif 2459 2460 /* 2461 * III: "Since the packet has authenticated correctly, the source IP of 2462 * the outer UDP/IP packet is used to update the endpoint for peer..." 2463 */ 2464 if (__predict_false(sockaddr_cmp(src, wgsatosa(wgsa)) != 0 || 2465 !sockaddr_port_match(src, wgsatosa(wgsa)))) { 2466 /* XXX We can't change the endpoint twice in a short period */ 2467 if (atomic_swap_uint(&wgp->wgp_endpoint_changing, 1) == 0) { 2468 wg_change_endpoint(wgp, src); 2469 } 2470 } 2471 2472 wg_put_sa(wgp, wgsa, &psref); 2473 } 2474 2475 static void 2476 wg_handle_msg_data(struct wg_softc *wg, struct mbuf *m, 2477 const struct sockaddr *src) 2478 { 2479 struct wg_msg_data *wgmd; 2480 char *encrypted_buf = NULL, *decrypted_buf; 2481 size_t encrypted_len, decrypted_len; 2482 struct wg_session *wgs; 2483 struct wg_peer *wgp; 2484 int state; 2485 size_t mlen; 2486 struct psref psref; 2487 int error, af; 2488 bool success, free_encrypted_buf = false, ok; 2489 struct mbuf *n; 2490 2491 KASSERT(m->m_len >= sizeof(struct wg_msg_data)); 2492 wgmd = mtod(m, struct wg_msg_data *); 2493 2494 KASSERT(wgmd->wgmd_type == htole32(WG_MSG_TYPE_DATA)); 2495 WG_TRACE("data"); 2496 2497 /* Find the putative session, or drop. */ 2498 wgs = wg_lookup_session_by_index(wg, wgmd->wgmd_receiver, &psref); 2499 if (wgs == NULL) { 2500 WG_TRACE("No session found"); 2501 m_freem(m); 2502 return; 2503 } 2504 2505 /* 2506 * We are only ready to handle data when in INIT_PASSIVE, 2507 * ESTABLISHED, or DESTROYING. All transitions out of that 2508 * state dissociate the session index and drain psrefs. 
2509 */ 2510 state = atomic_load_relaxed(&wgs->wgs_state); 2511 switch (state) { 2512 case WGS_STATE_UNKNOWN: 2513 panic("wg session %p in unknown state has session index %u", 2514 wgs, wgmd->wgmd_receiver); 2515 case WGS_STATE_INIT_ACTIVE: 2516 WG_TRACE("not yet ready for data"); 2517 goto out; 2518 case WGS_STATE_INIT_PASSIVE: 2519 case WGS_STATE_ESTABLISHED: 2520 case WGS_STATE_DESTROYING: 2521 break; 2522 } 2523 2524 /* 2525 * Get the peer, for rate-limited logs (XXX MPSAFE, dtrace) and 2526 * to update the endpoint if authentication succeeds. 2527 */ 2528 wgp = wgs->wgs_peer; 2529 2530 /* 2531 * Reject outrageously wrong sequence numbers before doing any 2532 * crypto work or taking any locks. 2533 */ 2534 error = sliwin_check_fast(&wgs->wgs_recvwin->window, 2535 le64toh(wgmd->wgmd_counter)); 2536 if (error) { 2537 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2538 "out-of-window packet: %"PRIu64"\n", 2539 le64toh(wgmd->wgmd_counter)); 2540 goto out; 2541 } 2542 2543 /* Ensure the payload and authenticator are contiguous. */ 2544 mlen = m_length(m); 2545 encrypted_len = mlen - sizeof(*wgmd); 2546 if (encrypted_len < WG_AUTHTAG_LEN) { 2547 WG_DLOG("Short encrypted_len: %lu\n", encrypted_len); 2548 goto out; 2549 } 2550 success = m_ensure_contig(&m, sizeof(*wgmd) + encrypted_len); 2551 if (success) { 2552 encrypted_buf = mtod(m, char *) + sizeof(*wgmd); 2553 } else { 2554 encrypted_buf = kmem_intr_alloc(encrypted_len, KM_NOSLEEP); 2555 if (encrypted_buf == NULL) { 2556 WG_DLOG("failed to allocate encrypted_buf\n"); 2557 goto out; 2558 } 2559 m_copydata(m, sizeof(*wgmd), encrypted_len, encrypted_buf); 2560 free_encrypted_buf = true; 2561 } 2562 /* m_ensure_contig may change m regardless of its result */ 2563 KASSERT(m->m_len >= sizeof(*wgmd)); 2564 wgmd = mtod(m, struct wg_msg_data *); 2565 2566 /* 2567 * Get a buffer for the plaintext. Add WG_AUTHTAG_LEN to avoid 2568 * a zero-length buffer (XXX). Drop if plaintext is longer 2569 * than MCLBYTES (XXX). 2570 */ 2571 decrypted_len = encrypted_len - WG_AUTHTAG_LEN; 2572 if (decrypted_len > MCLBYTES) { 2573 /* FIXME handle larger data than MCLBYTES */ 2574 WG_DLOG("couldn't handle larger data than MCLBYTES\n"); 2575 goto out; 2576 } 2577 n = wg_get_mbuf(0, decrypted_len + WG_AUTHTAG_LEN); 2578 if (n == NULL) { 2579 WG_DLOG("wg_get_mbuf failed\n"); 2580 goto out; 2581 } 2582 decrypted_buf = mtod(n, char *); 2583 2584 /* Decrypt and verify the packet. */ 2585 WG_DLOG("mlen=%lu, encrypted_len=%lu\n", mlen, encrypted_len); 2586 error = wg_algo_aead_dec(decrypted_buf, 2587 encrypted_len - WG_AUTHTAG_LEN /* can be 0 */, 2588 wgs->wgs_tkey_recv, le64toh(wgmd->wgmd_counter), encrypted_buf, 2589 encrypted_len, NULL, 0); 2590 if (error != 0) { 2591 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2592 "failed to wg_algo_aead_dec\n"); 2593 m_freem(n); 2594 goto out; 2595 } 2596 WG_DLOG("outsize=%u\n", (u_int)decrypted_len); 2597 2598 /* Packet is genuine. Reject it if a replay or just too old. */ 2599 mutex_enter(&wgs->wgs_recvwin->lock); 2600 error = sliwin_update(&wgs->wgs_recvwin->window, 2601 le64toh(wgmd->wgmd_counter)); 2602 mutex_exit(&wgs->wgs_recvwin->lock); 2603 if (error) { 2604 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2605 "replay or out-of-window packet: %"PRIu64"\n", 2606 le64toh(wgmd->wgmd_counter)); 2607 m_freem(n); 2608 goto out; 2609 } 2610 2611 /* We're done with m now; free it and chuck the pointers. 
*/ 2612 m_freem(m); 2613 m = NULL; 2614 wgmd = NULL; 2615 2616 /* 2617 * Validate the encapsulated packet header and get the address 2618 * family, or drop. 2619 */ 2620 ok = wg_validate_inner_packet(decrypted_buf, decrypted_len, &af); 2621 if (!ok) { 2622 m_freem(n); 2623 goto out; 2624 } 2625 2626 /* 2627 * The packet is genuine. Update the peer's endpoint if the 2628 * source address changed. 2629 * 2630 * XXX How to prevent DoS by replaying genuine packets from the 2631 * wrong source address? 2632 */ 2633 wg_update_endpoint_if_necessary(wgp, src); 2634 2635 /* Submit it into our network stack if routable. */ 2636 ok = wg_validate_route(wg, wgp, af, decrypted_buf); 2637 if (ok) { 2638 wg->wg_ops->input(&wg->wg_if, n, af); 2639 } else { 2640 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2641 "invalid source address\n"); 2642 m_freem(n); 2643 /* 2644 * The inner address is invalid however the session is valid 2645 * so continue the session processing below. 2646 */ 2647 } 2648 n = NULL; 2649 2650 /* Update the state machine if necessary. */ 2651 if (__predict_false(state == WGS_STATE_INIT_PASSIVE)) { 2652 /* 2653 * We were waiting for the initiator to send their 2654 * first data transport message, and that has happened. 2655 * Schedule a task to establish this session. 2656 */ 2657 wg_schedule_peer_task(wgp, WGP_TASK_ESTABLISH_SESSION); 2658 } else { 2659 if (__predict_false(wg_need_to_send_init_message(wgs))) { 2660 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 2661 } 2662 /* 2663 * [W] 6.5 Passive Keepalive 2664 * "If a peer has received a validly-authenticated transport 2665 * data message (section 5.4.6), but does not have any packets 2666 * itself to send back for KEEPALIVE-TIMEOUT seconds, it sends 2667 * a keepalive message." 2668 */ 2669 WG_DLOG("time_uptime=%ju wgs_time_last_data_sent=%ju\n", 2670 (uintmax_t)time_uptime, 2671 (uintmax_t)wgs->wgs_time_last_data_sent); 2672 if ((time_uptime - wgs->wgs_time_last_data_sent) >= 2673 wg_keepalive_timeout) { 2674 WG_TRACE("Schedule sending keepalive message"); 2675 /* 2676 * We can't send a keepalive message here to avoid 2677 * a deadlock; we already hold the solock of a socket 2678 * that is used to send the message. 2679 */ 2680 wg_schedule_peer_task(wgp, 2681 WGP_TASK_SEND_KEEPALIVE_MESSAGE); 2682 } 2683 } 2684 out: 2685 wg_put_session(wgs, &psref); 2686 if (m != NULL) 2687 m_freem(m); 2688 if (free_encrypted_buf) 2689 kmem_intr_free(encrypted_buf, encrypted_len); 2690 } 2691 2692 static void 2693 wg_handle_msg_cookie(struct wg_softc *wg, const struct wg_msg_cookie *wgmc) 2694 { 2695 struct wg_session *wgs; 2696 struct wg_peer *wgp; 2697 struct psref psref; 2698 int error; 2699 uint8_t key[WG_HASH_LEN]; 2700 uint8_t cookie[WG_COOKIE_LEN]; 2701 2702 WG_TRACE("cookie msg received"); 2703 2704 /* Find the putative session. */ 2705 wgs = wg_lookup_session_by_index(wg, wgmc->wgmc_receiver, &psref); 2706 if (wgs == NULL) { 2707 WG_TRACE("No session found"); 2708 return; 2709 } 2710 2711 /* Lock the peer so we can update the cookie state. */ 2712 wgp = wgs->wgs_peer; 2713 mutex_enter(wgp->wgp_lock); 2714 2715 if (!wgp->wgp_last_sent_mac1_valid) { 2716 WG_TRACE("No valid mac1 sent (or expired)"); 2717 goto out; 2718 } 2719 2720 /* Decrypt the cookie and store it for later handshake retry. 
*/ 2721 wg_algo_mac_cookie(key, sizeof(key), wgp->wgp_pubkey, 2722 sizeof(wgp->wgp_pubkey)); 2723 error = wg_algo_xaead_dec(cookie, sizeof(cookie), key, 2724 wgmc->wgmc_cookie, sizeof(wgmc->wgmc_cookie), 2725 wgp->wgp_last_sent_mac1, sizeof(wgp->wgp_last_sent_mac1), 2726 wgmc->wgmc_salt); 2727 if (error != 0) { 2728 WG_LOG_RATECHECK(&wgp->wgp_ppsratecheck, LOG_DEBUG, 2729 "wg_algo_aead_dec for cookie failed: error=%d\n", error); 2730 goto out; 2731 } 2732 /* 2733 * [W] 6.6: Interaction with Cookie Reply System 2734 * "it should simply store the decrypted cookie value from the cookie 2735 * reply message, and wait for the expiration of the REKEY-TIMEOUT 2736 * timer for retrying a handshake initiation message." 2737 */ 2738 wgp->wgp_latest_cookie_time = time_uptime; 2739 memcpy(wgp->wgp_latest_cookie, cookie, sizeof(wgp->wgp_latest_cookie)); 2740 out: 2741 mutex_exit(wgp->wgp_lock); 2742 wg_put_session(wgs, &psref); 2743 } 2744 2745 static struct mbuf * 2746 wg_validate_msg_header(struct wg_softc *wg, struct mbuf *m) 2747 { 2748 struct wg_msg wgm; 2749 size_t mbuflen; 2750 size_t msglen; 2751 2752 /* 2753 * Get the mbuf chain length. It is already guaranteed, by 2754 * wg_overudp_cb, to be large enough for a struct wg_msg. 2755 */ 2756 mbuflen = m_length(m); 2757 KASSERT(mbuflen >= sizeof(struct wg_msg)); 2758 2759 /* 2760 * Copy the message header (32-bit message type) out -- we'll 2761 * worry about contiguity and alignment later. 2762 */ 2763 m_copydata(m, 0, sizeof(wgm), &wgm); 2764 switch (le32toh(wgm.wgm_type)) { 2765 case WG_MSG_TYPE_INIT: 2766 msglen = sizeof(struct wg_msg_init); 2767 break; 2768 case WG_MSG_TYPE_RESP: 2769 msglen = sizeof(struct wg_msg_resp); 2770 break; 2771 case WG_MSG_TYPE_COOKIE: 2772 msglen = sizeof(struct wg_msg_cookie); 2773 break; 2774 case WG_MSG_TYPE_DATA: 2775 msglen = sizeof(struct wg_msg_data); 2776 break; 2777 default: 2778 WG_LOG_RATECHECK(&wg->wg_ppsratecheck, LOG_DEBUG, 2779 "Unexpected msg type: %u\n", le32toh(wgm.wgm_type)); 2780 goto error; 2781 } 2782 2783 /* Verify the mbuf chain is long enough for this type of message. */ 2784 if (__predict_false(mbuflen < msglen)) { 2785 WG_DLOG("Invalid msg size: mbuflen=%lu type=%u\n", mbuflen, 2786 le32toh(wgm.wgm_type)); 2787 goto error; 2788 } 2789 2790 /* Make the message header contiguous if necessary. 
*/ 2791 if (__predict_false(m->m_len < msglen)) { 2792 m = m_pullup(m, msglen); 2793 if (m == NULL) 2794 return NULL; 2795 } 2796 2797 return m; 2798 2799 error: 2800 m_freem(m); 2801 return NULL; 2802 } 2803 2804 static void 2805 wg_handle_packet(struct wg_softc *wg, struct mbuf *m, 2806 const struct sockaddr *src) 2807 { 2808 struct wg_msg *wgm; 2809 2810 m = wg_validate_msg_header(wg, m); 2811 if (__predict_false(m == NULL)) 2812 return; 2813 2814 KASSERT(m->m_len >= sizeof(struct wg_msg)); 2815 wgm = mtod(m, struct wg_msg *); 2816 switch (le32toh(wgm->wgm_type)) { 2817 case WG_MSG_TYPE_INIT: 2818 wg_handle_msg_init(wg, (struct wg_msg_init *)wgm, src); 2819 break; 2820 case WG_MSG_TYPE_RESP: 2821 wg_handle_msg_resp(wg, (struct wg_msg_resp *)wgm, src); 2822 break; 2823 case WG_MSG_TYPE_COOKIE: 2824 wg_handle_msg_cookie(wg, (struct wg_msg_cookie *)wgm); 2825 break; 2826 case WG_MSG_TYPE_DATA: 2827 wg_handle_msg_data(wg, m, src); 2828 /* wg_handle_msg_data frees m for us */ 2829 return; 2830 default: 2831 panic("invalid message type: %d", le32toh(wgm->wgm_type)); 2832 } 2833 2834 m_freem(m); 2835 } 2836 2837 static void 2838 wg_receive_packets(struct wg_softc *wg, const int af) 2839 { 2840 2841 for (;;) { 2842 int error, flags; 2843 struct socket *so; 2844 struct mbuf *m = NULL; 2845 struct uio dummy_uio; 2846 struct mbuf *paddr = NULL; 2847 struct sockaddr *src; 2848 2849 so = wg_get_so_by_af(wg, af); 2850 flags = MSG_DONTWAIT; 2851 dummy_uio.uio_resid = 1000000000; 2852 2853 error = so->so_receive(so, &paddr, &dummy_uio, &m, NULL, 2854 &flags); 2855 if (error || m == NULL) { 2856 //if (error == EWOULDBLOCK) 2857 return; 2858 } 2859 2860 KASSERT(paddr != NULL); 2861 KASSERT(paddr->m_len >= sizeof(struct sockaddr)); 2862 src = mtod(paddr, struct sockaddr *); 2863 2864 wg_handle_packet(wg, m, src); 2865 } 2866 } 2867 2868 static void 2869 wg_get_peer(struct wg_peer *wgp, struct psref *psref) 2870 { 2871 2872 psref_acquire(psref, &wgp->wgp_psref, wg_psref_class); 2873 } 2874 2875 static void 2876 wg_put_peer(struct wg_peer *wgp, struct psref *psref) 2877 { 2878 2879 psref_release(psref, &wgp->wgp_psref, wg_psref_class); 2880 } 2881 2882 static void 2883 wg_task_send_init_message(struct wg_softc *wg, struct wg_peer *wgp) 2884 { 2885 struct wg_session *wgs; 2886 2887 WG_TRACE("WGP_TASK_SEND_INIT_MESSAGE"); 2888 2889 KASSERT(mutex_owned(wgp->wgp_lock)); 2890 2891 if (!atomic_load_acquire(&wgp->wgp_endpoint_available)) { 2892 WGLOG(LOG_DEBUG, "No endpoint available\n"); 2893 /* XXX should do something? */ 2894 return; 2895 } 2896 2897 wgs = wgp->wgp_session_stable; 2898 if (wgs->wgs_state == WGS_STATE_UNKNOWN) { 2899 /* XXX What if the unstable session is already INIT_ACTIVE? 
*/ 2900 wg_send_handshake_msg_init(wg, wgp); 2901 } else { 2902 /* rekey */ 2903 wgs = wgp->wgp_session_unstable; 2904 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) 2905 wg_send_handshake_msg_init(wg, wgp); 2906 } 2907 } 2908 2909 static void 2910 wg_task_retry_handshake(struct wg_softc *wg, struct wg_peer *wgp) 2911 { 2912 struct wg_session *wgs; 2913 2914 WG_TRACE("WGP_TASK_RETRY_HANDSHAKE"); 2915 2916 KASSERT(mutex_owned(wgp->wgp_lock)); 2917 KASSERT(wgp->wgp_handshake_start_time != 0); 2918 2919 wgs = wgp->wgp_session_unstable; 2920 if (wgs->wgs_state != WGS_STATE_INIT_ACTIVE) 2921 return; 2922 2923 /* 2924 * XXX no real need to assign a new index here, but we do need 2925 * to transition to UNKNOWN temporarily 2926 */ 2927 wg_put_session_index(wg, wgs); 2928 2929 /* [W] 6.4 Handshake Initiation Retransmission */ 2930 if ((time_uptime - wgp->wgp_handshake_start_time) > 2931 wg_rekey_attempt_time) { 2932 /* Give up handshaking */ 2933 wgp->wgp_handshake_start_time = 0; 2934 WG_TRACE("give up"); 2935 2936 /* 2937 * If a new data packet comes, handshaking will be retried 2938 * and a new session would be established at that time, 2939 * however we don't want to send pending packets then. 2940 */ 2941 wg_purge_pending_packets(wgp); 2942 return; 2943 } 2944 2945 wg_task_send_init_message(wg, wgp); 2946 } 2947 2948 static void 2949 wg_task_establish_session(struct wg_softc *wg, struct wg_peer *wgp) 2950 { 2951 struct wg_session *wgs, *wgs_prev; 2952 struct mbuf *m; 2953 2954 KASSERT(mutex_owned(wgp->wgp_lock)); 2955 2956 wgs = wgp->wgp_session_unstable; 2957 if (wgs->wgs_state != WGS_STATE_INIT_PASSIVE) 2958 /* XXX Can this happen? */ 2959 return; 2960 2961 wgs->wgs_state = WGS_STATE_ESTABLISHED; 2962 wgs->wgs_time_established = time_uptime; 2963 wgs->wgs_time_last_data_sent = 0; 2964 wgs->wgs_is_initiator = false; 2965 WG_TRACE("WGS_STATE_ESTABLISHED"); 2966 2967 wg_swap_sessions(wgp); 2968 KASSERT(wgs == wgp->wgp_session_stable); 2969 wgs_prev = wgp->wgp_session_unstable; 2970 getnanotime(&wgp->wgp_last_handshake_time); 2971 wgp->wgp_handshake_start_time = 0; 2972 wgp->wgp_last_sent_mac1_valid = false; 2973 wgp->wgp_last_sent_cookie_valid = false; 2974 2975 /* If we had a data packet queued up, send it. */ 2976 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) { 2977 kpreempt_disable(); 2978 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 2979 M_SETCTX(m, wgp); 2980 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 2981 WGLOG(LOG_ERR, "pktq full, dropping\n"); 2982 m_freem(m); 2983 } 2984 kpreempt_enable(); 2985 } 2986 2987 if (wgs_prev->wgs_state == WGS_STATE_ESTABLISHED) { 2988 /* Wait for wg_get_stable_session to drain. */ 2989 pserialize_perform(wgp->wgp_psz); 2990 2991 /* Transition ESTABLISHED->DESTROYING. 
*/ 2992 wgs_prev->wgs_state = WGS_STATE_DESTROYING; 2993 2994 /* We can't destroy the old session immediately */ 2995 wg_schedule_session_dtor_timer(wgp); 2996 } else { 2997 KASSERTMSG(wgs_prev->wgs_state == WGS_STATE_UNKNOWN, 2998 "state=%d", wgs_prev->wgs_state); 2999 wg_clear_states(wgs_prev); 3000 wgs_prev->wgs_state = WGS_STATE_UNKNOWN; 3001 } 3002 } 3003 3004 static void 3005 wg_task_endpoint_changed(struct wg_softc *wg, struct wg_peer *wgp) 3006 { 3007 3008 WG_TRACE("WGP_TASK_ENDPOINT_CHANGED"); 3009 3010 KASSERT(mutex_owned(wgp->wgp_lock)); 3011 3012 if (atomic_load_relaxed(&wgp->wgp_endpoint_changing)) { 3013 pserialize_perform(wgp->wgp_psz); 3014 mutex_exit(wgp->wgp_lock); 3015 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, 3016 wg_psref_class); 3017 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, 3018 wg_psref_class); 3019 mutex_enter(wgp->wgp_lock); 3020 atomic_store_release(&wgp->wgp_endpoint_changing, 0); 3021 } 3022 } 3023 3024 static void 3025 wg_task_send_keepalive_message(struct wg_softc *wg, struct wg_peer *wgp) 3026 { 3027 struct wg_session *wgs; 3028 3029 WG_TRACE("WGP_TASK_SEND_KEEPALIVE_MESSAGE"); 3030 3031 KASSERT(mutex_owned(wgp->wgp_lock)); 3032 3033 wgs = wgp->wgp_session_stable; 3034 if (wgs->wgs_state != WGS_STATE_ESTABLISHED) 3035 return; 3036 3037 wg_send_keepalive_msg(wgp, wgs); 3038 } 3039 3040 static void 3041 wg_task_destroy_prev_session(struct wg_softc *wg, struct wg_peer *wgp) 3042 { 3043 struct wg_session *wgs; 3044 3045 WG_TRACE("WGP_TASK_DESTROY_PREV_SESSION"); 3046 3047 KASSERT(mutex_owned(wgp->wgp_lock)); 3048 3049 wgs = wgp->wgp_session_unstable; 3050 if (wgs->wgs_state == WGS_STATE_DESTROYING) { 3051 wg_put_session_index(wg, wgs); 3052 } 3053 } 3054 3055 static void 3056 wg_peer_work(struct work *wk, void *cookie) 3057 { 3058 struct wg_peer *wgp = container_of(wk, struct wg_peer, wgp_work); 3059 struct wg_softc *wg = wgp->wgp_sc; 3060 int tasks; 3061 3062 mutex_enter(wgp->wgp_intr_lock); 3063 while ((tasks = wgp->wgp_tasks) != 0) { 3064 wgp->wgp_tasks = 0; 3065 mutex_exit(wgp->wgp_intr_lock); 3066 3067 mutex_enter(wgp->wgp_lock); 3068 if (ISSET(tasks, WGP_TASK_SEND_INIT_MESSAGE)) 3069 wg_task_send_init_message(wg, wgp); 3070 if (ISSET(tasks, WGP_TASK_RETRY_HANDSHAKE)) 3071 wg_task_retry_handshake(wg, wgp); 3072 if (ISSET(tasks, WGP_TASK_ESTABLISH_SESSION)) 3073 wg_task_establish_session(wg, wgp); 3074 if (ISSET(tasks, WGP_TASK_ENDPOINT_CHANGED)) 3075 wg_task_endpoint_changed(wg, wgp); 3076 if (ISSET(tasks, WGP_TASK_SEND_KEEPALIVE_MESSAGE)) 3077 wg_task_send_keepalive_message(wg, wgp); 3078 if (ISSET(tasks, WGP_TASK_DESTROY_PREV_SESSION)) 3079 wg_task_destroy_prev_session(wg, wgp); 3080 mutex_exit(wgp->wgp_lock); 3081 3082 mutex_enter(wgp->wgp_intr_lock); 3083 } 3084 mutex_exit(wgp->wgp_intr_lock); 3085 } 3086 3087 static void 3088 wg_job(struct threadpool_job *job) 3089 { 3090 struct wg_softc *wg = container_of(job, struct wg_softc, wg_job); 3091 int bound, upcalls; 3092 3093 mutex_enter(wg->wg_intr_lock); 3094 while ((upcalls = wg->wg_upcalls) != 0) { 3095 wg->wg_upcalls = 0; 3096 mutex_exit(wg->wg_intr_lock); 3097 bound = curlwp_bind(); 3098 if (ISSET(upcalls, WG_UPCALL_INET)) 3099 wg_receive_packets(wg, AF_INET); 3100 if (ISSET(upcalls, WG_UPCALL_INET6)) 3101 wg_receive_packets(wg, AF_INET6); 3102 curlwp_bindx(bound); 3103 mutex_enter(wg->wg_intr_lock); 3104 } 3105 threadpool_job_done(job); 3106 mutex_exit(wg->wg_intr_lock); 3107 } 3108 3109 static int 3110 wg_bind_port(struct wg_softc *wg, const uint16_t port) 3111 { 3112 int error; 
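	/*
	 * Illustrative note: this binds both the IPv4 and, when INET6 is
	 * configured, the IPv6 UDP socket to the requested listen port on
	 * the wildcard address; a nonzero port equal to the current
	 * wg_listen_port is a no-op.  If the IPv6 sobind() fails after the
	 * IPv4 one has succeeded, the IPv4 socket is left bound to the new
	 * port even though an error is returned and wg_listen_port is not
	 * updated.  A hypothetical caller sketch (port number only
	 * illustrative):
	 *
	 *	error = wg_bind_port(wg, 51820);
	 *	if (error)
	 *		WGLOG(LOG_ERR, "cannot bind port: %d\n", error);
	 */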
3113 uint16_t old_port = wg->wg_listen_port; 3114 3115 if (port != 0 && old_port == port) 3116 return 0; 3117 3118 struct sockaddr_in _sin, *sin = &_sin; 3119 sin->sin_len = sizeof(*sin); 3120 sin->sin_family = AF_INET; 3121 sin->sin_addr.s_addr = INADDR_ANY; 3122 sin->sin_port = htons(port); 3123 3124 error = sobind(wg->wg_so4, sintosa(sin), curlwp); 3125 if (error != 0) 3126 return error; 3127 3128 #ifdef INET6 3129 struct sockaddr_in6 _sin6, *sin6 = &_sin6; 3130 sin6->sin6_len = sizeof(*sin6); 3131 sin6->sin6_family = AF_INET6; 3132 sin6->sin6_addr = in6addr_any; 3133 sin6->sin6_port = htons(port); 3134 3135 error = sobind(wg->wg_so6, sin6tosa(sin6), curlwp); 3136 if (error != 0) 3137 return error; 3138 #endif 3139 3140 wg->wg_listen_port = port; 3141 3142 return 0; 3143 } 3144 3145 static void 3146 wg_so_upcall(struct socket *so, void *cookie, int events, int waitflag) 3147 { 3148 struct wg_softc *wg = cookie; 3149 int reason; 3150 3151 reason = (so->so_proto->pr_domain->dom_family == AF_INET) ? 3152 WG_UPCALL_INET : 3153 WG_UPCALL_INET6; 3154 3155 mutex_enter(wg->wg_intr_lock); 3156 wg->wg_upcalls |= reason; 3157 threadpool_schedule_job(wg->wg_threadpool, &wg->wg_job); 3158 mutex_exit(wg->wg_intr_lock); 3159 } 3160 3161 static int 3162 wg_overudp_cb(struct mbuf **mp, int offset, struct socket *so, 3163 struct sockaddr *src, void *arg) 3164 { 3165 struct wg_softc *wg = arg; 3166 struct wg_msg wgm; 3167 struct mbuf *m = *mp; 3168 3169 WG_TRACE("enter"); 3170 3171 /* Verify the mbuf chain is long enough to have a wg msg header. */ 3172 KASSERT(offset <= m_length(m)); 3173 if (__predict_false(m_length(m) - offset < sizeof(struct wg_msg))) { 3174 /* drop on the floor */ 3175 m_freem(m); 3176 return -1; 3177 } 3178 3179 /* 3180 * Copy the message header (32-bit message type) out -- we'll 3181 * worry about contiguity and alignment later. 3182 */ 3183 m_copydata(m, offset, sizeof(struct wg_msg), &wgm); 3184 WG_DLOG("type=%d\n", le32toh(wgm.wgm_type)); 3185 3186 /* 3187 * Handle DATA packets promptly as they arrive. Other packets 3188 * may require expensive public-key crypto and are not as 3189 * sensitive to latency, so defer them to the worker thread. 
3190 */ 3191 switch (le32toh(wgm.wgm_type)) { 3192 case WG_MSG_TYPE_DATA: 3193 /* handle immediately */ 3194 m_adj(m, offset); 3195 if (__predict_false(m->m_len < sizeof(struct wg_msg_data))) { 3196 m = m_pullup(m, sizeof(struct wg_msg_data)); 3197 if (m == NULL) 3198 return -1; 3199 } 3200 wg_handle_msg_data(wg, m, src); 3201 *mp = NULL; 3202 return 1; 3203 case WG_MSG_TYPE_INIT: 3204 case WG_MSG_TYPE_RESP: 3205 case WG_MSG_TYPE_COOKIE: 3206 /* pass through to so_receive in wg_receive_packets */ 3207 return 0; 3208 default: 3209 /* drop on the floor */ 3210 m_freem(m); 3211 return -1; 3212 } 3213 } 3214 3215 static int 3216 wg_socreate(struct wg_softc *wg, int af, struct socket **sop) 3217 { 3218 int error; 3219 struct socket *so; 3220 3221 error = socreate(af, &so, SOCK_DGRAM, 0, curlwp, NULL); 3222 if (error != 0) 3223 return error; 3224 3225 solock(so); 3226 so->so_upcallarg = wg; 3227 so->so_upcall = wg_so_upcall; 3228 so->so_rcv.sb_flags |= SB_UPCALL; 3229 if (af == AF_INET) 3230 in_pcb_register_overudp_cb(sotoinpcb(so), wg_overudp_cb, wg); 3231 #if INET6 3232 else 3233 in6_pcb_register_overudp_cb(sotoin6pcb(so), wg_overudp_cb, wg); 3234 #endif 3235 sounlock(so); 3236 3237 *sop = so; 3238 3239 return 0; 3240 } 3241 3242 static bool 3243 wg_session_hit_limits(struct wg_session *wgs) 3244 { 3245 3246 /* 3247 * [W] 6.2: Transport Message Limits 3248 * "After REJECT-AFTER-MESSAGES transport data messages or after the 3249 * current secure session is REJECT-AFTER-TIME seconds old, whichever 3250 * comes first, WireGuard will refuse to send any more transport data 3251 * messages using the current secure session, ..." 3252 */ 3253 KASSERT(wgs->wgs_time_established != 0); 3254 if ((time_uptime - wgs->wgs_time_established) > wg_reject_after_time) { 3255 WG_DLOG("The session hits REJECT_AFTER_TIME\n"); 3256 return true; 3257 } else if (wg_session_get_send_counter(wgs) > 3258 wg_reject_after_messages) { 3259 WG_DLOG("The session hits REJECT_AFTER_MESSAGES\n"); 3260 return true; 3261 } 3262 3263 return false; 3264 } 3265 3266 static void 3267 wgintr(void *cookie) 3268 { 3269 struct wg_peer *wgp; 3270 struct wg_session *wgs; 3271 struct mbuf *m; 3272 struct psref psref; 3273 3274 while ((m = pktq_dequeue(wg_pktq)) != NULL) { 3275 wgp = M_GETCTX(m, struct wg_peer *); 3276 if ((wgs = wg_get_stable_session(wgp, &psref)) == NULL) { 3277 WG_TRACE("no stable session"); 3278 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3279 goto next0; 3280 } 3281 if (__predict_false(wg_session_hit_limits(wgs))) { 3282 WG_TRACE("stable session hit limits"); 3283 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3284 goto next1; 3285 } 3286 wg_send_data_msg(wgp, wgs, m); 3287 m = NULL; /* consumed */ 3288 next1: wg_put_session(wgs, &psref); 3289 next0: if (m) 3290 m_freem(m); 3291 /* XXX Yield to avoid userland starvation? 
*/ 3292 } 3293 } 3294 3295 static void 3296 wg_rekey_timer(void *arg) 3297 { 3298 struct wg_peer *wgp = arg; 3299 3300 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3301 } 3302 3303 static void 3304 wg_purge_pending_packets(struct wg_peer *wgp) 3305 { 3306 struct mbuf *m; 3307 3308 if ((m = atomic_swap_ptr(&wgp->wgp_pending, NULL)) != NULL) 3309 m_freem(m); 3310 pktq_barrier(wg_pktq); 3311 } 3312 3313 static void 3314 wg_handshake_timeout_timer(void *arg) 3315 { 3316 struct wg_peer *wgp = arg; 3317 3318 WG_TRACE("enter"); 3319 3320 wg_schedule_peer_task(wgp, WGP_TASK_RETRY_HANDSHAKE); 3321 } 3322 3323 static struct wg_peer * 3324 wg_alloc_peer(struct wg_softc *wg) 3325 { 3326 struct wg_peer *wgp; 3327 3328 wgp = kmem_zalloc(sizeof(*wgp), KM_SLEEP); 3329 3330 wgp->wgp_sc = wg; 3331 callout_init(&wgp->wgp_rekey_timer, CALLOUT_MPSAFE); 3332 callout_setfunc(&wgp->wgp_rekey_timer, wg_rekey_timer, wgp); 3333 callout_init(&wgp->wgp_handshake_timeout_timer, CALLOUT_MPSAFE); 3334 callout_setfunc(&wgp->wgp_handshake_timeout_timer, 3335 wg_handshake_timeout_timer, wgp); 3336 callout_init(&wgp->wgp_session_dtor_timer, CALLOUT_MPSAFE); 3337 callout_setfunc(&wgp->wgp_session_dtor_timer, 3338 wg_session_dtor_timer, wgp); 3339 PSLIST_ENTRY_INIT(wgp, wgp_peerlist_entry); 3340 wgp->wgp_endpoint_changing = false; 3341 wgp->wgp_endpoint_available = false; 3342 wgp->wgp_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 3343 wgp->wgp_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 3344 wgp->wgp_psz = pserialize_create(); 3345 psref_target_init(&wgp->wgp_psref, wg_psref_class); 3346 3347 wgp->wgp_endpoint = kmem_zalloc(sizeof(*wgp->wgp_endpoint), KM_SLEEP); 3348 wgp->wgp_endpoint0 = kmem_zalloc(sizeof(*wgp->wgp_endpoint0), KM_SLEEP); 3349 psref_target_init(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class); 3350 psref_target_init(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class); 3351 3352 struct wg_session *wgs; 3353 wgp->wgp_session_stable = 3354 kmem_zalloc(sizeof(*wgp->wgp_session_stable), KM_SLEEP); 3355 wgp->wgp_session_unstable = 3356 kmem_zalloc(sizeof(*wgp->wgp_session_unstable), KM_SLEEP); 3357 wgs = wgp->wgp_session_stable; 3358 wgs->wgs_peer = wgp; 3359 wgs->wgs_state = WGS_STATE_UNKNOWN; 3360 psref_target_init(&wgs->wgs_psref, wg_psref_class); 3361 #ifndef __HAVE_ATOMIC64_LOADSTORE 3362 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET); 3363 #endif 3364 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP); 3365 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET); 3366 3367 wgs = wgp->wgp_session_unstable; 3368 wgs->wgs_peer = wgp; 3369 wgs->wgs_state = WGS_STATE_UNKNOWN; 3370 psref_target_init(&wgs->wgs_psref, wg_psref_class); 3371 #ifndef __HAVE_ATOMIC64_LOADSTORE 3372 mutex_init(&wgs->wgs_send_counter_lock, MUTEX_DEFAULT, IPL_SOFTNET); 3373 #endif 3374 wgs->wgs_recvwin = kmem_zalloc(sizeof(*wgs->wgs_recvwin), KM_SLEEP); 3375 mutex_init(&wgs->wgs_recvwin->lock, MUTEX_DEFAULT, IPL_SOFTNET); 3376 3377 return wgp; 3378 } 3379 3380 static void 3381 wg_destroy_peer(struct wg_peer *wgp) 3382 { 3383 struct wg_session *wgs; 3384 struct wg_softc *wg = wgp->wgp_sc; 3385 3386 /* Prevent new packets from this peer on any source address. 
*/ 3387 rw_enter(wg->wg_rwlock, RW_WRITER); 3388 for (int i = 0; i < wgp->wgp_n_allowedips; i++) { 3389 struct wg_allowedip *wga = &wgp->wgp_allowedips[i]; 3390 struct radix_node_head *rnh = wg_rnh(wg, wga->wga_family); 3391 struct radix_node *rn; 3392 3393 KASSERT(rnh != NULL); 3394 rn = rnh->rnh_deladdr(&wga->wga_sa_addr, 3395 &wga->wga_sa_mask, rnh); 3396 if (rn == NULL) { 3397 char addrstr[128]; 3398 sockaddr_format(&wga->wga_sa_addr, addrstr, 3399 sizeof(addrstr)); 3400 WGLOG(LOG_WARNING, "Couldn't delete %s", addrstr); 3401 } 3402 } 3403 rw_exit(wg->wg_rwlock); 3404 3405 /* Purge pending packets. */ 3406 wg_purge_pending_packets(wgp); 3407 3408 /* Halt all packet processing and timeouts. */ 3409 callout_halt(&wgp->wgp_rekey_timer, NULL); 3410 callout_halt(&wgp->wgp_handshake_timeout_timer, NULL); 3411 callout_halt(&wgp->wgp_session_dtor_timer, NULL); 3412 3413 /* Wait for any queued work to complete. */ 3414 workqueue_wait(wg_wq, &wgp->wgp_work); 3415 3416 wgs = wgp->wgp_session_unstable; 3417 if (wgs->wgs_state != WGS_STATE_UNKNOWN) { 3418 mutex_enter(wgp->wgp_lock); 3419 wg_destroy_session(wg, wgs); 3420 mutex_exit(wgp->wgp_lock); 3421 } 3422 mutex_destroy(&wgs->wgs_recvwin->lock); 3423 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin)); 3424 #ifndef __HAVE_ATOMIC64_LOADSTORE 3425 mutex_destroy(&wgs->wgs_send_counter_lock); 3426 #endif 3427 kmem_free(wgs, sizeof(*wgs)); 3428 3429 wgs = wgp->wgp_session_stable; 3430 if (wgs->wgs_state != WGS_STATE_UNKNOWN) { 3431 mutex_enter(wgp->wgp_lock); 3432 wg_destroy_session(wg, wgs); 3433 mutex_exit(wgp->wgp_lock); 3434 } 3435 mutex_destroy(&wgs->wgs_recvwin->lock); 3436 kmem_free(wgs->wgs_recvwin, sizeof(*wgs->wgs_recvwin)); 3437 #ifndef __HAVE_ATOMIC64_LOADSTORE 3438 mutex_destroy(&wgs->wgs_send_counter_lock); 3439 #endif 3440 kmem_free(wgs, sizeof(*wgs)); 3441 3442 psref_target_destroy(&wgp->wgp_endpoint->wgsa_psref, wg_psref_class); 3443 psref_target_destroy(&wgp->wgp_endpoint0->wgsa_psref, wg_psref_class); 3444 kmem_free(wgp->wgp_endpoint, sizeof(*wgp->wgp_endpoint)); 3445 kmem_free(wgp->wgp_endpoint0, sizeof(*wgp->wgp_endpoint0)); 3446 3447 pserialize_destroy(wgp->wgp_psz); 3448 mutex_obj_free(wgp->wgp_intr_lock); 3449 mutex_obj_free(wgp->wgp_lock); 3450 3451 kmem_free(wgp, sizeof(*wgp)); 3452 } 3453 3454 static void 3455 wg_destroy_all_peers(struct wg_softc *wg) 3456 { 3457 struct wg_peer *wgp, *wgp0 __diagused; 3458 void *garbage_byname, *garbage_bypubkey; 3459 3460 restart: 3461 garbage_byname = garbage_bypubkey = NULL; 3462 mutex_enter(wg->wg_lock); 3463 WG_PEER_WRITER_FOREACH(wgp, wg) { 3464 if (wgp->wgp_name[0]) { 3465 wgp0 = thmap_del(wg->wg_peers_byname, wgp->wgp_name, 3466 strlen(wgp->wgp_name)); 3467 KASSERT(wgp0 == wgp); 3468 garbage_byname = thmap_stage_gc(wg->wg_peers_byname); 3469 } 3470 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 3471 sizeof(wgp->wgp_pubkey)); 3472 KASSERT(wgp0 == wgp); 3473 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey); 3474 WG_PEER_WRITER_REMOVE(wgp); 3475 wg->wg_npeers--; 3476 mutex_enter(wgp->wgp_lock); 3477 pserialize_perform(wgp->wgp_psz); 3478 mutex_exit(wgp->wgp_lock); 3479 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry); 3480 break; 3481 } 3482 mutex_exit(wg->wg_lock); 3483 3484 if (wgp == NULL) 3485 return; 3486 3487 psref_target_destroy(&wgp->wgp_psref, wg_psref_class); 3488 3489 wg_destroy_peer(wgp); 3490 thmap_gc(wg->wg_peers_byname, garbage_byname); 3491 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey); 3492 3493 goto restart; 3494 } 3495 3496 static int 3497 
wg_destroy_peer_name(struct wg_softc *wg, const char *name) 3498 { 3499 struct wg_peer *wgp, *wgp0 __diagused; 3500 void *garbage_byname, *garbage_bypubkey; 3501 3502 mutex_enter(wg->wg_lock); 3503 wgp = thmap_del(wg->wg_peers_byname, name, strlen(name)); 3504 if (wgp != NULL) { 3505 wgp0 = thmap_del(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 3506 sizeof(wgp->wgp_pubkey)); 3507 KASSERT(wgp0 == wgp); 3508 garbage_byname = thmap_stage_gc(wg->wg_peers_byname); 3509 garbage_bypubkey = thmap_stage_gc(wg->wg_peers_bypubkey); 3510 WG_PEER_WRITER_REMOVE(wgp); 3511 wg->wg_npeers--; 3512 mutex_enter(wgp->wgp_lock); 3513 pserialize_perform(wgp->wgp_psz); 3514 mutex_exit(wgp->wgp_lock); 3515 PSLIST_ENTRY_DESTROY(wgp, wgp_peerlist_entry); 3516 } 3517 mutex_exit(wg->wg_lock); 3518 3519 if (wgp == NULL) 3520 return ENOENT; 3521 3522 psref_target_destroy(&wgp->wgp_psref, wg_psref_class); 3523 3524 wg_destroy_peer(wgp); 3525 thmap_gc(wg->wg_peers_byname, garbage_byname); 3526 thmap_gc(wg->wg_peers_bypubkey, garbage_bypubkey); 3527 3528 return 0; 3529 } 3530 3531 static int 3532 wg_if_attach(struct wg_softc *wg) 3533 { 3534 int error; 3535 3536 wg->wg_if.if_addrlen = 0; 3537 wg->wg_if.if_mtu = WG_MTU; 3538 wg->wg_if.if_flags = IFF_MULTICAST; 3539 wg->wg_if.if_extflags = IFEF_NO_LINK_STATE_CHANGE; 3540 wg->wg_if.if_extflags |= IFEF_MPSAFE; 3541 wg->wg_if.if_ioctl = wg_ioctl; 3542 wg->wg_if.if_output = wg_output; 3543 wg->wg_if.if_init = wg_init; 3544 #ifdef ALTQ 3545 wg->wg_if.if_start = wg_start; 3546 #endif 3547 wg->wg_if.if_stop = wg_stop; 3548 wg->wg_if.if_type = IFT_OTHER; 3549 wg->wg_if.if_dlt = DLT_NULL; 3550 wg->wg_if.if_softc = wg; 3551 #ifdef ALTQ 3552 IFQ_SET_READY(&wg->wg_if.if_snd); 3553 #endif 3554 3555 error = if_initialize(&wg->wg_if); 3556 if (error != 0) 3557 return error; 3558 3559 if_alloc_sadl(&wg->wg_if); 3560 if_register(&wg->wg_if); 3561 3562 bpf_attach(&wg->wg_if, DLT_NULL, sizeof(uint32_t)); 3563 3564 return 0; 3565 } 3566 3567 static void 3568 wg_if_detach(struct wg_softc *wg) 3569 { 3570 struct ifnet *ifp = &wg->wg_if; 3571 3572 bpf_detach(ifp); 3573 if_detach(ifp); 3574 } 3575 3576 static int 3577 wg_clone_create(struct if_clone *ifc, int unit) 3578 { 3579 struct wg_softc *wg; 3580 int error; 3581 3582 wg_guarantee_initialized(); 3583 3584 error = wg_count_inc(); 3585 if (error) 3586 return error; 3587 3588 wg = kmem_zalloc(sizeof(*wg), KM_SLEEP); 3589 3590 if_initname(&wg->wg_if, ifc->ifc_name, unit); 3591 3592 PSLIST_INIT(&wg->wg_peers); 3593 wg->wg_peers_bypubkey = thmap_create(0, NULL, THMAP_NOCOPY); 3594 wg->wg_peers_byname = thmap_create(0, NULL, THMAP_NOCOPY); 3595 wg->wg_sessions_byindex = thmap_create(0, NULL, THMAP_NOCOPY); 3596 wg->wg_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE); 3597 wg->wg_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET); 3598 wg->wg_rwlock = rw_obj_alloc(); 3599 threadpool_job_init(&wg->wg_job, wg_job, wg->wg_intr_lock, 3600 "%s", if_name(&wg->wg_if)); 3601 wg->wg_ops = &wg_ops_rumpkernel; 3602 3603 error = threadpool_get(&wg->wg_threadpool, PRI_NONE); 3604 if (error) 3605 goto fail0; 3606 3607 #ifdef INET 3608 error = wg_socreate(wg, AF_INET, &wg->wg_so4); 3609 if (error) 3610 goto fail1; 3611 rn_inithead((void **)&wg->wg_rtable_ipv4, 3612 offsetof(struct sockaddr_in, sin_addr) * NBBY); 3613 #endif 3614 #ifdef INET6 3615 error = wg_socreate(wg, AF_INET6, &wg->wg_so6); 3616 if (error) 3617 goto fail2; 3618 rn_inithead((void **)&wg->wg_rtable_ipv6, 3619 offsetof(struct sockaddr_in6, sin6_addr) * NBBY); 3620 #endif 3621 3622 error = 
wg_if_attach(wg); 3623 if (error) 3624 goto fail3; 3625 3626 return 0; 3627 3628 fail4: __unused 3629 wg_if_detach(wg); 3630 fail3: wg_destroy_all_peers(wg); 3631 #ifdef INET6 3632 solock(wg->wg_so6); 3633 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL; 3634 sounlock(wg->wg_so6); 3635 #endif 3636 #ifdef INET 3637 solock(wg->wg_so4); 3638 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL; 3639 sounlock(wg->wg_so4); 3640 #endif 3641 mutex_enter(wg->wg_intr_lock); 3642 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job); 3643 mutex_exit(wg->wg_intr_lock); 3644 #ifdef INET6 3645 if (wg->wg_rtable_ipv6 != NULL) 3646 free(wg->wg_rtable_ipv6, M_RTABLE); 3647 soclose(wg->wg_so6); 3648 fail2: 3649 #endif 3650 #ifdef INET 3651 if (wg->wg_rtable_ipv4 != NULL) 3652 free(wg->wg_rtable_ipv4, M_RTABLE); 3653 soclose(wg->wg_so4); 3654 fail1: 3655 #endif 3656 threadpool_put(wg->wg_threadpool, PRI_NONE); 3657 fail0: threadpool_job_destroy(&wg->wg_job); 3658 rw_obj_free(wg->wg_rwlock); 3659 mutex_obj_free(wg->wg_intr_lock); 3660 mutex_obj_free(wg->wg_lock); 3661 thmap_destroy(wg->wg_sessions_byindex); 3662 thmap_destroy(wg->wg_peers_byname); 3663 thmap_destroy(wg->wg_peers_bypubkey); 3664 PSLIST_DESTROY(&wg->wg_peers); 3665 kmem_free(wg, sizeof(*wg)); 3666 wg_count_dec(); 3667 return error; 3668 } 3669 3670 static int 3671 wg_clone_destroy(struct ifnet *ifp) 3672 { 3673 struct wg_softc *wg = container_of(ifp, struct wg_softc, wg_if); 3674 3675 #ifdef WG_RUMPKERNEL 3676 if (wg_user_mode(wg)) { 3677 rumpuser_wg_destroy(wg->wg_user); 3678 wg->wg_user = NULL; 3679 } 3680 #endif 3681 3682 wg_if_detach(wg); 3683 wg_destroy_all_peers(wg); 3684 #ifdef INET6 3685 solock(wg->wg_so6); 3686 wg->wg_so6->so_rcv.sb_flags &= ~SB_UPCALL; 3687 sounlock(wg->wg_so6); 3688 #endif 3689 #ifdef INET 3690 solock(wg->wg_so4); 3691 wg->wg_so4->so_rcv.sb_flags &= ~SB_UPCALL; 3692 sounlock(wg->wg_so4); 3693 #endif 3694 mutex_enter(wg->wg_intr_lock); 3695 threadpool_cancel_job(wg->wg_threadpool, &wg->wg_job); 3696 mutex_exit(wg->wg_intr_lock); 3697 #ifdef INET6 3698 if (wg->wg_rtable_ipv6 != NULL) 3699 free(wg->wg_rtable_ipv6, M_RTABLE); 3700 soclose(wg->wg_so6); 3701 #endif 3702 #ifdef INET 3703 if (wg->wg_rtable_ipv4 != NULL) 3704 free(wg->wg_rtable_ipv4, M_RTABLE); 3705 soclose(wg->wg_so4); 3706 #endif 3707 threadpool_put(wg->wg_threadpool, PRI_NONE); 3708 threadpool_job_destroy(&wg->wg_job); 3709 rw_obj_free(wg->wg_rwlock); 3710 mutex_obj_free(wg->wg_intr_lock); 3711 mutex_obj_free(wg->wg_lock); 3712 thmap_destroy(wg->wg_sessions_byindex); 3713 thmap_destroy(wg->wg_peers_byname); 3714 thmap_destroy(wg->wg_peers_bypubkey); 3715 PSLIST_DESTROY(&wg->wg_peers); 3716 kmem_free(wg, sizeof(*wg)); 3717 wg_count_dec(); 3718 3719 return 0; 3720 } 3721 3722 static struct wg_peer * 3723 wg_pick_peer_by_sa(struct wg_softc *wg, const struct sockaddr *sa, 3724 struct psref *psref) 3725 { 3726 struct radix_node_head *rnh; 3727 struct radix_node *rn; 3728 struct wg_peer *wgp = NULL; 3729 struct wg_allowedip *wga; 3730 3731 #ifdef WG_DEBUG_LOG 3732 char addrstr[128]; 3733 sockaddr_format(sa, addrstr, sizeof(addrstr)); 3734 WG_DLOG("sa=%s\n", addrstr); 3735 #endif 3736 3737 rw_enter(wg->wg_rwlock, RW_READER); 3738 3739 rnh = wg_rnh(wg, sa->sa_family); 3740 if (rnh == NULL) 3741 goto out; 3742 3743 rn = rnh->rnh_matchaddr(sa, rnh); 3744 if (rn == NULL || (rn->rn_flags & RNF_ROOT) != 0) 3745 goto out; 3746 3747 WG_TRACE("success"); 3748 3749 wga = container_of(rn, struct wg_allowedip, wga_nodes[0]); 3750 wgp = wga->wga_peer; 3751 wg_get_peer(wgp, psref); 3752 3753 
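	/*
	 * Illustrative note: the rnh_matchaddr() call above is the
	 * cryptokey-routing lookup; the longest-prefix match of sa over
	 * all peers' allowed-ips selects the owning peer, and the psref
	 * taken just above must be released by the caller with
	 * wg_put_peer().  A minimal caller sketch, mirroring wg_output()
	 * below:
	 *
	 *	struct psref psref;
	 *	struct wg_peer *wgp = wg_pick_peer_by_sa(wg, dst, &psref);
	 *	if (wgp != NULL) {
	 *		// ... use wgp while the psref is held ...
	 *		wg_put_peer(wgp, &psref);
	 *	}
	 */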
out: 3754 rw_exit(wg->wg_rwlock); 3755 return wgp; 3756 } 3757 3758 static void 3759 wg_fill_msg_data(struct wg_softc *wg, struct wg_peer *wgp, 3760 struct wg_session *wgs, struct wg_msg_data *wgmd) 3761 { 3762 3763 memset(wgmd, 0, sizeof(*wgmd)); 3764 wgmd->wgmd_type = htole32(WG_MSG_TYPE_DATA); 3765 wgmd->wgmd_receiver = wgs->wgs_remote_index; 3766 /* [W] 5.4.6: msg.counter := Nm^send */ 3767 /* [W] 5.4.6: Nm^send := Nm^send + 1 */ 3768 wgmd->wgmd_counter = htole64(wg_session_inc_send_counter(wgs)); 3769 WG_DLOG("counter=%"PRIu64"\n", le64toh(wgmd->wgmd_counter)); 3770 } 3771 3772 static int 3773 wg_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst, 3774 const struct rtentry *rt) 3775 { 3776 struct wg_softc *wg = ifp->if_softc; 3777 struct wg_peer *wgp = NULL; 3778 struct wg_session *wgs = NULL; 3779 struct psref wgp_psref, wgs_psref; 3780 int bound; 3781 int error; 3782 3783 bound = curlwp_bind(); 3784 3785 /* TODO make the nest limit configurable via sysctl */ 3786 error = if_tunnel_check_nesting(ifp, m, 1); 3787 if (error) { 3788 WGLOG(LOG_ERR, "tunneling loop detected and packet dropped\n"); 3789 goto out0; 3790 } 3791 3792 #ifdef ALTQ 3793 bool altq = atomic_load_relaxed(&ifp->if_snd.altq_flags) 3794 & ALTQF_ENABLED; 3795 if (altq) 3796 IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family); 3797 #endif 3798 3799 bpf_mtap_af(ifp, dst->sa_family, m, BPF_D_OUT); 3800 3801 m->m_flags &= ~(M_BCAST|M_MCAST); 3802 3803 wgp = wg_pick_peer_by_sa(wg, dst, &wgp_psref); 3804 if (wgp == NULL) { 3805 WG_TRACE("peer not found"); 3806 error = EHOSTUNREACH; 3807 goto out0; 3808 } 3809 3810 /* Clear checksum-offload flags. */ 3811 m->m_pkthdr.csum_flags = 0; 3812 m->m_pkthdr.csum_data = 0; 3813 3814 /* Check whether there's an established session. */ 3815 wgs = wg_get_stable_session(wgp, &wgs_psref); 3816 if (wgs == NULL) { 3817 /* 3818 * No established session. If we're the first to try 3819 * sending data, schedule a handshake and queue the 3820 * packet for when the handshake is done; otherwise 3821 * just drop the packet and let the ongoing handshake 3822 * attempt continue. We could queue more data packets 3823 * but it's not clear that's worthwhile. 3824 */ 3825 if (atomic_cas_ptr(&wgp->wgp_pending, NULL, m) == NULL) { 3826 m = NULL; /* consume */ 3827 WG_TRACE("queued first packet; init handshake"); 3828 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 3829 } else { 3830 WG_TRACE("first packet already queued, dropping"); 3831 } 3832 goto out1; 3833 } 3834 3835 /* There's an established session. Toss it in the queue. 
*/ 3836 #ifdef ALTQ 3837 if (altq) { 3838 mutex_enter(ifp->if_snd.ifq_lock); 3839 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 3840 M_SETCTX(m, wgp); 3841 ALTQ_ENQUEUE(&ifp->if_snd, m, error); 3842 m = NULL; /* consume */ 3843 } 3844 mutex_exit(ifp->if_snd.ifq_lock); 3845 if (m == NULL) { 3846 wg_start(ifp); 3847 goto out2; 3848 } 3849 } 3850 #endif 3851 kpreempt_disable(); 3852 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 3853 M_SETCTX(m, wgp); 3854 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 3855 WGLOG(LOG_ERR, "pktq full, dropping\n"); 3856 error = ENOBUFS; 3857 goto out3; 3858 } 3859 m = NULL; /* consumed */ 3860 error = 0; 3861 out3: kpreempt_enable(); 3862 3863 #ifdef ALTQ 3864 out2: 3865 #endif 3866 wg_put_session(wgs, &wgs_psref); 3867 out1: wg_put_peer(wgp, &wgp_psref); 3868 out0: if (m) 3869 m_freem(m); 3870 curlwp_bindx(bound); 3871 return error; 3872 } 3873 3874 static int 3875 wg_send_udp(struct wg_peer *wgp, struct mbuf *m) 3876 { 3877 struct psref psref; 3878 struct wg_sockaddr *wgsa; 3879 int error; 3880 struct socket *so; 3881 3882 wgsa = wg_get_endpoint_sa(wgp, &psref); 3883 so = wg_get_so_by_peer(wgp, wgsa); 3884 solock(so); 3885 if (wgsatosa(wgsa)->sa_family == AF_INET) { 3886 error = udp_send(so, m, wgsatosa(wgsa), NULL, curlwp); 3887 } else { 3888 #ifdef INET6 3889 error = udp6_output(sotoin6pcb(so), m, wgsatosin6(wgsa), 3890 NULL, curlwp); 3891 #else 3892 m_freem(m); 3893 error = EPFNOSUPPORT; 3894 #endif 3895 } 3896 sounlock(so); 3897 wg_put_sa(wgp, wgsa, &psref); 3898 3899 return error; 3900 } 3901 3902 /* Inspired by pppoe_get_mbuf */ 3903 static struct mbuf * 3904 wg_get_mbuf(size_t leading_len, size_t len) 3905 { 3906 struct mbuf *m; 3907 3908 KASSERT(leading_len <= MCLBYTES); 3909 KASSERT(len <= MCLBYTES - leading_len); 3910 3911 m = m_gethdr(M_DONTWAIT, MT_DATA); 3912 if (m == NULL) 3913 return NULL; 3914 if (len + leading_len > MHLEN) { 3915 m_clget(m, M_DONTWAIT); 3916 if ((m->m_flags & M_EXT) == 0) { 3917 m_free(m); 3918 return NULL; 3919 } 3920 } 3921 m->m_data += leading_len; 3922 m->m_pkthdr.len = m->m_len = len; 3923 3924 return m; 3925 } 3926 3927 static int 3928 wg_send_data_msg(struct wg_peer *wgp, struct wg_session *wgs, 3929 struct mbuf *m) 3930 { 3931 struct wg_softc *wg = wgp->wgp_sc; 3932 int error; 3933 size_t inner_len, padded_len, encrypted_len; 3934 char *padded_buf = NULL; 3935 size_t mlen; 3936 struct wg_msg_data *wgmd; 3937 bool free_padded_buf = false; 3938 struct mbuf *n; 3939 size_t leading_len = max_linkhdr + sizeof(struct ip6_hdr) + 3940 sizeof(struct udphdr); 3941 3942 mlen = m_length(m); 3943 inner_len = mlen; 3944 padded_len = roundup(mlen, 16); 3945 encrypted_len = padded_len + WG_AUTHTAG_LEN; 3946 WG_DLOG("inner=%lu, padded=%lu, encrypted_len=%lu\n", 3947 inner_len, padded_len, encrypted_len); 3948 if (mlen != 0) { 3949 bool success; 3950 success = m_ensure_contig(&m, padded_len); 3951 if (success) { 3952 padded_buf = mtod(m, char *); 3953 } else { 3954 padded_buf = kmem_intr_alloc(padded_len, KM_NOSLEEP); 3955 if (padded_buf == NULL) { 3956 error = ENOBUFS; 3957 goto end; 3958 } 3959 free_padded_buf = true; 3960 m_copydata(m, 0, mlen, padded_buf); 3961 } 3962 memset(padded_buf + mlen, 0, padded_len - inner_len); 3963 } 3964 3965 n = wg_get_mbuf(leading_len, sizeof(*wgmd) + encrypted_len); 3966 if (n == NULL) { 3967 error = ENOBUFS; 3968 goto end; 3969 } 3970 KASSERT(n->m_len >= sizeof(*wgmd)); 3971 wgmd = mtod(n, struct wg_msg_data *); 3972 wg_fill_msg_data(wg, wgp, wgs, wgmd); 3973 /* [W] 5.4.6: AEAD(Tm^send, 
Nm^send, P, e) */ 3974 wg_algo_aead_enc((char *)wgmd + sizeof(*wgmd), encrypted_len, 3975 wgs->wgs_tkey_send, le64toh(wgmd->wgmd_counter), 3976 padded_buf, padded_len, 3977 NULL, 0); 3978 3979 error = wg->wg_ops->send_data_msg(wgp, n); 3980 if (error == 0) { 3981 struct ifnet *ifp = &wg->wg_if; 3982 if_statadd(ifp, if_obytes, mlen); 3983 if_statinc(ifp, if_opackets); 3984 if (wgs->wgs_is_initiator && 3985 wgs->wgs_time_last_data_sent == 0) { 3986 /* 3987 * [W] 6.2 Transport Message Limits 3988 * "if a peer is the initiator of a current secure 3989 * session, WireGuard will send a handshake initiation 3990 * message to begin a new secure session if, after 3991 * transmitting a transport data message, the current 3992 * secure session is REKEY-AFTER-TIME seconds old," 3993 */ 3994 wg_schedule_rekey_timer(wgp); 3995 } 3996 wgs->wgs_time_last_data_sent = time_uptime; 3997 if (wg_session_get_send_counter(wgs) >= 3998 wg_rekey_after_messages) { 3999 /* 4000 * [W] 6.2 Transport Message Limits 4001 * "WireGuard will try to create a new session, by 4002 * sending a handshake initiation message (section 4003 * 5.4.2), after it has sent REKEY-AFTER-MESSAGES 4004 * transport data messages..." 4005 */ 4006 wg_schedule_peer_task(wgp, WGP_TASK_SEND_INIT_MESSAGE); 4007 } 4008 } 4009 end: 4010 m_freem(m); 4011 if (free_padded_buf) 4012 kmem_intr_free(padded_buf, padded_len); 4013 return error; 4014 } 4015 4016 static void 4017 wg_input(struct ifnet *ifp, struct mbuf *m, const int af) 4018 { 4019 pktqueue_t *pktq; 4020 size_t pktlen; 4021 4022 KASSERT(af == AF_INET || af == AF_INET6); 4023 4024 WG_TRACE(""); 4025 4026 m_set_rcvif(m, ifp); 4027 pktlen = m->m_pkthdr.len; 4028 4029 bpf_mtap_af(ifp, af, m, BPF_D_IN); 4030 4031 switch (af) { 4032 case AF_INET: 4033 pktq = ip_pktq; 4034 break; 4035 #ifdef INET6 4036 case AF_INET6: 4037 pktq = ip6_pktq; 4038 break; 4039 #endif 4040 default: 4041 panic("invalid af=%d", af); 4042 } 4043 4044 kpreempt_disable(); 4045 const u_int h = curcpu()->ci_index; 4046 if (__predict_true(pktq_enqueue(pktq, m, h))) { 4047 if_statadd(ifp, if_ibytes, pktlen); 4048 if_statinc(ifp, if_ipackets); 4049 } else { 4050 m_freem(m); 4051 } 4052 kpreempt_enable(); 4053 } 4054 4055 static void 4056 wg_calc_pubkey(uint8_t pubkey[WG_STATIC_KEY_LEN], 4057 const uint8_t privkey[WG_STATIC_KEY_LEN]) 4058 { 4059 4060 crypto_scalarmult_base(pubkey, privkey); 4061 } 4062 4063 static int 4064 wg_rtable_add_route(struct wg_softc *wg, struct wg_allowedip *wga) 4065 { 4066 struct radix_node_head *rnh; 4067 struct radix_node *rn; 4068 int error = 0; 4069 4070 rw_enter(wg->wg_rwlock, RW_WRITER); 4071 rnh = wg_rnh(wg, wga->wga_family); 4072 KASSERT(rnh != NULL); 4073 rn = rnh->rnh_addaddr(&wga->wga_sa_addr, &wga->wga_sa_mask, rnh, 4074 wga->wga_nodes); 4075 rw_exit(wg->wg_rwlock); 4076 4077 if (rn == NULL) 4078 error = EEXIST; 4079 4080 return error; 4081 } 4082 4083 static int 4084 wg_handle_prop_peer(struct wg_softc *wg, prop_dictionary_t peer, 4085 struct wg_peer **wgpp) 4086 { 4087 int error = 0; 4088 const void *pubkey; 4089 size_t pubkey_len; 4090 const void *psk; 4091 size_t psk_len; 4092 const char *name = NULL; 4093 4094 if (prop_dictionary_get_string(peer, "name", &name)) { 4095 if (strlen(name) > WG_PEER_NAME_MAXLEN) { 4096 error = EINVAL; 4097 goto out; 4098 } 4099 } 4100 4101 if (!prop_dictionary_get_data(peer, "public_key", 4102 &pubkey, &pubkey_len)) { 4103 error = EINVAL; 4104 goto out; 4105 } 4106 #ifdef WG_DEBUG_DUMP 4107 { 4108 char *hex = gethexdump(pubkey, pubkey_len); 4109 
log(LOG_DEBUG, "pubkey=%p, pubkey_len=%lu\n%s\n", 4110 pubkey, pubkey_len, hex); 4111 puthexdump(hex, pubkey, pubkey_len); 4112 } 4113 #endif 4114 4115 struct wg_peer *wgp = wg_alloc_peer(wg); 4116 memcpy(wgp->wgp_pubkey, pubkey, sizeof(wgp->wgp_pubkey)); 4117 if (name != NULL) 4118 strncpy(wgp->wgp_name, name, sizeof(wgp->wgp_name)); 4119 4120 if (prop_dictionary_get_data(peer, "preshared_key", &psk, &psk_len)) { 4121 if (psk_len != sizeof(wgp->wgp_psk)) { 4122 error = EINVAL; 4123 goto out; 4124 } 4125 memcpy(wgp->wgp_psk, psk, sizeof(wgp->wgp_psk)); 4126 } 4127 4128 const void *addr; 4129 size_t addr_len; 4130 struct wg_sockaddr *wgsa = wgp->wgp_endpoint; 4131 4132 if (!prop_dictionary_get_data(peer, "endpoint", &addr, &addr_len)) 4133 goto skip_endpoint; 4134 if (addr_len < sizeof(*wgsatosa(wgsa)) || 4135 addr_len > sizeof(*wgsatoss(wgsa))) { 4136 error = EINVAL; 4137 goto out; 4138 } 4139 memcpy(wgsatoss(wgsa), addr, addr_len); 4140 switch (wgsa_family(wgsa)) { 4141 case AF_INET: 4142 #ifdef INET6 4143 case AF_INET6: 4144 #endif 4145 break; 4146 default: 4147 error = EPFNOSUPPORT; 4148 goto out; 4149 } 4150 if (addr_len != sockaddr_getsize_by_family(wgsa_family(wgsa))) { 4151 error = EINVAL; 4152 goto out; 4153 } 4154 { 4155 char addrstr[128]; 4156 sockaddr_format(wgsatosa(wgsa), addrstr, sizeof(addrstr)); 4157 WG_DLOG("addr=%s\n", addrstr); 4158 } 4159 wgp->wgp_endpoint_available = true; 4160 4161 prop_array_t allowedips; 4162 skip_endpoint: 4163 allowedips = prop_dictionary_get(peer, "allowedips"); 4164 if (allowedips == NULL) 4165 goto skip; 4166 4167 prop_object_iterator_t _it = prop_array_iterator(allowedips); 4168 prop_dictionary_t prop_allowedip; 4169 int j = 0; 4170 while ((prop_allowedip = prop_object_iterator_next(_it)) != NULL) { 4171 struct wg_allowedip *wga = &wgp->wgp_allowedips[j]; 4172 4173 if (!prop_dictionary_get_int(prop_allowedip, "family", 4174 &wga->wga_family)) 4175 continue; 4176 if (!prop_dictionary_get_data(prop_allowedip, "ip", 4177 &addr, &addr_len)) 4178 continue; 4179 if (!prop_dictionary_get_uint8(prop_allowedip, "cidr", 4180 &wga->wga_cidr)) 4181 continue; 4182 4183 switch (wga->wga_family) { 4184 case AF_INET: { 4185 struct sockaddr_in sin; 4186 char addrstr[128]; 4187 struct in_addr mask; 4188 struct sockaddr_in sin_mask; 4189 4190 if (addr_len != sizeof(struct in_addr)) 4191 return EINVAL; 4192 memcpy(&wga->wga_addr4, addr, addr_len); 4193 4194 sockaddr_in_init(&sin, (const struct in_addr *)addr, 4195 0); 4196 sockaddr_copy(&wga->wga_sa_addr, 4197 sizeof(sin), sintosa(&sin)); 4198 4199 sockaddr_format(sintosa(&sin), 4200 addrstr, sizeof(addrstr)); 4201 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr); 4202 4203 in_len2mask(&mask, wga->wga_cidr); 4204 sockaddr_in_init(&sin_mask, &mask, 0); 4205 sockaddr_copy(&wga->wga_sa_mask, 4206 sizeof(sin_mask), sintosa(&sin_mask)); 4207 4208 break; 4209 } 4210 #ifdef INET6 4211 case AF_INET6: { 4212 struct sockaddr_in6 sin6; 4213 char addrstr[128]; 4214 struct in6_addr mask; 4215 struct sockaddr_in6 sin6_mask; 4216 4217 if (addr_len != sizeof(struct in6_addr)) 4218 return EINVAL; 4219 memcpy(&wga->wga_addr6, addr, addr_len); 4220 4221 sockaddr_in6_init(&sin6, (const struct in6_addr *)addr, 4222 0, 0, 0); 4223 sockaddr_copy(&wga->wga_sa_addr, 4224 sizeof(sin6), sin6tosa(&sin6)); 4225 4226 sockaddr_format(sin6tosa(&sin6), 4227 addrstr, sizeof(addrstr)); 4228 WG_DLOG("addr=%s/%d\n", addrstr, wga->wga_cidr); 4229 4230 in6_prefixlen2mask(&mask, wga->wga_cidr); 4231 sockaddr_in6_init(&sin6_mask, &mask, 0, 0, 0); 4232 
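			/*
			 * Worked example: cidr = 64 yields the mask
			 * ffff:ffff:ffff:ffff:: from in6_prefixlen2mask();
			 * the address/mask sockaddr pair built here becomes
			 * the radix-tree key and netmask installed by
			 * wg_rtable_add_route() below.
			 */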
sockaddr_copy(&wga->wga_sa_mask, 4233 sizeof(sin6_mask), sin6tosa(&sin6_mask)); 4234 4235 break; 4236 } 4237 #endif 4238 default: 4239 error = EINVAL; 4240 goto out; 4241 } 4242 wga->wga_peer = wgp; 4243 4244 error = wg_rtable_add_route(wg, wga); 4245 if (error != 0) 4246 goto out; 4247 4248 j++; 4249 } 4250 wgp->wgp_n_allowedips = j; 4251 skip: 4252 *wgpp = wgp; 4253 out: 4254 return error; 4255 } 4256 4257 static int 4258 wg_alloc_prop_buf(char **_buf, struct ifdrv *ifd) 4259 { 4260 int error; 4261 char *buf; 4262 4263 WG_DLOG("buf=%p, len=%lu\n", ifd->ifd_data, ifd->ifd_len); 4264 buf = kmem_alloc(ifd->ifd_len + 1, KM_SLEEP); 4265 error = copyin(ifd->ifd_data, buf, ifd->ifd_len); 4266 if (error != 0) 4267 return error; 4268 buf[ifd->ifd_len] = '\0'; 4269 #ifdef WG_DEBUG_DUMP 4270 log(LOG_DEBUG, "%.*s\n", 4271 (int)MIN(INT_MAX, ifd->ifd_len), 4272 (const char *)buf); 4273 #endif 4274 *_buf = buf; 4275 return 0; 4276 } 4277 4278 static int 4279 wg_ioctl_set_private_key(struct wg_softc *wg, struct ifdrv *ifd) 4280 { 4281 int error; 4282 prop_dictionary_t prop_dict; 4283 char *buf = NULL; 4284 const void *privkey; 4285 size_t privkey_len; 4286 4287 error = wg_alloc_prop_buf(&buf, ifd); 4288 if (error != 0) 4289 return error; 4290 error = EINVAL; 4291 prop_dict = prop_dictionary_internalize(buf); 4292 if (prop_dict == NULL) 4293 goto out; 4294 if (!prop_dictionary_get_data(prop_dict, "private_key", 4295 &privkey, &privkey_len)) 4296 goto out; 4297 #ifdef WG_DEBUG_DUMP 4298 { 4299 char *hex = gethexdump(privkey, privkey_len); 4300 log(LOG_DEBUG, "privkey=%p, privkey_len=%lu\n%s\n", 4301 privkey, privkey_len, hex); 4302 puthexdump(hex, privkey, privkey_len); 4303 } 4304 #endif 4305 if (privkey_len != WG_STATIC_KEY_LEN) 4306 goto out; 4307 memcpy(wg->wg_privkey, privkey, WG_STATIC_KEY_LEN); 4308 wg_calc_pubkey(wg->wg_pubkey, wg->wg_privkey); 4309 error = 0; 4310 4311 out: 4312 kmem_free(buf, ifd->ifd_len + 1); 4313 return error; 4314 } 4315 4316 static int 4317 wg_ioctl_set_listen_port(struct wg_softc *wg, struct ifdrv *ifd) 4318 { 4319 int error; 4320 prop_dictionary_t prop_dict; 4321 char *buf = NULL; 4322 uint16_t port; 4323 4324 error = wg_alloc_prop_buf(&buf, ifd); 4325 if (error != 0) 4326 return error; 4327 error = EINVAL; 4328 prop_dict = prop_dictionary_internalize(buf); 4329 if (prop_dict == NULL) 4330 goto out; 4331 if (!prop_dictionary_get_uint16(prop_dict, "listen_port", &port)) 4332 goto out; 4333 4334 error = wg->wg_ops->bind_port(wg, (uint16_t)port); 4335 4336 out: 4337 kmem_free(buf, ifd->ifd_len + 1); 4338 return error; 4339 } 4340 4341 static int 4342 wg_ioctl_add_peer(struct wg_softc *wg, struct ifdrv *ifd) 4343 { 4344 int error; 4345 prop_dictionary_t prop_dict; 4346 char *buf = NULL; 4347 struct wg_peer *wgp = NULL, *wgp0 __diagused; 4348 4349 error = wg_alloc_prop_buf(&buf, ifd); 4350 if (error != 0) 4351 return error; 4352 error = EINVAL; 4353 prop_dict = prop_dictionary_internalize(buf); 4354 if (prop_dict == NULL) 4355 goto out; 4356 4357 error = wg_handle_prop_peer(wg, prop_dict, &wgp); 4358 if (error != 0) 4359 goto out; 4360 4361 mutex_enter(wg->wg_lock); 4362 if (thmap_get(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 4363 sizeof(wgp->wgp_pubkey)) != NULL || 4364 (wgp->wgp_name[0] && 4365 thmap_get(wg->wg_peers_byname, wgp->wgp_name, 4366 strlen(wgp->wgp_name)) != NULL)) { 4367 mutex_exit(wg->wg_lock); 4368 wg_destroy_peer(wgp); 4369 error = EEXIST; 4370 goto out; 4371 } 4372 wgp0 = thmap_put(wg->wg_peers_bypubkey, wgp->wgp_pubkey, 4373 sizeof(wgp->wgp_pubkey), wgp); 
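	/*
	 * thmap_put(9) returns the value already stored under the key if
	 * one is present, and the new value otherwise.  Duplicates were
	 * ruled out above while holding wg_lock, so the puts must store
	 * and return wgp itself, which the assertions below check.
	 */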
4374 KASSERT(wgp0 == wgp); 4375 if (wgp->wgp_name[0]) { 4376 wgp0 = thmap_put(wg->wg_peers_byname, wgp->wgp_name, 4377 strlen(wgp->wgp_name), wgp); 4378 KASSERT(wgp0 == wgp); 4379 } 4380 WG_PEER_WRITER_INSERT_HEAD(wgp, wg); 4381 wg->wg_npeers++; 4382 mutex_exit(wg->wg_lock); 4383 4384 out: 4385 kmem_free(buf, ifd->ifd_len + 1); 4386 return error; 4387 } 4388 4389 static int 4390 wg_ioctl_delete_peer(struct wg_softc *wg, struct ifdrv *ifd) 4391 { 4392 int error; 4393 prop_dictionary_t prop_dict; 4394 char *buf = NULL; 4395 const char *name; 4396 4397 error = wg_alloc_prop_buf(&buf, ifd); 4398 if (error != 0) 4399 return error; 4400 error = EINVAL; 4401 prop_dict = prop_dictionary_internalize(buf); 4402 if (prop_dict == NULL) 4403 goto out; 4404 4405 if (!prop_dictionary_get_string(prop_dict, "name", &name)) 4406 goto out; 4407 if (strlen(name) > WG_PEER_NAME_MAXLEN) 4408 goto out; 4409 4410 error = wg_destroy_peer_name(wg, name); 4411 out: 4412 kmem_free(buf, ifd->ifd_len + 1); 4413 return error; 4414 } 4415 4416 static int 4417 wg_ioctl_get(struct wg_softc *wg, struct ifdrv *ifd) 4418 { 4419 int error = ENOMEM; 4420 prop_dictionary_t prop_dict; 4421 prop_array_t peers = NULL; 4422 char *buf; 4423 struct wg_peer *wgp; 4424 int s, i; 4425 4426 prop_dict = prop_dictionary_create(); 4427 if (prop_dict == NULL) 4428 goto error; 4429 4430 if (!prop_dictionary_set_data(prop_dict, "private_key", wg->wg_privkey, 4431 WG_STATIC_KEY_LEN)) 4432 goto error; 4433 4434 if (wg->wg_listen_port != 0) { 4435 if (!prop_dictionary_set_uint16(prop_dict, "listen_port", 4436 wg->wg_listen_port)) 4437 goto error; 4438 } 4439 4440 if (wg->wg_npeers == 0) 4441 goto skip_peers; 4442 4443 peers = prop_array_create(); 4444 if (peers == NULL) 4445 goto error; 4446 4447 s = pserialize_read_enter(); 4448 i = 0; 4449 WG_PEER_READER_FOREACH(wgp, wg) { 4450 struct wg_sockaddr *wgsa; 4451 struct psref wgp_psref, wgsa_psref; 4452 prop_dictionary_t prop_peer; 4453 4454 wg_get_peer(wgp, &wgp_psref); 4455 pserialize_read_exit(s); 4456 4457 prop_peer = prop_dictionary_create(); 4458 if (prop_peer == NULL) 4459 goto next; 4460 4461 if (strlen(wgp->wgp_name) > 0) { 4462 if (!prop_dictionary_set_string(prop_peer, "name", 4463 wgp->wgp_name)) 4464 goto next; 4465 } 4466 4467 if (!prop_dictionary_set_data(prop_peer, "public_key", 4468 wgp->wgp_pubkey, sizeof(wgp->wgp_pubkey))) 4469 goto next; 4470 4471 uint8_t psk_zero[WG_PRESHARED_KEY_LEN] = {0}; 4472 if (!consttime_memequal(wgp->wgp_psk, psk_zero, 4473 sizeof(wgp->wgp_psk))) { 4474 if (!prop_dictionary_set_data(prop_peer, 4475 "preshared_key", 4476 wgp->wgp_psk, sizeof(wgp->wgp_psk))) 4477 goto next; 4478 } 4479 4480 wgsa = wg_get_endpoint_sa(wgp, &wgsa_psref); 4481 CTASSERT(AF_UNSPEC == 0); 4482 if (wgsa_family(wgsa) != 0 /*AF_UNSPEC*/ && 4483 !prop_dictionary_set_data(prop_peer, "endpoint", 4484 wgsatoss(wgsa), 4485 sockaddr_getsize_by_family(wgsa_family(wgsa)))) { 4486 wg_put_sa(wgp, wgsa, &wgsa_psref); 4487 goto next; 4488 } 4489 wg_put_sa(wgp, wgsa, &wgsa_psref); 4490 4491 const struct timespec *t = &wgp->wgp_last_handshake_time; 4492 4493 if (!prop_dictionary_set_uint64(prop_peer, 4494 "last_handshake_time_sec", t->tv_sec)) 4495 goto next; 4496 if (!prop_dictionary_set_uint32(prop_peer, 4497 "last_handshake_time_nsec", t->tv_nsec)) 4498 goto next; 4499 4500 if (wgp->wgp_n_allowedips == 0) 4501 goto skip_allowedips; 4502 4503 prop_array_t allowedips = prop_array_create(); 4504 if (allowedips == NULL) 4505 goto next; 4506 for (int j = 0; j < wgp->wgp_n_allowedips; j++) { 4507 
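			/*
			 * Each allowed IP is exported as a small dictionary
			 * with the keys "family", "cidr" and "ip", mirroring
			 * what wg_handle_prop_peer() accepts on input.
			 */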
struct wg_allowedip *wga = &wgp->wgp_allowedips[j]; 4508 prop_dictionary_t prop_allowedip; 4509 4510 prop_allowedip = prop_dictionary_create(); 4511 if (prop_allowedip == NULL) 4512 break; 4513 4514 if (!prop_dictionary_set_int(prop_allowedip, "family", 4515 wga->wga_family)) 4516 goto _next; 4517 if (!prop_dictionary_set_uint8(prop_allowedip, "cidr", 4518 wga->wga_cidr)) 4519 goto _next; 4520 4521 switch (wga->wga_family) { 4522 case AF_INET: 4523 if (!prop_dictionary_set_data(prop_allowedip, 4524 "ip", &wga->wga_addr4, 4525 sizeof(wga->wga_addr4))) 4526 goto _next; 4527 break; 4528 #ifdef INET6 4529 case AF_INET6: 4530 if (!prop_dictionary_set_data(prop_allowedip, 4531 "ip", &wga->wga_addr6, 4532 sizeof(wga->wga_addr6))) 4533 goto _next; 4534 break; 4535 #endif 4536 default: 4537 break; 4538 } 4539 prop_array_set(allowedips, j, prop_allowedip); 4540 _next: 4541 prop_object_release(prop_allowedip); 4542 } 4543 prop_dictionary_set(prop_peer, "allowedips", allowedips); 4544 prop_object_release(allowedips); 4545 4546 skip_allowedips: 4547 4548 prop_array_set(peers, i, prop_peer); 4549 next: 4550 if (prop_peer) 4551 prop_object_release(prop_peer); 4552 i++; 4553 4554 s = pserialize_read_enter(); 4555 wg_put_peer(wgp, &wgp_psref); 4556 } 4557 pserialize_read_exit(s); 4558 4559 prop_dictionary_set(prop_dict, "peers", peers); 4560 prop_object_release(peers); 4561 peers = NULL; 4562 4563 skip_peers: 4564 buf = prop_dictionary_externalize(prop_dict); 4565 if (buf == NULL) 4566 goto error; 4567 if (ifd->ifd_len < (strlen(buf) + 1)) { 4568 error = EINVAL; 4569 goto error; 4570 } 4571 error = copyout(buf, ifd->ifd_data, strlen(buf) + 1); 4572 4573 free(buf, 0); 4574 error: 4575 if (peers != NULL) 4576 prop_object_release(peers); 4577 if (prop_dict != NULL) 4578 prop_object_release(prop_dict); 4579 4580 return error; 4581 } 4582 4583 static int 4584 wg_ioctl(struct ifnet *ifp, u_long cmd, void *data) 4585 { 4586 struct wg_softc *wg = ifp->if_softc; 4587 struct ifreq *ifr = data; 4588 struct ifaddr *ifa = data; 4589 struct ifdrv *ifd = data; 4590 int error = 0; 4591 4592 switch (cmd) { 4593 case SIOCINITIFADDR: 4594 if (ifa->ifa_addr->sa_family != AF_LINK && 4595 (ifp->if_flags & (IFF_UP | IFF_RUNNING)) != 4596 (IFF_UP | IFF_RUNNING)) { 4597 ifp->if_flags |= IFF_UP; 4598 error = ifp->if_init(ifp); 4599 } 4600 return error; 4601 case SIOCADDMULTI: 4602 case SIOCDELMULTI: 4603 switch (ifr->ifr_addr.sa_family) { 4604 case AF_INET: /* IP supports Multicast */ 4605 break; 4606 #ifdef INET6 4607 case AF_INET6: /* IP6 supports Multicast */ 4608 break; 4609 #endif 4610 default: /* Other protocols doesn't support Multicast */ 4611 error = EAFNOSUPPORT; 4612 break; 4613 } 4614 return error; 4615 case SIOCSDRVSPEC: 4616 switch (ifd->ifd_cmd) { 4617 case WG_IOCTL_SET_PRIVATE_KEY: 4618 error = wg_ioctl_set_private_key(wg, ifd); 4619 break; 4620 case WG_IOCTL_SET_LISTEN_PORT: 4621 error = wg_ioctl_set_listen_port(wg, ifd); 4622 break; 4623 case WG_IOCTL_ADD_PEER: 4624 error = wg_ioctl_add_peer(wg, ifd); 4625 break; 4626 case WG_IOCTL_DELETE_PEER: 4627 error = wg_ioctl_delete_peer(wg, ifd); 4628 break; 4629 default: 4630 error = EINVAL; 4631 break; 4632 } 4633 return error; 4634 case SIOCGDRVSPEC: 4635 return wg_ioctl_get(wg, ifd); 4636 case SIOCSIFFLAGS: 4637 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 4638 break; 4639 switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { 4640 case IFF_RUNNING: 4641 /* 4642 * If interface is marked down and it is running, 4643 * then stop and disable it. 
4644 */ 4645 (*ifp->if_stop)(ifp, 1); 4646 break; 4647 case IFF_UP: 4648 /* 4649 * If interface is marked up and it is stopped, then 4650 * start it. 4651 */ 4652 error = (*ifp->if_init)(ifp); 4653 break; 4654 default: 4655 break; 4656 } 4657 return error; 4658 #ifdef WG_RUMPKERNEL 4659 case SIOCSLINKSTR: 4660 error = wg_ioctl_linkstr(wg, ifd); 4661 if (error == 0) 4662 wg->wg_ops = &wg_ops_rumpuser; 4663 return error; 4664 #endif 4665 default: 4666 break; 4667 } 4668 4669 error = ifioctl_common(ifp, cmd, data); 4670 4671 #ifdef WG_RUMPKERNEL 4672 if (!wg_user_mode(wg)) 4673 return error; 4674 4675 /* Do the same to the corresponding tun device on the host */ 4676 /* 4677 * XXX Actually the command has not been handled yet. It 4678 * will be handled via pr_ioctl form doifioctl later. 4679 */ 4680 switch (cmd) { 4681 case SIOCAIFADDR: 4682 case SIOCDIFADDR: { 4683 struct in_aliasreq _ifra = *(const struct in_aliasreq *)data; 4684 struct in_aliasreq *ifra = &_ifra; 4685 KASSERT(error == ENOTTY); 4686 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user), 4687 IFNAMSIZ); 4688 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET); 4689 if (error == 0) 4690 error = ENOTTY; 4691 break; 4692 } 4693 #ifdef INET6 4694 case SIOCAIFADDR_IN6: 4695 case SIOCDIFADDR_IN6: { 4696 struct in6_aliasreq _ifra = *(const struct in6_aliasreq *)data; 4697 struct in6_aliasreq *ifra = &_ifra; 4698 KASSERT(error == ENOTTY); 4699 strncpy(ifra->ifra_name, rumpuser_wg_get_tunname(wg->wg_user), 4700 IFNAMSIZ); 4701 error = rumpuser_wg_ioctl(wg->wg_user, cmd, ifra, AF_INET6); 4702 if (error == 0) 4703 error = ENOTTY; 4704 break; 4705 } 4706 #endif 4707 } 4708 #endif /* WG_RUMPKERNEL */ 4709 4710 return error; 4711 } 4712 4713 static int 4714 wg_init(struct ifnet *ifp) 4715 { 4716 4717 ifp->if_flags |= IFF_RUNNING; 4718 4719 /* TODO flush pending packets. */ 4720 return 0; 4721 } 4722 4723 #ifdef ALTQ 4724 static void 4725 wg_start(struct ifnet *ifp) 4726 { 4727 struct mbuf *m; 4728 4729 for (;;) { 4730 IFQ_DEQUEUE(&ifp->if_snd, m); 4731 if (m == NULL) 4732 break; 4733 4734 kpreempt_disable(); 4735 const uint32_t h = curcpu()->ci_index; // pktq_rps_hash(m) 4736 if (__predict_false(!pktq_enqueue(wg_pktq, m, h))) { 4737 WGLOG(LOG_ERR, "pktq full, dropping\n"); 4738 m_freem(m); 4739 } 4740 kpreempt_enable(); 4741 } 4742 } 4743 #endif 4744 4745 static void 4746 wg_stop(struct ifnet *ifp, int disable) 4747 { 4748 4749 KASSERT((ifp->if_flags & IFF_RUNNING) != 0); 4750 ifp->if_flags &= ~IFF_RUNNING; 4751 4752 /* Need to do something? 
*/ 4753 } 4754 4755 #ifdef WG_DEBUG_PARAMS 4756 SYSCTL_SETUP(sysctl_net_wg_setup, "sysctl net.wg setup") 4757 { 4758 const struct sysctlnode *node = NULL; 4759 4760 sysctl_createv(clog, 0, NULL, &node, 4761 CTLFLAG_PERMANENT, 4762 CTLTYPE_NODE, "wg", 4763 SYSCTL_DESCR("wg(4)"), 4764 NULL, 0, NULL, 0, 4765 CTL_NET, CTL_CREATE, CTL_EOL); 4766 sysctl_createv(clog, 0, &node, NULL, 4767 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4768 CTLTYPE_QUAD, "rekey_after_messages", 4769 SYSCTL_DESCR("session lifetime in messages"), 4770 NULL, 0, &wg_rekey_after_messages, 0, CTL_CREATE, CTL_EOL); 4771 sysctl_createv(clog, 0, &node, NULL, 4772 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4773 CTLTYPE_INT, "rekey_after_time", 4774 SYSCTL_DESCR("session lifetime"), 4775 NULL, 0, &wg_rekey_after_time, 0, CTL_CREATE, CTL_EOL); 4776 sysctl_createv(clog, 0, &node, NULL, 4777 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4778 CTLTYPE_INT, "rekey_timeout", 4779 SYSCTL_DESCR("session handshake retry time"), 4780 NULL, 0, &wg_rekey_timeout, 0, CTL_CREATE, CTL_EOL); 4781 sysctl_createv(clog, 0, &node, NULL, 4782 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4783 CTLTYPE_INT, "rekey_attempt_time", 4784 SYSCTL_DESCR("session handshake timeout"), 4785 NULL, 0, &wg_rekey_attempt_time, 0, CTL_CREATE, CTL_EOL); 4786 sysctl_createv(clog, 0, &node, NULL, 4787 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4788 CTLTYPE_INT, "keepalive_timeout", 4789 SYSCTL_DESCR("keepalive timeout"), 4790 NULL, 0, &wg_keepalive_timeout, 0, CTL_CREATE, CTL_EOL); 4791 sysctl_createv(clog, 0, &node, NULL, 4792 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 4793 CTLTYPE_BOOL, "force_underload", 4794 SYSCTL_DESCR("force to consider under load"), 4795 NULL, 0, &wg_force_underload, 0, CTL_CREATE, CTL_EOL); 4796 } 4797 #endif 4798 4799 #ifdef WG_RUMPKERNEL 4800 static bool 4801 wg_user_mode(struct wg_softc *wg) 4802 { 4803 4804 return wg->wg_user != NULL; 4805 } 4806 4807 static int 4808 wg_ioctl_linkstr(struct wg_softc *wg, struct ifdrv *ifd) 4809 { 4810 struct ifnet *ifp = &wg->wg_if; 4811 int error; 4812 4813 if (ifp->if_flags & IFF_UP) 4814 return EBUSY; 4815 4816 if (ifd->ifd_cmd == IFLINKSTR_UNSET) { 4817 /* XXX do nothing */ 4818 return 0; 4819 } else if (ifd->ifd_cmd != 0) { 4820 return EINVAL; 4821 } else if (wg->wg_user != NULL) { 4822 return EBUSY; 4823 } 4824 4825 /* Assume \0 included */ 4826 if (ifd->ifd_len > IFNAMSIZ) { 4827 return E2BIG; 4828 } else if (ifd->ifd_len < 1) { 4829 return EINVAL; 4830 } 4831 4832 char tun_name[IFNAMSIZ]; 4833 error = copyinstr(ifd->ifd_data, tun_name, ifd->ifd_len, NULL); 4834 if (error != 0) 4835 return error; 4836 4837 if (strncmp(tun_name, "tun", 3) != 0) 4838 return EINVAL; 4839 4840 error = rumpuser_wg_create(tun_name, wg, &wg->wg_user); 4841 4842 return error; 4843 } 4844 4845 static int 4846 wg_send_user(struct wg_peer *wgp, struct mbuf *m) 4847 { 4848 int error; 4849 struct psref psref; 4850 struct wg_sockaddr *wgsa; 4851 struct wg_softc *wg = wgp->wgp_sc; 4852 struct iovec iov[1]; 4853 4854 wgsa = wg_get_endpoint_sa(wgp, &psref); 4855 4856 iov[0].iov_base = mtod(m, void *); 4857 iov[0].iov_len = m->m_len; 4858 4859 /* Send messages to a peer via an ordinary socket.
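	 * In user mode (rump) the datagram is handed to the host's UDP
	 * socket by rumpuser_wg_send_peer() below, instead of going through
	 * the in-kernel wg_send_udp() path.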
*/ 4860 error = rumpuser_wg_send_peer(wg->wg_user, wgsatosa(wgsa), iov, 1); 4861 4862 wg_put_sa(wgp, wgsa, &psref); 4863 4864 m_freem(m); 4865 4866 return error; 4867 } 4868 4869 static void 4870 wg_input_user(struct ifnet *ifp, struct mbuf *m, const int af) 4871 { 4872 struct wg_softc *wg = ifp->if_softc; 4873 struct iovec iov[2]; 4874 struct sockaddr_storage ss; 4875 4876 KASSERT(af == AF_INET || af == AF_INET6); 4877 4878 WG_TRACE(""); 4879 4880 if (af == AF_INET) { 4881 struct sockaddr_in *sin = (struct sockaddr_in *)&ss; 4882 struct ip *ip; 4883 4884 KASSERT(m->m_len >= sizeof(struct ip)); 4885 ip = mtod(m, struct ip *); 4886 sockaddr_in_init(sin, &ip->ip_dst, 0); 4887 } else { 4888 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss; 4889 struct ip6_hdr *ip6; 4890 4891 KASSERT(m->m_len >= sizeof(struct ip6_hdr)); 4892 ip6 = mtod(m, struct ip6_hdr *); 4893 sockaddr_in6_init(sin6, &ip6->ip6_dst, 0, 0, 0); 4894 } 4895 4896 iov[0].iov_base = &ss; 4897 iov[0].iov_len = ss.ss_len; 4898 iov[1].iov_base = mtod(m, void *); 4899 iov[1].iov_len = m->m_len; 4900 4901 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 4902 4903 /* Send decrypted packets to users via a tun. */ 4904 rumpuser_wg_send_user(wg->wg_user, iov, 2); 4905 4906 m_freem(m); 4907 } 4908 4909 static int 4910 wg_bind_port_user(struct wg_softc *wg, const uint16_t port) 4911 { 4912 int error; 4913 uint16_t old_port = wg->wg_listen_port; 4914 4915 if (port != 0 && old_port == port) 4916 return 0; 4917 4918 error = rumpuser_wg_sock_bind(wg->wg_user, port); 4919 if (error == 0) 4920 wg->wg_listen_port = port; 4921 return error; 4922 } 4923 4924 /* 4925 * Receive user packets. 4926 */ 4927 void 4928 rumpkern_wg_recv_user(struct wg_softc *wg, struct iovec *iov, size_t iovlen) 4929 { 4930 struct ifnet *ifp = &wg->wg_if; 4931 struct mbuf *m; 4932 const struct sockaddr *dst; 4933 4934 WG_TRACE(""); 4935 4936 dst = iov[0].iov_base; 4937 4938 m = m_gethdr(M_DONTWAIT, MT_DATA); 4939 if (m == NULL) 4940 return; 4941 m->m_len = m->m_pkthdr.len = 0; 4942 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base); 4943 4944 WG_DLOG("iov_len=%lu\n", iov[1].iov_len); 4945 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 4946 4947 (void)wg_output(ifp, m, dst, NULL); 4948 } 4949 4950 /* 4951 * Receive packets from a peer. 4952 */ 4953 void 4954 rumpkern_wg_recv_peer(struct wg_softc *wg, struct iovec *iov, size_t iovlen) 4955 { 4956 struct mbuf *m; 4957 const struct sockaddr *src; 4958 4959 WG_TRACE(""); 4960 4961 src = iov[0].iov_base; 4962 4963 m = m_gethdr(M_DONTWAIT, MT_DATA); 4964 if (m == NULL) 4965 return; 4966 m->m_len = m->m_pkthdr.len = 0; 4967 m_copyback(m, 0, iov[1].iov_len, iov[1].iov_base); 4968 4969 WG_DLOG("iov_len=%lu\n", iov[1].iov_len); 4970 WG_DUMP_BUF(iov[1].iov_base, iov[1].iov_len); 4971 4972 wg_handle_packet(wg, m, src); 4973 } 4974 #endif /* WG_RUMPKERNEL */ 4975 4976 /* 4977 * Module infrastructure 4978 */ 4979 #include "if_module.h" 4980 4981 IF_MODULE(MODULE_CLASS_DRIVER, wg, "") 4982
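
/*
 * Usage sketch (illustrative only, not compiled here): the commands
 * handled by wg_ioctl() above are driven from userland through
 * SIOCSDRVSPEC with a proplib dictionary in ifd_data.  Assuming an
 * interface "wg0" already created with "ifconfig wg0 create", setting
 * the private key looks roughly like the following; the variables s,
 * key and buf are hypothetical and error handling is omitted:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <sys/sockio.h>
 *	#include <net/if.h>
 *	#include <net/if_wg.h>
 *	#include <prop/proplib.h>
 *	#include <string.h>
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	uint8_t key[WG_STATIC_KEY_LEN];		(filled in by the caller)
 *
 *	prop_dictionary_t d = prop_dictionary_create();
 *	prop_dictionary_set_data(d, "private_key", key, sizeof(key));
 *	char *buf = prop_dictionary_externalize(d);
 *
 *	struct ifdrv ifd;
 *	memset(&ifd, 0, sizeof(ifd));
 *	strlcpy(ifd.ifd_name, "wg0", sizeof(ifd.ifd_name));
 *	ifd.ifd_cmd = WG_IOCTL_SET_PRIVATE_KEY;
 *	ifd.ifd_data = buf;
 *	ifd.ifd_len = strlen(buf) + 1;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 *
 * Adding a peer works the same way with WG_IOCTL_ADD_PEER and a
 * dictionary carrying "public_key", optionally "endpoint" and
 * "preshared_key", and an "allowedips" array of {family, ip, cidr}
 * dictionaries, as parsed by wg_handle_prop_peer() above.
 */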