/* SPDX-License-Identifier: ISC
 *
 * Copyright (C) 2015-2021 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright (C) 2019-2021 Matt Dunwoodie <ncon@noconroy.net>
 */

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/objcache.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>

#include <crypto/chachapoly.h>
#include <crypto/blake2/blake2s.h>
#include <crypto/siphash/siphash.h>

#include "wg_cookie.h"

#define COOKIE_MAC1_KEY_LABEL	"mac1----"
#define COOKIE_COOKIE_KEY_LABEL	"cookie--"
#define COOKIE_SECRET_MAX_AGE	120
#define COOKIE_SECRET_LATENCY	5

/* Constants for initiation rate limiting */
#define RATELIMIT_SIZE		(1 << 13)
#define RATELIMIT_MASK		(RATELIMIT_SIZE - 1)
#define RATELIMIT_SIZE_MAX	(RATELIMIT_SIZE * 8)
#define NSEC_PER_SEC		1000000000LL
#define INITIATIONS_PER_SECOND	20
#define INITIATIONS_BURSTABLE	5
#define INITIATION_COST		(NSEC_PER_SEC / INITIATIONS_PER_SECOND)
#define TOKEN_MAX		(INITIATION_COST * INITIATIONS_BURSTABLE)
#define ELEMENT_TIMEOUT		1
#define IPV4_MASK_SIZE		4	/* Use all 4 bytes of IPv4 address */
#define IPV6_MASK_SIZE		8	/* Use top 8 bytes (/64) of IPv6 address */

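/* A note on the numbers above (derived directly from the definitions):
 * INITIATION_COST = 10^9 / 20 = 50,000,000 ns (50 ms) is charged per allowed
 * initiation, and TOKEN_MAX = 5 * INITIATION_COST = 250 ms. In
 * ratelimit_allow() below, an entry earns one token-nanosecond per elapsed
 * nanosecond and is capped at TOKEN_MAX, so a single source (a /32 for IPv4,
 * a /64 for IPv6, per the mask sizes above) can burst roughly
 * INITIATIONS_BURSTABLE initiations and is then limited to roughly
 * INITIATIONS_PER_SECOND initiations per second sustained. */
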
struct ratelimit_key {
	uint8_t		ip[IPV6_MASK_SIZE];
};

struct ratelimit_entry {
	LIST_ENTRY(ratelimit_entry)	r_entry;
	struct ratelimit_key		r_key;
	struct timespec			r_last_time;	/* nanouptime */
	uint64_t			r_tokens;
};

struct ratelimit {
	uint8_t				rl_secret[SIPHASH_KEY_LENGTH];
	struct lock			rl_mtx;
	struct callout			rl_gc;
	LIST_HEAD(, ratelimit_entry)	rl_table[RATELIMIT_SIZE];
	size_t				rl_table_num;
	bool				rl_initialized;
};

static void	precompute_key(uint8_t *,
			const uint8_t[COOKIE_INPUT_SIZE], const char *);
static void	macs_mac1(struct cookie_macs *, const void *, size_t,
			const uint8_t[COOKIE_KEY_SIZE]);
static void	macs_mac2(struct cookie_macs *, const void *, size_t,
			const uint8_t[COOKIE_COOKIE_SIZE]);
static int	timer_expired(struct timespec *, time_t, long);
static void	make_cookie(struct cookie_checker *,
			uint8_t[COOKIE_COOKIE_SIZE], struct sockaddr *);
static void	ratelimit_init(struct ratelimit *);
static void	ratelimit_deinit(struct ratelimit *);
static void	ratelimit_gc_callout(void *);
static void	ratelimit_gc_schedule(struct ratelimit *);
static void	ratelimit_gc(struct ratelimit *, bool);
static int	ratelimit_allow(struct ratelimit *, struct sockaddr *);
static uint64_t	siphash13(const uint8_t [SIPHASH_KEY_LENGTH], const void *, size_t);

static struct ratelimit ratelimit_v4;
#ifdef INET6
static struct ratelimit ratelimit_v6;
#endif

static struct objcache *ratelimit_zone;
MALLOC_DEFINE(M_WG_RATELIMIT, "WG ratelimit", "wireguard ratelimit");

/* Public Functions */
int
cookie_init(void)
{
	ratelimit_zone = objcache_create_simple(M_WG_RATELIMIT,
	    sizeof(struct ratelimit_entry));
	if (ratelimit_zone == NULL)
		return ENOMEM;

	ratelimit_init(&ratelimit_v4);
#ifdef INET6
	ratelimit_init(&ratelimit_v6);
#endif
	return (0);
}

void
cookie_deinit(void)
{
	ratelimit_deinit(&ratelimit_v4);
#ifdef INET6
	ratelimit_deinit(&ratelimit_v6);
#endif
	if (ratelimit_zone != NULL)
		objcache_destroy(ratelimit_zone);
}

void
cookie_checker_init(struct cookie_checker *cc)
{
	bzero(cc, sizeof(*cc));

	lockinit(&cc->cc_key_lock, "cookie_checker_key", 0, 0);
	lockinit(&cc->cc_secret_mtx, "cookie_checker_secret", 0, 0);
}

void
cookie_checker_free(struct cookie_checker *cc)
{
	lockuninit(&cc->cc_key_lock);
	lockuninit(&cc->cc_secret_mtx);
	explicit_bzero(cc, sizeof(*cc));
}

void
cookie_checker_update(struct cookie_checker *cc,
    const uint8_t key[COOKIE_INPUT_SIZE])
{
	lockmgr(&cc->cc_key_lock, LK_EXCLUSIVE);
	if (key) {
		precompute_key(cc->cc_mac1_key, key, COOKIE_MAC1_KEY_LABEL);
		precompute_key(cc->cc_cookie_key, key, COOKIE_COOKIE_KEY_LABEL);
	} else {
		bzero(cc->cc_mac1_key, sizeof(cc->cc_mac1_key));
		bzero(cc->cc_cookie_key, sizeof(cc->cc_cookie_key));
	}
	lockmgr(&cc->cc_key_lock, LK_RELEASE);
}

void
cookie_checker_create_payload(struct cookie_checker *cc,
    struct cookie_macs *macs, uint8_t nonce[COOKIE_NONCE_SIZE],
    uint8_t ecookie[COOKIE_ENCRYPTED_SIZE], struct sockaddr *sa)
{
	uint8_t cookie[COOKIE_COOKIE_SIZE];

	make_cookie(cc, cookie, sa);
	karc4random_buf(nonce, COOKIE_NONCE_SIZE);

	lockmgr(&cc->cc_key_lock, LK_SHARED);
	xchacha20poly1305_encrypt(ecookie, cookie, COOKIE_COOKIE_SIZE,
	    macs->mac1, COOKIE_MAC_SIZE, nonce, cc->cc_cookie_key);
	lockmgr(&cc->cc_key_lock, LK_RELEASE);

	explicit_bzero(cookie, sizeof(cookie));
}

void
cookie_maker_init(struct cookie_maker *cm, const uint8_t key[COOKIE_INPUT_SIZE])
{
	bzero(cm, sizeof(*cm));
	precompute_key(cm->cm_mac1_key, key, COOKIE_MAC1_KEY_LABEL);
	precompute_key(cm->cm_cookie_key, key, COOKIE_COOKIE_KEY_LABEL);
	lockinit(&cm->cm_lock, "cookie_maker", 0, 0);
}

void
cookie_maker_free(struct cookie_maker *cm)
{
	lockuninit(&cm->cm_lock);
	explicit_bzero(cm, sizeof(*cm));
}

int
cookie_maker_consume_payload(struct cookie_maker *cm,
    uint8_t nonce[COOKIE_NONCE_SIZE], uint8_t ecookie[COOKIE_ENCRYPTED_SIZE])
{
	uint8_t cookie[COOKIE_COOKIE_SIZE];
	int ret;

	lockmgr(&cm->cm_lock, LK_SHARED);
	if (!cm->cm_mac1_sent) {
		ret = ETIMEDOUT;
		goto error;
	}

	if (!xchacha20poly1305_decrypt(cookie, ecookie, COOKIE_ENCRYPTED_SIZE,
	    cm->cm_mac1_last, COOKIE_MAC_SIZE, nonce, cm->cm_cookie_key)) {
		ret = EINVAL;
		goto error;
	}
	lockmgr(&cm->cm_lock, LK_RELEASE);

	lockmgr(&cm->cm_lock, LK_EXCLUSIVE);
	memcpy(cm->cm_cookie, cookie, COOKIE_COOKIE_SIZE);
	getnanouptime(&cm->cm_cookie_birthdate);
	cm->cm_cookie_valid = true;
	cm->cm_mac1_sent = false;
	lockmgr(&cm->cm_lock, LK_RELEASE);

	return 0;
error:
	lockmgr(&cm->cm_lock, LK_RELEASE);
	return ret;
}

void
cookie_maker_mac(struct cookie_maker *cm, struct cookie_macs *macs, void *buf,
    size_t len)
{
	lockmgr(&cm->cm_lock, LK_EXCLUSIVE);
	macs_mac1(macs, buf, len, cm->cm_mac1_key);
	memcpy(cm->cm_mac1_last, macs->mac1, COOKIE_MAC_SIZE);
	cm->cm_mac1_sent = true;

	if (cm->cm_cookie_valid &&
	    !timer_expired(&cm->cm_cookie_birthdate,
	    COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY, 0)) {
		macs_mac2(macs, buf, len, cm->cm_cookie);
	} else {
		bzero(macs->mac2, COOKIE_MAC_SIZE);
		cm->cm_cookie_valid = false;
	}
	lockmgr(&cm->cm_lock, LK_RELEASE);
}

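/* Validation flow for incoming handshake messages: mac1 is a keyed BLAKE2s
 * over the message, keyed with cc_mac1_key (derived by precompute_key() from
 * COOKIE_MAC1_KEY_LABEL and the caller-supplied input key; in WireGuard this
 * input is expected to be the receiver's static public key). An invalid mac1
 * means the message is simply dropped (EINVAL). Only when the caller is under
 * load does it pass check_cookie, in which case mac2 (keyed with the current
 * cookie for this endpoint) must also match, otherwise the peer is asked to
 * retry with a cookie (EAGAIN). Even with a valid mac2, the source address is
 * still subject to the per-IP rate limiter below (0 or ECONNREFUSED). */
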
int
cookie_checker_validate_macs(struct cookie_checker *cc, struct cookie_macs *macs,
    void *buf, size_t len, bool check_cookie, struct sockaddr *sa)
{
	struct cookie_macs our_macs;
	uint8_t cookie[COOKIE_COOKIE_SIZE];

	/* Validate incoming MACs */
	lockmgr(&cc->cc_key_lock, LK_SHARED);
	macs_mac1(&our_macs, buf, len, cc->cc_mac1_key);
	lockmgr(&cc->cc_key_lock, LK_RELEASE);

	/* If mac1 is invalid, we want to drop the packet. */
	if (timingsafe_bcmp(our_macs.mac1, macs->mac1, COOKIE_MAC_SIZE) != 0)
		return EINVAL;

	if (check_cookie) {
		make_cookie(cc, cookie, sa);
		macs_mac2(&our_macs, buf, len, cookie);

		/* If the mac2 is invalid, we want to send a cookie response. */
		if (timingsafe_bcmp(our_macs.mac2, macs->mac2, COOKIE_MAC_SIZE) != 0)
			return EAGAIN;

		/* If the mac2 is valid, we may want to rate limit the peer.
		 * ratelimit_allow will return either 0 or ECONNREFUSED,
		 * meaning we either do not rate limit, or we should rate
		 * limit (refuse) the peer, respectively. */
		if (sa->sa_family == AF_INET)
			return ratelimit_allow(&ratelimit_v4, sa);
#ifdef INET6
		else if (sa->sa_family == AF_INET6)
			return ratelimit_allow(&ratelimit_v6, sa);
#endif
		else
			return EAFNOSUPPORT;
	}

	return 0;
}

/* Private functions */
static void
precompute_key(uint8_t *key, const uint8_t input[COOKIE_INPUT_SIZE],
    const char *label)
{
	struct blake2s_state blake;
	blake2s_init(&blake, COOKIE_KEY_SIZE);
	blake2s_update(&blake, label, strlen(label));
	blake2s_update(&blake, input, COOKIE_INPUT_SIZE);
	blake2s_final(&blake, key);
}

static void
macs_mac1(struct cookie_macs *macs, const void *buf, size_t len,
    const uint8_t key[COOKIE_KEY_SIZE])
{
	struct blake2s_state state;
	blake2s_init_key(&state, COOKIE_MAC_SIZE, key, COOKIE_KEY_SIZE);
	blake2s_update(&state, buf, len);
	blake2s_final(&state, macs->mac1);
}

static void
macs_mac2(struct cookie_macs *macs, const void *buf, size_t len,
    const uint8_t key[COOKIE_COOKIE_SIZE])
{
	struct blake2s_state state;
	blake2s_init_key(&state, COOKIE_MAC_SIZE, key, COOKIE_COOKIE_SIZE);
	blake2s_update(&state, buf, len);
	blake2s_update(&state, macs->mac1, COOKIE_MAC_SIZE);
	blake2s_final(&state, macs->mac2);
}

static __inline int
timer_expired(struct timespec *birthdate, time_t sec, long nsec)
{
	struct timespec uptime;
	struct timespec expire = { .tv_sec = sec, .tv_nsec = nsec };

	if (birthdate->tv_sec == 0 && birthdate->tv_nsec == 0)
		return ETIMEDOUT;

	getnanouptime(&uptime);
	timespecadd(birthdate, &expire, &expire);
	return timespeccmp(&uptime, &expire, >) ? ETIMEDOUT : 0;
}

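/* make_cookie() below derives the cookie for an endpoint as a keyed BLAKE2s
 * hash of its address and port. The key is a random secret that is lazily
 * regenerated once it is older than COOKIE_SECRET_MAX_AGE seconds, so issued
 * cookies implicitly expire when the secret rotates. (The maker side, in
 * cookie_maker_mac() above, additionally stops using a stored cookie
 * COOKIE_SECRET_LATENCY seconds early.) */
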
static void
make_cookie(struct cookie_checker *cc, uint8_t cookie[COOKIE_COOKIE_SIZE],
    struct sockaddr *sa)
{
	struct blake2s_state state;

	lockmgr(&cc->cc_secret_mtx, LK_EXCLUSIVE);
	if (timer_expired(&cc->cc_secret_birthdate,
	    COOKIE_SECRET_MAX_AGE, 0)) {
		karc4random_buf(cc->cc_secret, COOKIE_SECRET_SIZE);
		getnanouptime(&cc->cc_secret_birthdate);
	}
	blake2s_init_key(&state, COOKIE_COOKIE_SIZE, cc->cc_secret,
	    COOKIE_SECRET_SIZE);
	lockmgr(&cc->cc_secret_mtx, LK_RELEASE);

	if (sa->sa_family == AF_INET) {
		blake2s_update(&state, (uint8_t *)&satosin(sa)->sin_addr,
		    sizeof(struct in_addr));
		blake2s_update(&state, (uint8_t *)&satosin(sa)->sin_port,
		    sizeof(in_port_t));
		blake2s_final(&state, cookie);
#ifdef INET6
	} else if (sa->sa_family == AF_INET6) {
		blake2s_update(&state, (uint8_t *)&satosin6(sa)->sin6_addr,
		    sizeof(struct in6_addr));
		blake2s_update(&state, (uint8_t *)&satosin6(sa)->sin6_port,
		    sizeof(in_port_t));
		blake2s_final(&state, cookie);
#endif
	} else {
		karc4random_buf(cookie, COOKIE_COOKIE_SIZE);
	}
}

static void
ratelimit_init(struct ratelimit *rl)
{
	size_t i;
	lockinit(&rl->rl_mtx, "ratelimit_lock", 0, 0);
	callout_init_lk(&rl->rl_gc, &rl->rl_mtx);
	karc4random_buf(rl->rl_secret, sizeof(rl->rl_secret));
	for (i = 0; i < RATELIMIT_SIZE; i++)
		LIST_INIT(&rl->rl_table[i]);
	rl->rl_table_num = 0;
	rl->rl_initialized = true;
}

static void
ratelimit_deinit(struct ratelimit *rl)
{
	if (!rl->rl_initialized)
		return;
	lockmgr(&rl->rl_mtx, LK_EXCLUSIVE);
	callout_stop(&rl->rl_gc);
	ratelimit_gc(rl, true);
	lockmgr(&rl->rl_mtx, LK_RELEASE);
	lockuninit(&rl->rl_mtx);

	rl->rl_initialized = false;
}

static void
ratelimit_gc_callout(void *_rl)
{
	/* callout will lock rl_mtx for us */
	ratelimit_gc(_rl, false);
}

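/* Garbage collection of the rate-limit table: entries whose r_last_time is
 * older than ELEMENT_TIMEOUT seconds are reclaimed. The GC callout is armed
 * ELEMENT_TIMEOUT seconds out, is never re-armed while one is already
 * pending, and only reschedules itself while the table is non-empty. A
 * forced GC (from ratelimit_deinit()) drops every entry regardless of age. */
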
static void
ratelimit_gc_schedule(struct ratelimit *rl)
{
	/* Trigger another GC if needed. There is no point calling GC if there
	 * are no entries in the table. We also want to ensure that GC occurs
	 * on a regular interval, so don't override a currently pending GC.
	 *
	 * In the case of a forced ratelimit_gc, there will be no entries left,
	 * so we will not schedule another GC. */
	if (rl->rl_table_num > 0 && !callout_pending(&rl->rl_gc))
		callout_reset(&rl->rl_gc, ELEMENT_TIMEOUT * hz,
		    ratelimit_gc_callout, rl);
}

static void
ratelimit_gc(struct ratelimit *rl, bool force)
{
	size_t i;
	struct ratelimit_entry *r, *tr;
	struct timespec expiry;

	KKASSERT(lockstatus(&rl->rl_mtx, curthread) == LK_EXCLUSIVE);

	if (rl->rl_table_num == 0)
		return;

	getnanouptime(&expiry);
	expiry.tv_sec -= ELEMENT_TIMEOUT;

	for (i = 0; i < RATELIMIT_SIZE; i++) {
		LIST_FOREACH_MUTABLE(r, &rl->rl_table[i], r_entry, tr) {
			if (force ||
			    timespeccmp(&r->r_last_time, &expiry, <)) {
				rl->rl_table_num--;
				LIST_REMOVE(r, r_entry);
				objcache_put(ratelimit_zone, r);
			}
		}
	}

	ratelimit_gc_schedule(rl);
}

static int
ratelimit_allow(struct ratelimit *rl, struct sockaddr *sa)
{
	uint64_t bucket, tokens;
	struct timespec diff;
	struct ratelimit_entry *r;
	int ret = ECONNREFUSED;
	struct ratelimit_key key = { 0 };
	size_t len = sizeof(key);

	if (sa->sa_family == AF_INET) {
		memcpy(key.ip, &satosin(sa)->sin_addr, IPV4_MASK_SIZE);
		len -= IPV6_MASK_SIZE - IPV4_MASK_SIZE;
	}
#ifdef INET6
	else if (sa->sa_family == AF_INET6)
		memcpy(key.ip, &satosin6(sa)->sin6_addr, IPV6_MASK_SIZE);
#endif
	else
		return ret;

	bucket = siphash13(rl->rl_secret, &key, len) & RATELIMIT_MASK;
	lockmgr(&rl->rl_mtx, LK_EXCLUSIVE);

	LIST_FOREACH(r, &rl->rl_table[bucket], r_entry) {
		if (bcmp(&r->r_key, &key, len) != 0)
			continue;

		/* If we get to here, we've found an entry for the endpoint.
		 * We apply a standard token bucket: add the time elapsed
		 * since last_time to the token count, capping it at
		 * TOKEN_MAX. If the endpoint has no tokens left (that is,
		 * tokens < INITIATION_COST), then we block the request;
		 * otherwise we subtract INITIATION_COST and return OK. */
		diff = r->r_last_time;
		getnanouptime(&r->r_last_time);
		timespecsub(&r->r_last_time, &diff, &diff);

		tokens = r->r_tokens;
		tokens += diff.tv_sec * NSEC_PER_SEC + diff.tv_nsec;

		if (tokens > TOKEN_MAX)
			tokens = TOKEN_MAX;

		if (tokens >= INITIATION_COST) {
			r->r_tokens = tokens - INITIATION_COST;
			goto ok;
		} else {
			r->r_tokens = tokens;
			goto error;
		}
	}

	/* If we get to here, we didn't have an entry for the endpoint, let's
	 * add one if we have space. */
	if (rl->rl_table_num >= RATELIMIT_SIZE_MAX)
		goto error;

	/* Goto error if out of memory */
	if ((r = objcache_get(ratelimit_zone, M_NOWAIT)) == NULL)
		goto error;
	bzero(r, sizeof(*r)); /* objcache_get() doesn't ensure M_ZERO. */

	rl->rl_table_num++;

	/* Insert entry into the hashtable and ensure it's initialized. */
	LIST_INSERT_HEAD(&rl->rl_table[bucket], r, r_entry);
	r->r_key = key;
	r->r_tokens = TOKEN_MAX - INITIATION_COST;
	getnanouptime(&r->r_last_time);

	/* If we've added a new entry, let's trigger GC. */
	ratelimit_gc_schedule(rl);
ok:
	ret = 0;
error:
	lockmgr(&rl->rl_mtx, LK_RELEASE);
	return ret;
}

static uint64_t
siphash13(const uint8_t key[SIPHASH_KEY_LENGTH], const void *src, size_t len)
{
	SIPHASH_CTX ctx;
	return (SipHashX(&ctx, 1, 3, key, src, len));
}

#ifdef SELFTESTS
#include "selftest/cookie.c"
#endif /* SELFTESTS */