/* $OpenBSD: tls12_record_layer.c,v 1.19 2021/02/27 14:20:50 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <limits.h>
#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

struct tls12_record_protection {
	uint16_t epoch;
	uint8_t seq_num[SSL3_SEQUENCE_SIZE];

	SSL_AEAD_CTX *aead_ctx;

	int stream_mac;

	uint8_t *mac_key;
	size_t mac_key_len;

	EVP_CIPHER_CTX *cipher_ctx;
	EVP_MD_CTX *hash_ctx;
};

static struct tls12_record_protection *
tls12_record_protection_new(void)
{
	return calloc(1, sizeof(struct tls12_record_protection));
}

static void
tls12_record_protection_clear(struct tls12_record_protection *rp)
{
	memset(rp->seq_num, 0, sizeof(rp->seq_num));

	if (rp->aead_ctx != NULL) {
		EVP_AEAD_CTX_cleanup(&rp->aead_ctx->ctx);
		freezero(rp->aead_ctx, sizeof(*rp->aead_ctx));
		rp->aead_ctx = NULL;
	}

	EVP_CIPHER_CTX_free(rp->cipher_ctx);
	rp->cipher_ctx = NULL;

	EVP_MD_CTX_free(rp->hash_ctx);
	rp->hash_ctx = NULL;

	freezero(rp->mac_key, rp->mac_key_len);
	rp->mac_key = NULL;
	rp->mac_key_len = 0;
}

static void
tls12_record_protection_free(struct tls12_record_protection *rp)
{
	if (rp == NULL)
		return;

	tls12_record_protection_clear(rp);

	freezero(rp, sizeof(struct tls12_record_protection));
}

static int
tls12_record_protection_engaged(struct tls12_record_protection *rp)
{
	return rp->aead_ctx != NULL || rp->cipher_ctx != NULL;
}

static int
tls12_record_protection_eiv_len(struct tls12_record_protection *rp,
    size_t *out_eiv_len)
{
	int eiv_len;

	*out_eiv_len = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	eiv_len = 0;
	if (EVP_CIPHER_CTX_mode(rp->cipher_ctx) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(rp->cipher_ctx);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		return 0;

	*out_eiv_len = eiv_len;

	return 1;
}

static int
tls12_record_protection_block_size(struct tls12_record_protection *rp,
    size_t *out_block_size)
{
	int block_size;

	*out_block_size = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	block_size = EVP_CIPHER_CTX_block_size(rp->cipher_ctx);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		return 0;

	*out_block_size = block_size;

	return 1;
}

static int
tls12_record_protection_mac_len(struct tls12_record_protection *rp,
    size_t *out_mac_len)
{
	int mac_len;

	*out_mac_len = 0;

	if (rp->hash_ctx == NULL)
		return 0;

	mac_len = EVP_MD_CTX_size(rp->hash_ctx);
	if (mac_len <= 0 || mac_len > EVP_MAX_MD_SIZE)
		return 0;

	*out_mac_len = mac_len;

	return 1;
}

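/*
 * The read and write pointers always reference the protection that is
 * currently in effect and alias one of the owned read_current, write_current
 * or write_previous members. For DTLS, write_previous retains the write state
 * from before a cipher change so that a retransmitted flight can still be
 * sealed under the old epoch, until tls12_record_layer_write_epoch_done()
 * discards it.
 */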
struct tls12_record_layer {
	uint16_t version;
	int dtls;

	uint8_t alert_desc;

	const EVP_AEAD *aead;
	const EVP_CIPHER *cipher;
	const EVP_MD *handshake_hash;
	const EVP_MD *mac_hash;

	/* Pointers to active record protection (memory is not owned). */
	struct tls12_record_protection *read;
	struct tls12_record_protection *write;

	struct tls12_record_protection *read_current;
	struct tls12_record_protection *write_current;
	struct tls12_record_protection *write_previous;
};

struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		goto err;
	if ((rl->read_current = tls12_record_protection_new()) == NULL)
		goto err;
	if ((rl->write_current = tls12_record_protection_new()) == NULL)
		goto err;

	rl->read = rl->read_current;
	rl->write = rl->write_current;

	return rl;

 err:
	tls12_record_layer_free(rl);

	return NULL;
}

void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	if (rl == NULL)
		return;

	tls12_record_protection_free(rl->read_current);
	tls12_record_protection_free(rl->write_current);
	tls12_record_protection_free(rl->write_previous);

	freezero(rl, sizeof(struct tls12_record_layer));
}

void
tls12_record_layer_alert(struct tls12_record_layer *rl, uint8_t *alert_desc)
{
	*alert_desc = rl->alert_desc;
}

int
tls12_record_layer_write_overhead(struct tls12_record_layer *rl,
    size_t *overhead)
{
	size_t block_size, eiv_len, mac_len;

	*overhead = 0;

	if (rl->write->aead_ctx != NULL) {
		*overhead = rl->write->aead_ctx->tag_len;
	} else if (rl->write->cipher_ctx != NULL) {
		eiv_len = 0;
		if (rl->version != TLS1_VERSION) {
			if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
				return 0;
		}
		if (!tls12_record_protection_block_size(rl->write, &block_size))
			return 0;
		if (!tls12_record_protection_mac_len(rl->write, &mac_len))
			return 0;

		*overhead = eiv_len + block_size + mac_len;
	}

	return 1;
}

int
tls12_record_layer_read_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->read);
}

int
tls12_record_layer_write_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->write);
}

void
tls12_record_layer_set_aead(struct tls12_record_layer *rl, const EVP_AEAD *aead)
{
	rl->aead = aead;
}

void
tls12_record_layer_set_cipher_hash(struct tls12_record_layer *rl,
    const EVP_CIPHER *cipher, const EVP_MD *handshake_hash,
    const EVP_MD *mac_hash)
{
	rl->cipher = cipher;
	rl->handshake_hash = handshake_hash;
	rl->mac_hash = mac_hash;
}

void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = ((version >> 8) == DTLS1_VERSION_MAJOR);
}

void
tls12_record_layer_set_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->write->epoch = epoch;
}

int
tls12_record_layer_use_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write->epoch == epoch)
		return 1;

	if (rl->write_current->epoch == epoch) {
		rl->write = rl->write_current;
		return 1;
	}

	if (rl->write_previous != NULL && rl->write_previous->epoch == epoch) {
		rl->write = rl->write_previous;
		return 1;
	}

	return 0;
}

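/*
 * Once a previous DTLS write epoch is no longer needed for retransmissions,
 * the caller signals completion so the old write protection can be freed and
 * the current protection restored as the active write state.
 */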
void
tls12_record_layer_write_epoch_done(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write_previous == NULL || rl->write_previous->epoch != epoch)
		return;

	rl->write = rl->write_current;

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->read);
}

void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->write);

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX **cipher, EVP_MD_CTX **hash)
{
	*cipher = rl->read->cipher_ctx;
	*hash = rl->read->hash_ctx;
}

void
tls12_record_layer_reflect_seq_num(struct tls12_record_layer *rl)
{
	memcpy(rl->write->seq_num, rl->read->seq_num,
	    sizeof(rl->write->seq_num));
}

static int
tls12_record_layer_set_mac_key(struct tls12_record_protection *rp,
    const uint8_t *mac_key, size_t mac_key_len)
{
	freezero(rp->mac_key, rp->mac_key_len);
	rp->mac_key = NULL;
	rp->mac_key_len = 0;

	if (mac_key == NULL || mac_key_len == 0)
		return 1;

	if ((rp->mac_key = calloc(1, mac_key_len)) == NULL)
		return 0;

	memcpy(rp->mac_key, mac_key, mac_key_len);
	rp->mac_key_len = mac_key_len;

	return 1;
}

static int
tls12_record_layer_ccs_aead(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	size_t aead_nonce_len = EVP_AEAD_nonce_length(rl->aead);

	if ((rp->aead_ctx = calloc(1, sizeof(*rp->aead_ctx))) == NULL)
		return 0;

	/* AES GCM cipher suites use variable nonce in record. */
	if (rl->aead == EVP_aead_aes_128_gcm() ||
	    rl->aead == EVP_aead_aes_256_gcm())
		rp->aead_ctx->variable_nonce_in_record = 1;

	/* ChaCha20 Poly1305 XORs the fixed and variable nonces. */
	if (rl->aead == EVP_aead_chacha20_poly1305())
		rp->aead_ctx->xor_fixed_nonce = 1;

	if (iv_len > sizeof(rp->aead_ctx->fixed_nonce))
		return 0;

	memcpy(rp->aead_ctx->fixed_nonce, iv, iv_len);
	rp->aead_ctx->fixed_nonce_len = iv_len;
	rp->aead_ctx->tag_len = EVP_AEAD_max_overhead(rl->aead);
	rp->aead_ctx->variable_nonce_len = 8;

	if (rp->aead_ctx->xor_fixed_nonce) {
		/* Fixed nonce length must match, variable must not exceed. */
		if (rp->aead_ctx->fixed_nonce_len != aead_nonce_len)
			return 0;
		if (rp->aead_ctx->variable_nonce_len > aead_nonce_len)
			return 0;
	} else {
		/* Concatenated nonce length must equal AEAD nonce length. */
		if (rp->aead_ctx->fixed_nonce_len +
		    rp->aead_ctx->variable_nonce_len != aead_nonce_len)
			return 0;
	}

	if (!EVP_AEAD_CTX_init(&rp->aead_ctx->ctx, rl->aead, key, key_len,
	    EVP_AEAD_DEFAULT_TAG_LENGTH, NULL))
		return 0;

	return 1;
}

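/*
 * The nonces configured in tls12_record_layer_ccs_aead() follow the TLSv1.2
 * AEAD constructions: AES-GCM uses a 4 byte implicit salt from the key block
 * concatenated with an 8 byte explicit nonce carried in each record
 * (RFC 5288), while ChaCha20-Poly1305 XORs the 8 byte sequence number into
 * its 12 byte fixed nonce and sends no explicit nonce (RFC 7905).
 */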
static int
tls12_record_layer_ccs_cipher(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	EVP_PKEY *mac_pkey = NULL;
	int gost_param_nid;
	int mac_type;
	int ret = 0;

	mac_type = EVP_PKEY_HMAC;
	rp->stream_mac = 0;

	/* Special handling for GOST... */
	if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
		if (mac_key_len != 32)
			goto err;
		mac_type = EVP_PKEY_GOSTIMIT;
		rp->stream_mac = 1;
	} else {
		if (EVP_MD_size(rl->mac_hash) != mac_key_len)
			goto err;
	}

	if ((rp->cipher_ctx = EVP_CIPHER_CTX_new()) == NULL)
		goto err;
	if ((rp->hash_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;

	if (!tls12_record_layer_set_mac_key(rp, mac_key, mac_key_len))
		goto err;

	if ((mac_pkey = EVP_PKEY_new_mac_key(mac_type, NULL, mac_key,
	    mac_key_len)) == NULL)
		goto err;

	if (!EVP_CipherInit_ex(rp->cipher_ctx, rl->cipher, NULL, key, iv,
	    is_write))
		goto err;

	if (EVP_DigestSignInit(rp->hash_ctx, NULL, rl->mac_hash, NULL,
	    mac_pkey) <= 0)
		goto err;

	/* More special handling for GOST... */
	if (EVP_CIPHER_type(rl->cipher) == NID_gost89_cnt) {
		gost_param_nid = NID_id_tc26_gost_28147_param_Z;
		if (EVP_MD_type(rl->handshake_hash) == NID_id_GostR3411_94)
			gost_param_nid = NID_id_Gost28147_89_CryptoPro_A_ParamSet;

		if (EVP_CIPHER_CTX_ctrl(rp->cipher_ctx, EVP_CTRL_GOST_SET_SBOX,
		    gost_param_nid, 0) <= 0)
			goto err;

		if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
			if (EVP_MD_CTX_ctrl(rp->hash_ctx, EVP_MD_CTRL_GOST_SET_SBOX,
			    gost_param_nid, 0) <= 0)
				goto err;
		}
	}

	ret = 1;

 err:
	EVP_PKEY_free(mac_pkey);

	return ret;
}

static int
tls12_record_layer_change_cipher_state(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	/* Require unused record protection. */
	if (rp->cipher_ctx != NULL || rp->aead_ctx != NULL)
		return 0;

	if (mac_key_len > INT_MAX || key_len > INT_MAX || iv_len > INT_MAX)
		return 0;

	if (rl->aead != NULL)
		return tls12_record_layer_ccs_aead(rl, rp, is_write, mac_key,
		    mac_key_len, key, key_len, iv, iv_len);

	return tls12_record_layer_ccs_cipher(rl, rp, is_write, mac_key,
	    mac_key_len, key, key_len, iv, iv_len);
}

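/*
 * A cipher state change always installs a freshly allocated (and therefore
 * zeroed) tls12_record_protection, which is what implicitly resets the
 * sequence number for the new state, as noted in the comments below.
 */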
int
tls12_record_layer_change_read_cipher_state(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len, const uint8_t *key,
    size_t key_len, const uint8_t *iv, size_t iv_len)
{
	struct tls12_record_protection *read_new = NULL;
	int ret = 0;

	if ((read_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Read sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, read_new, 0,
	    mac_key, mac_key_len, key, key_len, iv, iv_len))
		goto err;

	tls12_record_protection_free(rl->read_current);
	rl->read = rl->read_current = read_new;
	read_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(read_new);

	return ret;
}

int
tls12_record_layer_change_write_cipher_state(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len, const uint8_t *key,
    size_t key_len, const uint8_t *iv, size_t iv_len)
{
	struct tls12_record_protection *write_new;
	int ret = 0;

	if ((write_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Write sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, write_new, 1,
	    mac_key, mac_key_len, key, key_len, iv, iv_len))
		goto err;

	if (rl->dtls) {
		tls12_record_protection_free(rl->write_previous);
		rl->write_previous = rl->write_current;
		rl->write_current = NULL;
	}
	tls12_record_protection_free(rl->write_current);
	rl->write = rl->write_current = write_new;
	write_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(write_new);

	return ret;
}

static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

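/*
 * The MAC/AEAD pseudo-header is the 13 byte sequence from RFC 5246: an 8 byte
 * sequence number (for DTLS, epoch || 48 bit sequence number), followed by a
 * 1 byte content type, 2 byte version and 2 byte plaintext length. For
 * example, a TLSv1.2 application data record of 32 bytes sent with sequence
 * number 1 uses:
 *
 *   00 00 00 00 00 00 00 01  17  03 03  00 20
 */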
static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, CBS *seq_num, uint8_t **out,
    size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!CBB_add_bytes(&cbb, CBS_data(seq_num), CBS_len(seq_num)))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, CBS *seq_num, uint8_t content_type,
    const uint8_t *content, size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len = 0;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;
	if (mac_len == 0)
		goto err;

	if (stream_mac) {
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	freezero(header, header_len);

	return ret;
}

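/*
 * CBC records are not verified with tls12_record_layer_mac(), since the
 * amount of hashing performed must not depend on the (attacker influenced)
 * padding length. Instead the MAC is recomputed in constant time via
 * ssl3_cbc_digest_record(), the mitigation for the Lucky Thirteen timing
 * attack.
 */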
static int
tls12_record_layer_read_mac_cbc(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t mac_len, size_t padding_len)
{
	uint8_t *header = NULL;
	size_t header_len = 0;
	uint8_t *mac = NULL;
	size_t out_mac_len = 0;
	int ret = 0;

	/*
	 * Must be constant time to avoid leaking details about CBC padding.
	 */

	if (!ssl3_cbc_record_digest_supported(rl->read->hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (!ssl3_cbc_digest_record(rl->read->hash_ctx, mac, &out_mac_len, header,
	    content, content_len + mac_len, content_len + mac_len + padding_len,
	    rl->read->mac_key, rl->read->mac_key_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	size_t out_len;

	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		return 0;

	return tls12_record_layer_mac(rl, cbb, rl->read->hash_ctx,
	    rl->read->stream_mac, seq_num, content_type, content, content_len,
	    &out_len);
}

static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write->hash_ctx,
	    rl->write->stream_mac, seq_num, content_type, content, content_len,
	    out_len);
}

static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, aead->fixed_nonce,
	    aead->fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;
	if (aead->fixed_nonce_len < aead->variable_nonce_len)
		return 0;

	/*
	 * Variable nonce (sequence number) is zero padded on the left, before
	 * the fixed nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    aead->fixed_nonce_len - aead->variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < aead->fixed_nonce_len; i++)
		nonce[i] ^= aead->fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}

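/*
 * Before a ChangeCipherSpec has taken effect there is no record protection
 * and the plaintext path below simply returns the fragment unchanged.
 */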
static int
tls12_record_layer_open_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	if (rl->read->aead_ctx != NULL || rl->read->cipher_ctx != NULL)
		return 0;

	/* XXX - decrypt/process in place for now. */
	*out = (uint8_t *)CBS_data(fragment);
	*out_len = CBS_len(fragment);

	return 1;
}

static int
tls12_record_layer_open_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	const SSL_AEAD_CTX *aead = rl->read->aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	uint8_t *plain;
	size_t plain_len;
	CBS var_nonce;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else if (aead->variable_nonce_in_record) {
		if (!CBS_get_bytes(fragment, &var_nonce,
		    aead->variable_nonce_len))
			goto err;
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(&var_nonce), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	if (CBS_len(fragment) < aead->tag_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment) - aead->tag_len;

	if (!tls12_record_layer_pseudo_header(rl, content_type, plain_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!EVP_AEAD_CTX_open(&aead->ctx, plain, out_len, plain_len,
	    nonce, nonce_len, CBS_data(fragment), CBS_len(fragment),
	    header, header_len)) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (*out_len > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	if (*out_len != plain_len)
		goto err;

	*out = plain;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

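/*
 * For CBC cipher suites the encrypted fragment decrypts to
 * [explicit IV (TLSv1.1+)] || content || MAC || padding, where the final
 * padding byte gives the padding length. This is why at least one byte of
 * padding is required below, and why padding and MAC must be removed and
 * checked in constant time after decryption.
 */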
static int
tls12_record_layer_open_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	SSL3_RECORD_INTERNAL rrec;
	size_t block_size, eiv_len;
	uint8_t *mac = NULL;
	size_t mac_len = 0;
	uint8_t *out_mac = NULL;
	size_t out_mac_len = 0;
	uint8_t *plain;
	size_t plain_len;
	size_t min_len;
	CBB cbb_mac;
	int ret = 0;

	memset(&cbb_mac, 0, sizeof(cbb_mac));

	if (!tls12_record_protection_block_size(rl->read, &block_size))
		goto err;

	/* Determine explicit IV length. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->read, &eiv_len))
			goto err;
	}

	mac_len = 0;
	if (rl->read->hash_ctx != NULL) {
		if (!tls12_record_protection_mac_len(rl->read, &mac_len))
			goto err;
	}

	/* CBC has at least one padding byte. */
	min_len = eiv_len + mac_len;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		min_len += 1;

	if (CBS_len(fragment) < min_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}
	if (CBS_len(fragment) % block_size != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment);

	if (!EVP_Cipher(enc, plain, CBS_data(fragment), plain_len))
		goto err;

	rrec.data = plain;
	rrec.input = plain;
	rrec.length = plain_len;

	/*
	 * We now have to remove padding, extract MAC, calculate MAC
	 * and compare MAC in constant time.
	 */
	if (block_size > 1)
		ssl3_cbc_remove_padding(&rrec, eiv_len, mac_len);

	if ((mac = calloc(1, mac_len)) == NULL)
		goto err;

	if (!CBB_init(&cbb_mac, EVP_MAX_MD_SIZE))
		goto err;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE) {
		ssl3_cbc_copy_mac(mac, &rrec, mac_len, rrec.length +
		    rrec.padding_length);
		rrec.length -= mac_len;
		if (!tls12_record_layer_read_mac_cbc(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length, mac_len,
		    rrec.padding_length))
			goto err;
	} else {
		rrec.length -= mac_len;
		memcpy(mac, rrec.data + rrec.length, mac_len);
		if (!tls12_record_layer_read_mac(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length))
			goto err;
	}
	if (!CBB_finish(&cbb_mac, &out_mac, &out_mac_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	if (timingsafe_memcmp(mac, out_mac, mac_len) != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (rrec.length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	*out = rrec.data;
	*out_len = rrec.length;

	ret = 1;

 err:
	CBB_cleanup(&cbb_mac);
	freezero(mac, mac_len);
	freezero(out_mac, out_mac_len);

	return ret;
}

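/*
 * A record arrives on the wire as:
 *
 *   content type (1) || version (2) || [epoch (2) || seq num (6)] ||
 *   length (2) || fragment
 *
 * where the epoch and explicit sequence number are only present for DTLS.
 * tls12_record_layer_open_record() parses this header and then dispatches
 * on the active read protection.
 */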
int
tls12_record_layer_open_record(struct tls12_record_layer *rl, uint8_t *buf,
    size_t buf_len, uint8_t **out, size_t *out_len)
{
	CBS cbs, fragment, seq_num;
	uint16_t version;
	uint8_t content_type;

	CBS_init(&cbs, buf, buf_len);
	CBS_init(&seq_num, rl->read->seq_num, sizeof(rl->read->seq_num));

	if (!CBS_get_u8(&cbs, &content_type))
		return 0;
	if (!CBS_get_u16(&cbs, &version))
		return 0;
	if (rl->dtls) {
		/*
		 * The DTLS sequence number is split into a 16 bit epoch and
		 * 48 bit sequence number, however for the purposes of record
		 * processing it is treated the same as a TLS 64 bit sequence
		 * number. DTLS also uses explicit read sequence numbers, which
		 * we need to extract from the DTLS record header.
		 */
		if (!CBS_get_bytes(&cbs, &seq_num, SSL3_SEQUENCE_SIZE))
			return 0;
		if (!CBS_write_bytes(&seq_num, rl->read->seq_num,
		    sizeof(rl->read->seq_num), NULL))
			return 0;
	}
	if (!CBS_get_u16_length_prefixed(&cbs, &fragment))
		return 0;

	if (rl->read->aead_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_aead(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else if (rl->read->cipher_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_cipher(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else {
		if (!tls12_record_layer_open_record_plaintext(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	}

	if (!rl->dtls)
		tls1_record_sequence_increment(rl->read->seq_num);

	return 1;
}

static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (rl->write->aead_ctx != NULL || rl->write->cipher_ctx != NULL)
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	const SSL_AEAD_CTX *aead = rl->write->aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	if (aead->variable_nonce_in_record) {
		/* XXX - length check? */
		if (!CBB_add_bytes(out, CBS_data(seq_num),
		    aead->variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + aead->tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(&aead->ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

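/*
 * Sealing with a block cipher assembles the explicit IV (TLSv1.1+), content,
 * MAC and padding into a scratch CBB, pads the result to a multiple of the
 * block size (every padding byte holds the padding length value) and then
 * encrypts the whole buffer with a single EVP_Cipher() call.
 */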
static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write->cipher_ctx;
	size_t block_size, eiv_len, mac_len, pad_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
			goto err;
	}
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write->hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    seq_num, content, content_len, &mac_len))
			goto err;
	}

	plain_len = eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	if (!tls12_record_protection_block_size(rl->write, &block_size))
		goto err;
	if (block_size > 1) {
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	uint8_t *seq_num_data = NULL;
	size_t seq_num_len = 0;
	CBB fragment, seq_num_cbb;
	CBS seq_num;
	int ret = 0;

	/*
	 * Construct the effective sequence number - this is used in both
	 * the DTLS header and for MAC calculations.
	 */
	if (!CBB_init(&seq_num_cbb, SSL3_SEQUENCE_SIZE))
		goto err;
	if (!tls12_record_layer_build_seq_num(rl, &seq_num_cbb, rl->write->epoch,
	    rl->write->seq_num, sizeof(rl->write->seq_num)))
		goto err;
	if (!CBB_finish(&seq_num_cbb, &seq_num_data, &seq_num_len))
		goto err;
	CBS_init(&seq_num, seq_num_data, seq_num_len);

	if (!CBB_add_u8(cbb, content_type))
		goto err;
	if (!CBB_add_u16(cbb, rl->version))
		goto err;
	if (rl->dtls) {
		if (!CBB_add_bytes(cbb, CBS_data(&seq_num), CBS_len(&seq_num)))
			goto err;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		goto err;

	if (rl->write->aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else if (rl->write->cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			goto err;
	}

	if (!CBB_flush(cbb))
		goto err;

	tls1_record_sequence_increment(rl->write->seq_num);

	ret = 1;

 err:
	CBB_cleanup(&seq_num_cbb);
	free(seq_num_data);

	return ret;
}
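
/*
 * Typical use of this interface, as a sketch only (the real callers are the
 * TLSv1.2 handshake and record code, which also derive the key material):
 *
 *	rl = tls12_record_layer_new();
 *	tls12_record_layer_set_version(rl, TLS1_2_VERSION);
 *	tls12_record_layer_set_aead(rl, EVP_aead_aes_128_gcm());
 *	if (!tls12_record_layer_change_write_cipher_state(rl, NULL, 0,
 *	    key, key_len, iv, iv_len))
 *		...
 *	if (!tls12_record_layer_seal_record(rl, SSL3_RT_APPLICATION_DATA,
 *	    data, data_len, &cbb))
 *		...
 *	tls12_record_layer_free(rl);
 */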