/* $OpenBSD: tls12_record_layer.c,v 1.30 2021/05/16 15:49:01 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <limits.h>
#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

#define TLS12_RECORD_SEQ_NUM_LEN	8
#define TLS12_AEAD_FIXED_NONCE_MAX_LEN	12

struct tls12_record_protection {
	uint16_t epoch;
	uint8_t seq_num[TLS12_RECORD_SEQ_NUM_LEN];

	EVP_AEAD_CTX *aead_ctx;

	uint8_t *aead_fixed_nonce;
	size_t aead_fixed_nonce_len;

	size_t aead_variable_nonce_len;
	size_t aead_tag_len;

	int aead_xor_nonces;
	int aead_variable_nonce_in_record;

	EVP_CIPHER_CTX *cipher_ctx;
	EVP_MD_CTX *hash_ctx;

	int stream_mac;

	uint8_t *mac_key;
	size_t mac_key_len;
};

static struct tls12_record_protection *
tls12_record_protection_new(void)
{
	return calloc(1, sizeof(struct tls12_record_protection));
}

static void
tls12_record_protection_clear(struct tls12_record_protection *rp)
{
	if (rp->aead_ctx != NULL) {
		EVP_AEAD_CTX_cleanup(rp->aead_ctx);
		freezero(rp->aead_ctx, sizeof(*rp->aead_ctx));
	}

	freezero(rp->aead_fixed_nonce, rp->aead_fixed_nonce_len);

	EVP_CIPHER_CTX_free(rp->cipher_ctx);
	EVP_MD_CTX_free(rp->hash_ctx);

	freezero(rp->mac_key, rp->mac_key_len);

	memset(rp, 0, sizeof(*rp));
}

static void
tls12_record_protection_free(struct tls12_record_protection *rp)
{
	if (rp == NULL)
		return;

	tls12_record_protection_clear(rp);

	freezero(rp, sizeof(struct tls12_record_protection));
}

static int
tls12_record_protection_engaged(struct tls12_record_protection *rp)
{
	return rp->aead_ctx != NULL || rp->cipher_ctx != NULL;
}

static int
tls12_record_protection_unused(struct tls12_record_protection *rp)
{
	return rp->aead_ctx == NULL && rp->cipher_ctx == NULL &&
	    rp->hash_ctx == NULL && rp->mac_key == NULL;
}

static int
tls12_record_protection_eiv_len(struct tls12_record_protection *rp,
    size_t *out_eiv_len)
{
	int eiv_len;

	*out_eiv_len = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	eiv_len = 0;
	if (EVP_CIPHER_CTX_mode(rp->cipher_ctx) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(rp->cipher_ctx);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		return 0;

	*out_eiv_len = eiv_len;

	return 1;
}

static int
tls12_record_protection_block_size(struct tls12_record_protection *rp,
    size_t *out_block_size)
{
	int block_size;

	*out_block_size = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	block_size = EVP_CIPHER_CTX_block_size(rp->cipher_ctx);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		return 0;

	*out_block_size = block_size;

	return 1;
}

static int
tls12_record_protection_mac_len(struct tls12_record_protection *rp,
    size_t *out_mac_len)
{
	int mac_len;

	*out_mac_len = 0;

	if (rp->hash_ctx == NULL)
		return 0;

	mac_len = EVP_MD_CTX_size(rp->hash_ctx);
	if (mac_len <= 0 || mac_len > EVP_MAX_MD_SIZE)
		return 0;

	*out_mac_len = mac_len;

	return 1;
}

struct tls12_record_layer {
	uint16_t version;
	int dtls;

	uint8_t alert_desc;

	const EVP_AEAD *aead;
	const EVP_CIPHER *cipher;
	const EVP_MD *handshake_hash;
	const EVP_MD *mac_hash;

	/* Pointers to active record protection (memory is not owned). */
	struct tls12_record_protection *read;
	struct tls12_record_protection *write;

	struct tls12_record_protection *read_current;
	struct tls12_record_protection *write_current;
	struct tls12_record_protection *write_previous;
};

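/*
 * Typical usage: allocate a record layer with tls12_record_layer_new(),
 * configure it via tls12_record_layer_set_version() and either
 * tls12_record_layer_set_aead() or tls12_record_layer_set_cipher_hash(),
 * install keys with tls12_record_layer_change_read_cipher_state() and
 * tls12_record_layer_change_write_cipher_state(), then protect records
 * with tls12_record_layer_seal_record() and unprotect them with
 * tls12_record_layer_open_record().
 */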
struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		goto err;
	if ((rl->read_current = tls12_record_protection_new()) == NULL)
		goto err;
	if ((rl->write_current = tls12_record_protection_new()) == NULL)
		goto err;

	rl->read = rl->read_current;
	rl->write = rl->write_current;

	return rl;

 err:
	tls12_record_layer_free(rl);

	return NULL;
}

void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	if (rl == NULL)
		return;

	tls12_record_protection_free(rl->read_current);
	tls12_record_protection_free(rl->write_current);
	tls12_record_protection_free(rl->write_previous);

	freezero(rl, sizeof(struct tls12_record_layer));
}

void
tls12_record_layer_alert(struct tls12_record_layer *rl, uint8_t *alert_desc)
{
	*alert_desc = rl->alert_desc;
}

int
tls12_record_layer_write_overhead(struct tls12_record_layer *rl,
    size_t *overhead)
{
	size_t block_size, eiv_len, mac_len;

	*overhead = 0;

	if (rl->write->aead_ctx != NULL) {
		*overhead = rl->write->aead_tag_len;
	} else if (rl->write->cipher_ctx != NULL) {
		eiv_len = 0;
		if (rl->version != TLS1_VERSION) {
			if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
				return 0;
		}
		if (!tls12_record_protection_block_size(rl->write, &block_size))
			return 0;
		if (!tls12_record_protection_mac_len(rl->write, &mac_len))
			return 0;

		*overhead = eiv_len + block_size + mac_len;
	}

	return 1;
}

int
tls12_record_layer_read_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->read);
}

int
tls12_record_layer_write_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->write);
}

void
tls12_record_layer_set_aead(struct tls12_record_layer *rl, const EVP_AEAD *aead)
{
	rl->aead = aead;
}

void
tls12_record_layer_set_cipher_hash(struct tls12_record_layer *rl,
    const EVP_CIPHER *cipher, const EVP_MD *handshake_hash,
    const EVP_MD *mac_hash)
{
	rl->cipher = cipher;
	rl->handshake_hash = handshake_hash;
	rl->mac_hash = mac_hash;
}

void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = ((version >> 8) == DTLS1_VERSION_MAJOR);
}

uint16_t
tls12_record_layer_write_epoch(struct tls12_record_layer *rl)
{
	return rl->write->epoch;
}

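/*
 * Select the write protection matching the given epoch. For DTLS this may be
 * the previous epoch, which is retained until
 * tls12_record_layer_write_epoch_done() is called.
 */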
int
tls12_record_layer_use_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write->epoch == epoch)
		return 1;

	if (rl->write_current->epoch == epoch) {
		rl->write = rl->write_current;
		return 1;
	}

	if (rl->write_previous != NULL && rl->write_previous->epoch == epoch) {
		rl->write = rl->write_previous;
		return 1;
	}

	return 0;
}

void
tls12_record_layer_write_epoch_done(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write_previous == NULL || rl->write_previous->epoch != epoch)
		return;

	rl->write = rl->write_current;

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->read);
}

void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->write);

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX **cipher, EVP_MD_CTX **hash)
{
	*cipher = rl->read->cipher_ctx;
	*hash = rl->read->hash_ctx;
}

void
tls12_record_layer_reflect_seq_num(struct tls12_record_layer *rl)
{
	memcpy(rl->write->seq_num, rl->read->seq_num,
	    sizeof(rl->write->seq_num));
}

static const uint8_t tls12_max_seq_num[TLS12_RECORD_SEQ_NUM_LEN] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

int
tls12_record_layer_inc_seq_num(struct tls12_record_layer *rl, uint8_t *seq_num)
{
	CBS max_seq_num;
	int i;

	/*
	 * RFC 5246 section 6.1 and RFC 6347 section 4.1 - both TLS and DTLS
	 * sequence numbers must not wrap. Note that for DTLS the first two
	 * bytes are used as an "epoch" and not part of the sequence number.
	 */
	CBS_init(&max_seq_num, seq_num, TLS12_RECORD_SEQ_NUM_LEN);
	if (rl->dtls) {
		if (!CBS_skip(&max_seq_num, 2))
			return 0;
	}
	if (CBS_mem_equal(&max_seq_num, tls12_max_seq_num,
	    CBS_len(&max_seq_num)))
		return 0;

	for (i = TLS12_RECORD_SEQ_NUM_LEN - 1; i >= 0; i--) {
		if (++seq_num[i] != 0)
			break;
	}

	return 1;
}

static int
tls12_record_layer_set_mac_key(struct tls12_record_protection *rp,
    const uint8_t *mac_key, size_t mac_key_len)
{
	freezero(rp->mac_key, rp->mac_key_len);
	rp->mac_key = NULL;
	rp->mac_key_len = 0;

	if (mac_key == NULL || mac_key_len == 0)
		return 1;

	if ((rp->mac_key = calloc(1, mac_key_len)) == NULL)
		return 0;

	memcpy(rp->mac_key, mac_key, mac_key_len);
	rp->mac_key_len = mac_key_len;

	return 1;
}

static int
tls12_record_layer_ccs_aead(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, CBS *mac_key, CBS *key,
    CBS *iv)
{
	size_t aead_nonce_len;

	if (!tls12_record_protection_unused(rp))
		return 0;

	if ((rp->aead_ctx = calloc(1, sizeof(*rp->aead_ctx))) == NULL)
		return 0;

	/* AES GCM cipher suites use variable nonce in record. */
	if (rl->aead == EVP_aead_aes_128_gcm() ||
	    rl->aead == EVP_aead_aes_256_gcm())
		rp->aead_variable_nonce_in_record = 1;

	/* ChaCha20 Poly1305 XORs the fixed and variable nonces. */
	if (rl->aead == EVP_aead_chacha20_poly1305())
		rp->aead_xor_nonces = 1;

	if (!CBS_stow(iv, &rp->aead_fixed_nonce, &rp->aead_fixed_nonce_len))
		return 0;

	rp->aead_tag_len = EVP_AEAD_max_overhead(rl->aead);
	rp->aead_variable_nonce_len = 8;

	aead_nonce_len = EVP_AEAD_nonce_length(rl->aead);

	if (rp->aead_xor_nonces) {
		/* Fixed nonce length must match, variable must not exceed. */
		if (rp->aead_fixed_nonce_len != aead_nonce_len)
			return 0;
		if (rp->aead_variable_nonce_len > aead_nonce_len)
			return 0;
	} else {
		/* Concatenated nonce length must equal AEAD nonce length. */
		if (rp->aead_fixed_nonce_len +
		    rp->aead_variable_nonce_len != aead_nonce_len)
			return 0;
	}

	if (!EVP_AEAD_CTX_init(rp->aead_ctx, rl->aead, CBS_data(key),
	    CBS_len(key), EVP_AEAD_DEFAULT_TAG_LENGTH, NULL))
		return 0;

	return 1;
}

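/*
 * Set up record protection for a cipher and MAC based (non-AEAD) cipher
 * suite, including the GOST specific configuration.
 */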
static int
tls12_record_layer_ccs_cipher(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, CBS *mac_key, CBS *key,
    CBS *iv)
{
	EVP_PKEY *mac_pkey = NULL;
	int gost_param_nid;
	int mac_type;
	int ret = 0;

	if (!tls12_record_protection_unused(rp))
		goto err;

	mac_type = EVP_PKEY_HMAC;
	rp->stream_mac = 0;

	if (CBS_len(iv) > INT_MAX || CBS_len(key) > INT_MAX)
		goto err;
	if (EVP_CIPHER_iv_length(rl->cipher) != CBS_len(iv))
		goto err;
	if (EVP_CIPHER_key_length(rl->cipher) != CBS_len(key))
		goto err;

	/* Special handling for GOST... */
	if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
		if (CBS_len(mac_key) != 32)
			goto err;
		mac_type = EVP_PKEY_GOSTIMIT;
		rp->stream_mac = 1;
	} else {
		if (CBS_len(mac_key) > INT_MAX)
			goto err;
		if (EVP_MD_size(rl->mac_hash) != CBS_len(mac_key))
			goto err;
	}

	if ((rp->cipher_ctx = EVP_CIPHER_CTX_new()) == NULL)
		goto err;
	if ((rp->hash_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;

	if (!tls12_record_layer_set_mac_key(rp, CBS_data(mac_key),
	    CBS_len(mac_key)))
		goto err;

	if ((mac_pkey = EVP_PKEY_new_mac_key(mac_type, NULL, CBS_data(mac_key),
	    CBS_len(mac_key))) == NULL)
		goto err;

	if (!EVP_CipherInit_ex(rp->cipher_ctx, rl->cipher, NULL, CBS_data(key),
	    CBS_data(iv), is_write))
		goto err;

	if (EVP_DigestSignInit(rp->hash_ctx, NULL, rl->mac_hash, NULL,
	    mac_pkey) <= 0)
		goto err;

	/* More special handling for GOST... */
	if (EVP_CIPHER_type(rl->cipher) == NID_gost89_cnt) {
		gost_param_nid = NID_id_tc26_gost_28147_param_Z;
		if (EVP_MD_type(rl->handshake_hash) == NID_id_GostR3411_94)
			gost_param_nid = NID_id_Gost28147_89_CryptoPro_A_ParamSet;

		if (EVP_CIPHER_CTX_ctrl(rp->cipher_ctx, EVP_CTRL_GOST_SET_SBOX,
		    gost_param_nid, 0) <= 0)
			goto err;

		if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
			if (EVP_MD_CTX_ctrl(rp->hash_ctx, EVP_MD_CTRL_GOST_SET_SBOX,
			    gost_param_nid, 0) <= 0)
				goto err;
		}
	}

	ret = 1;

 err:
	EVP_PKEY_free(mac_pkey);

	return ret;
}

static int
tls12_record_layer_change_cipher_state(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, CBS *mac_key, CBS *key,
    CBS *iv)
{
	if (rl->aead != NULL)
		return tls12_record_layer_ccs_aead(rl, rp, is_write, mac_key,
		    key, iv);

	return tls12_record_layer_ccs_cipher(rl, rp, is_write, mac_key,
	    key, iv);
}

int
tls12_record_layer_change_read_cipher_state(struct tls12_record_layer *rl,
    CBS *mac_key, CBS *key, CBS *iv)
{
	struct tls12_record_protection *read_new = NULL;
	int ret = 0;

	if ((read_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Read sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, read_new, 0,
	    mac_key, key, iv))
		goto err;

	tls12_record_protection_free(rl->read_current);
	rl->read = rl->read_current = read_new;
	read_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(read_new);

	return ret;
}

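/*
 * Install new write protection. For DTLS the previous write protection is
 * retained, so that records can still be sent for the previous epoch via
 * tls12_record_layer_use_write_epoch().
 */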
int
tls12_record_layer_change_write_cipher_state(struct tls12_record_layer *rl,
    CBS *mac_key, CBS *key, CBS *iv)
{
	struct tls12_record_protection *write_new;
	int ret = 0;

	if ((write_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Write sequence number gets reset to zero. */

	/* DTLS epoch is incremented and is permitted to wrap. */
	if (rl->dtls)
		write_new->epoch = rl->write_current->epoch + 1;

	if (!tls12_record_layer_change_cipher_state(rl, write_new, 1,
	    mac_key, key, iv))
		goto err;

	if (rl->dtls) {
		tls12_record_protection_free(rl->write_previous);
		rl->write_previous = rl->write_current;
		rl->write_current = NULL;
	}
	tls12_record_protection_free(rl->write_current);
	rl->write = rl->write_current = write_new;
	write_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(write_new);

	return ret;
}

static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, CBS *seq_num, uint8_t **out,
    size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!CBB_add_bytes(&cbb, CBS_data(seq_num), CBS_len(seq_num)))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

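/*
 * Compute the MAC over the record pseudo-header and content. The MAC is
 * calculated on a copy of the hash context so that per-record data does not
 * accumulate in the original; for stream MACs the updated context is copied
 * back afterwards.
 */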
static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, CBS *seq_num, uint8_t content_type,
    const uint8_t *content, size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len = 0;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;
	if (mac_len == 0)
		goto err;

	if (stream_mac) {
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac_cbc(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t mac_len, size_t padding_len)
{
	uint8_t *header = NULL;
	size_t header_len = 0;
	uint8_t *mac = NULL;
	size_t out_mac_len = 0;
	int ret = 0;

	/*
	 * Must be constant time to avoid leaking details about CBC padding.
	 */

	if (!ssl3_cbc_record_digest_supported(rl->read->hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (!ssl3_cbc_digest_record(rl->read->hash_ctx, mac, &out_mac_len, header,
	    content, content_len + mac_len, content_len + mac_len + padding_len,
	    rl->read->mac_key, rl->read->mac_key_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);

	return ret;
}

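/*
 * Compute the MAC for a received record. CBC protected records must instead
 * go via tls12_record_layer_read_mac_cbc(), which is constant time.
 */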
static int
tls12_record_layer_read_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	size_t out_len;

	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		return 0;

	return tls12_record_layer_mac(rl, cbb, rl->read->hash_ctx,
	    rl->read->stream_mac, seq_num, content_type, content, content_len,
	    &out_len);
}

static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write->hash_ctx,
	    rl->write->stream_mac, seq_num, content_type, content, content_len,
	    out_len);
}

static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (rp->aead_variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, rp->aead_fixed_nonce,
	    rp->aead_fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, rp->aead_variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (rp->aead_variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;
	if (rp->aead_fixed_nonce_len < rp->aead_variable_nonce_len)
		return 0;

	/*
	 * Variable nonce (sequence number) is right padded, before the fixed
	 * nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    rp->aead_fixed_nonce_len - rp->aead_variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, rp->aead_variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < rp->aead_fixed_nonce_len; i++)
		nonce[i] ^= rp->aead_fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}

static int
tls12_record_layer_open_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	if (tls12_record_protection_engaged(rl->read))
		return 0;

	/* XXX - decrypt/process in place for now. */
	*out = (uint8_t *)CBS_data(fragment);
	*out_len = CBS_len(fragment);

	return 1;
}

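/*
 * Open an AEAD protected record - reconstruct the nonce from the sequence
 * number (or the explicit nonce carried in the record), then decrypt and
 * authenticate in place.
 */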
static int
tls12_record_layer_open_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	struct tls12_record_protection *rp = rl->read;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	uint8_t *plain;
	size_t plain_len;
	CBS var_nonce;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (rp->aead_xor_nonces) {
		if (!tls12_record_layer_aead_xored_nonce(rl, rp,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else if (rp->aead_variable_nonce_in_record) {
		if (!CBS_get_bytes(fragment, &var_nonce,
		    rp->aead_variable_nonce_len))
			goto err;
		if (!tls12_record_layer_aead_concat_nonce(rl, rp,
		    CBS_data(&var_nonce), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, rp,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	if (CBS_len(fragment) < rp->aead_tag_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment) - rp->aead_tag_len;

	if (!tls12_record_layer_pseudo_header(rl, content_type, plain_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!EVP_AEAD_CTX_open(rp->aead_ctx, plain, out_len, plain_len,
	    nonce, nonce_len, CBS_data(fragment), CBS_len(fragment),
	    header, header_len)) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (*out_len > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	if (*out_len != plain_len)
		goto err;

	*out = plain;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

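/*
 * Open a cipher and MAC protected record - decrypt in place, remove any CBC
 * padding, then compare the received MAC against the computed MAC in
 * constant time.
 */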
static int
tls12_record_layer_open_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	SSL3_RECORD_INTERNAL rrec;
	size_t block_size, eiv_len;
	uint8_t *mac = NULL;
	size_t mac_len = 0;
	uint8_t *out_mac = NULL;
	size_t out_mac_len = 0;
	uint8_t *plain;
	size_t plain_len;
	size_t min_len;
	CBB cbb_mac;
	int ret = 0;

	memset(&cbb_mac, 0, sizeof(cbb_mac));
	memset(&rrec, 0, sizeof(rrec));

	if (!tls12_record_protection_block_size(rl->read, &block_size))
		goto err;

	/* Determine explicit IV length. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->read, &eiv_len))
			goto err;
	}

	mac_len = 0;
	if (rl->read->hash_ctx != NULL) {
		if (!tls12_record_protection_mac_len(rl->read, &mac_len))
			goto err;
	}

	/* CBC has at least one padding byte. */
	min_len = eiv_len + mac_len;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		min_len += 1;

	if (CBS_len(fragment) < min_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}
	if (CBS_len(fragment) % block_size != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment);

	if (!EVP_Cipher(enc, plain, CBS_data(fragment), plain_len))
		goto err;

	rrec.data = plain;
	rrec.input = plain;
	rrec.length = plain_len;

	/*
	 * We now have to remove padding, extract MAC, calculate MAC
	 * and compare MAC in constant time.
	 */
	if (block_size > 1)
		ssl3_cbc_remove_padding(&rrec, eiv_len, mac_len);

	if ((mac = calloc(1, mac_len)) == NULL)
		goto err;

	if (!CBB_init(&cbb_mac, EVP_MAX_MD_SIZE))
		goto err;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE) {
		ssl3_cbc_copy_mac(mac, &rrec, mac_len, rrec.length +
		    rrec.padding_length);
		rrec.length -= mac_len;
		if (!tls12_record_layer_read_mac_cbc(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length, mac_len,
		    rrec.padding_length))
			goto err;
	} else {
		rrec.length -= mac_len;
		memcpy(mac, rrec.data + rrec.length, mac_len);
		if (!tls12_record_layer_read_mac(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length))
			goto err;
	}
	if (!CBB_finish(&cbb_mac, &out_mac, &out_mac_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	if (timingsafe_memcmp(mac, out_mac, mac_len) != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (rrec.length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	*out = rrec.data;
	*out_len = rrec.length;

	ret = 1;

 err:
	CBB_cleanup(&cbb_mac);
	freezero(mac, mac_len);
	freezero(out_mac, out_mac_len);

	return ret;
}

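/*
 * Open a single TLS or DTLS record - parse the record header, then verify
 * and decrypt the fragment. Processing is done in place, so *out points
 * into the caller supplied buffer.
 */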
int
tls12_record_layer_open_record(struct tls12_record_layer *rl, uint8_t *buf,
    size_t buf_len, uint8_t **out, size_t *out_len)
{
	CBS cbs, fragment, seq_num;
	uint16_t version;
	uint8_t content_type;

	CBS_init(&cbs, buf, buf_len);
	CBS_init(&seq_num, rl->read->seq_num, sizeof(rl->read->seq_num));

	if (!CBS_get_u8(&cbs, &content_type))
		return 0;
	if (!CBS_get_u16(&cbs, &version))
		return 0;
	if (rl->dtls) {
		/*
		 * The DTLS sequence number is split into a 16 bit epoch and
		 * 48 bit sequence number, however for the purposes of record
		 * processing it is treated the same as a TLS 64 bit sequence
		 * number. DTLS also uses explicit read sequence numbers, which
		 * we need to extract from the DTLS record header.
		 */
		if (!CBS_get_bytes(&cbs, &seq_num, SSL3_SEQUENCE_SIZE))
			return 0;
		if (!CBS_write_bytes(&seq_num, rl->read->seq_num,
		    sizeof(rl->read->seq_num), NULL))
			return 0;
	}
	if (!CBS_get_u16_length_prefixed(&cbs, &fragment))
		return 0;

	if (rl->read->aead_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_aead(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else if (rl->read->cipher_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_cipher(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else {
		if (!tls12_record_layer_open_record_plaintext(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	}

	if (!rl->dtls) {
		if (!tls12_record_layer_inc_seq_num(rl, rl->read->seq_num))
			return 0;
	}

	return 1;
}

static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (tls12_record_protection_engaged(rl->write))
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	struct tls12_record_protection *rp = rl->write;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (rp->aead_xor_nonces) {
		if (!tls12_record_layer_aead_xored_nonce(rl, rp,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, rp,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	if (rp->aead_variable_nonce_in_record) {
		/* XXX - length check? */
		if (!CBB_add_bytes(out, CBS_data(seq_num),
		    rp->aead_variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + rp->aead_tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(rp->aead_ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

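/*
 * Seal a record using a block or stream cipher - assemble the explicit IV,
 * content, MAC and any CBC padding, then encrypt the result into the output
 * CBB.
 */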
static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write->cipher_ctx;
	size_t block_size, eiv_len, mac_len, pad_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
			goto err;
	}
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write->hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    seq_num, content, content_len, &mac_len))
			goto err;
	}

	plain_len = eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	if (!tls12_record_protection_block_size(rl->write, &block_size))
		goto err;
	if (block_size > 1) {
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

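/*
 * Seal a single record - write the TLS or DTLS record header and protected
 * fragment into the given CBB, then increment the write sequence number.
 */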
int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	uint8_t *seq_num_data = NULL;
	size_t seq_num_len = 0;
	CBB fragment, seq_num_cbb;
	CBS seq_num;
	int ret = 0;

	/*
	 * Construct the effective sequence number - this is used in both
	 * the DTLS header and for MAC calculations.
	 */
	if (!CBB_init(&seq_num_cbb, SSL3_SEQUENCE_SIZE))
		goto err;
	if (!tls12_record_layer_build_seq_num(rl, &seq_num_cbb, rl->write->epoch,
	    rl->write->seq_num, sizeof(rl->write->seq_num)))
		goto err;
	if (!CBB_finish(&seq_num_cbb, &seq_num_data, &seq_num_len))
		goto err;
	CBS_init(&seq_num, seq_num_data, seq_num_len);

	if (!CBB_add_u8(cbb, content_type))
		goto err;
	if (!CBB_add_u16(cbb, rl->version))
		goto err;
	if (rl->dtls) {
		if (!CBB_add_bytes(cbb, CBS_data(&seq_num), CBS_len(&seq_num)))
			goto err;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		goto err;

	if (rl->write->aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else if (rl->write->cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			goto err;
	}

	if (!CBB_flush(cbb))
		goto err;

	if (!tls12_record_layer_inc_seq_num(rl, rl->write->seq_num))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&seq_num_cbb);
	free(seq_num_data);

	return ret;
}