/* $OpenBSD: tls12_record_layer.c,v 1.26 2021/04/19 17:26:39 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <limits.h>
#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

#define TLS12_RECORD_SEQ_NUM_LEN	8

struct tls12_record_protection {
	uint16_t epoch;
	uint8_t seq_num[TLS12_RECORD_SEQ_NUM_LEN];

	SSL_AEAD_CTX *aead_ctx;

	EVP_CIPHER_CTX *cipher_ctx;
	EVP_MD_CTX *hash_ctx;

	int stream_mac;

	uint8_t *mac_key;
	size_t mac_key_len;
};

static struct tls12_record_protection *
tls12_record_protection_new(void)
{
	return calloc(1, sizeof(struct tls12_record_protection));
}

static void
tls12_record_protection_clear(struct tls12_record_protection *rp)
{
	memset(rp->seq_num, 0, sizeof(rp->seq_num));

	if (rp->aead_ctx != NULL) {
		EVP_AEAD_CTX_cleanup(&rp->aead_ctx->ctx);
		freezero(rp->aead_ctx, sizeof(*rp->aead_ctx));
		rp->aead_ctx = NULL;
	}

	EVP_CIPHER_CTX_free(rp->cipher_ctx);
	rp->cipher_ctx = NULL;

	EVP_MD_CTX_free(rp->hash_ctx);
	rp->hash_ctx = NULL;

	freezero(rp->mac_key, rp->mac_key_len);
	rp->mac_key = NULL;
	rp->mac_key_len = 0;
}

static void
tls12_record_protection_free(struct tls12_record_protection *rp)
{
	if (rp == NULL)
		return;

	tls12_record_protection_clear(rp);

	freezero(rp, sizeof(struct tls12_record_protection));
}

static int
tls12_record_protection_engaged(struct tls12_record_protection *rp)
{
	return rp->aead_ctx != NULL || rp->cipher_ctx != NULL;
}

static int
tls12_record_protection_unused(struct tls12_record_protection *rp)
{
	return rp->aead_ctx == NULL && rp->cipher_ctx == NULL &&
	    rp->hash_ctx == NULL && rp->mac_key == NULL;
}

static int
tls12_record_protection_eiv_len(struct tls12_record_protection *rp,
    size_t *out_eiv_len)
{
	int eiv_len;

	*out_eiv_len = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	eiv_len = 0;
	if (EVP_CIPHER_CTX_mode(rp->cipher_ctx) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(rp->cipher_ctx);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		return 0;

	*out_eiv_len = eiv_len;

	return 1;
}

static int
tls12_record_protection_block_size(struct tls12_record_protection *rp,
    size_t *out_block_size)
{
	int block_size;

	*out_block_size = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	block_size = EVP_CIPHER_CTX_block_size(rp->cipher_ctx);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		return 0;

	*out_block_size = block_size;

	return 1;
}

static int
tls12_record_protection_mac_len(struct tls12_record_protection *rp,
    size_t *out_mac_len)
{
	int mac_len;

	*out_mac_len = 0;

	if (rp->hash_ctx == NULL)
		return 0;

	mac_len = EVP_MD_CTX_size(rp->hash_ctx);
	if (mac_len <= 0 || mac_len > EVP_MAX_MD_SIZE)
		return 0;

	*out_mac_len = mac_len;

	return 1;
}
struct tls12_record_layer {
	uint16_t version;
	int dtls;

	uint8_t alert_desc;

	const EVP_AEAD *aead;
	const EVP_CIPHER *cipher;
	const EVP_MD *handshake_hash;
	const EVP_MD *mac_hash;

	/* Pointers to active record protection (memory is not owned). */
	struct tls12_record_protection *read;
	struct tls12_record_protection *write;

	struct tls12_record_protection *read_current;
	struct tls12_record_protection *write_current;
	struct tls12_record_protection *write_previous;
};
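/*
 * Note: the read and write pointers above refer to the protection that is
 * currently in effect. For DTLS the previous write protection is retained
 * alongside the current one, since a retransmitted flight that straddles a
 * ChangeCipherSpec may still need to be sent under the earlier epoch's keys.
 */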
struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		goto err;
	if ((rl->read_current = tls12_record_protection_new()) == NULL)
		goto err;
	if ((rl->write_current = tls12_record_protection_new()) == NULL)
		goto err;

	rl->read = rl->read_current;
	rl->write = rl->write_current;

	return rl;

 err:
	tls12_record_layer_free(rl);

	return NULL;
}

void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	if (rl == NULL)
		return;

	tls12_record_protection_free(rl->read_current);
	tls12_record_protection_free(rl->write_current);
	tls12_record_protection_free(rl->write_previous);

	freezero(rl, sizeof(struct tls12_record_layer));
}

void
tls12_record_layer_alert(struct tls12_record_layer *rl, uint8_t *alert_desc)
{
	*alert_desc = rl->alert_desc;
}

int
tls12_record_layer_write_overhead(struct tls12_record_layer *rl,
    size_t *overhead)
{
	size_t block_size, eiv_len, mac_len;

	*overhead = 0;

	if (rl->write->aead_ctx != NULL) {
		*overhead = rl->write->aead_ctx->tag_len;
	} else if (rl->write->cipher_ctx != NULL) {
		eiv_len = 0;
		if (rl->version != TLS1_VERSION) {
			if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
				return 0;
		}
		if (!tls12_record_protection_block_size(rl->write, &block_size))
			return 0;
		if (!tls12_record_protection_mac_len(rl->write, &mac_len))
			return 0;

		*overhead = eiv_len + block_size + mac_len;
	}

	return 1;
}

int
tls12_record_layer_read_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->read);
}

int
tls12_record_layer_write_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->write);
}

const EVP_AEAD *
tls12_record_layer_aead(struct tls12_record_layer *rl)
{
	return rl->aead;
}

const EVP_CIPHER *
tls12_record_layer_cipher(struct tls12_record_layer *rl)
{
	return rl->cipher;
}

void
tls12_record_layer_set_aead(struct tls12_record_layer *rl, const EVP_AEAD *aead)
{
	rl->aead = aead;
}

void
tls12_record_layer_set_cipher_hash(struct tls12_record_layer *rl,
    const EVP_CIPHER *cipher, const EVP_MD *handshake_hash,
    const EVP_MD *mac_hash)
{
	rl->cipher = cipher;
	rl->handshake_hash = handshake_hash;
	rl->mac_hash = mac_hash;
}

void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = ((version >> 8) == DTLS1_VERSION_MAJOR);
}

void
tls12_record_layer_set_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->write->epoch = epoch;
}

int
tls12_record_layer_use_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write->epoch == epoch)
		return 1;

	if (rl->write_current->epoch == epoch) {
		rl->write = rl->write_current;
		return 1;
	}

	if (rl->write_previous != NULL && rl->write_previous->epoch == epoch) {
		rl->write = rl->write_previous;
		return 1;
	}

	return 0;
}

void
tls12_record_layer_write_epoch_done(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write_previous == NULL || rl->write_previous->epoch != epoch)
		return;

	rl->write = rl->write_current;

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->read);
}

void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_protection_clear(rl->write);

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX **cipher, EVP_MD_CTX **hash)
{
	*cipher = rl->read->cipher_ctx;
	*hash = rl->read->hash_ctx;
}

void
tls12_record_layer_reflect_seq_num(struct tls12_record_layer *rl)
{
	memcpy(rl->write->seq_num, rl->read->seq_num,
	    sizeof(rl->write->seq_num));
}

static const uint8_t tls12_max_seq_num[TLS12_RECORD_SEQ_NUM_LEN] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
};

int
tls12_record_layer_inc_seq_num(struct tls12_record_layer *rl, uint8_t *seq_num)
{
	CBS max_seq_num;
	int i;

	/*
	 * RFC 5246 section 6.1 and RFC 6347 section 4.1 - both TLS and DTLS
	 * sequence numbers must not wrap. Note that for DTLS the first two
	 * bytes are used as an "epoch" and not part of the sequence number.
	 */
	CBS_init(&max_seq_num, seq_num, TLS12_RECORD_SEQ_NUM_LEN);
	if (rl->dtls) {
		if (!CBS_skip(&max_seq_num, 2))
			return 0;
	}
	if (CBS_mem_equal(&max_seq_num, tls12_max_seq_num,
	    CBS_len(&max_seq_num)))
		return 0;

	for (i = TLS12_RECORD_SEQ_NUM_LEN - 1; i >= 0; i--) {
		if (++seq_num[i] != 0)
			break;
	}

	return 1;
}
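/*
 * The sequence number is held as an eight byte big endian counter, so for
 * example 00 00 00 00 00 00 00 ff increments to 00 00 00 00 00 00 01 00.
 * For DTLS only the low six bytes are compared against the maximum above,
 * since the top two bytes carry the epoch and the wrap check guarantees the
 * increment never carries into them.
 */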
static int
tls12_record_layer_set_mac_key(struct tls12_record_protection *rp,
    const uint8_t *mac_key, size_t mac_key_len)
{
	freezero(rp->mac_key, rp->mac_key_len);
	rp->mac_key = NULL;
	rp->mac_key_len = 0;

	if (mac_key == NULL || mac_key_len == 0)
		return 1;

	if ((rp->mac_key = calloc(1, mac_key_len)) == NULL)
		return 0;

	memcpy(rp->mac_key, mac_key, mac_key_len);
	rp->mac_key_len = mac_key_len;

	return 1;
}

static int
tls12_record_layer_ccs_aead(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	size_t aead_nonce_len;

	if (!tls12_record_protection_unused(rp))
		return 0;

	if ((rp->aead_ctx = calloc(1, sizeof(*rp->aead_ctx))) == NULL)
		return 0;

	/* AES GCM cipher suites use variable nonce in record. */
	if (rl->aead == EVP_aead_aes_128_gcm() ||
	    rl->aead == EVP_aead_aes_256_gcm())
		rp->aead_ctx->variable_nonce_in_record = 1;

	/* ChaCha20 Poly1305 XORs the fixed and variable nonces. */
	if (rl->aead == EVP_aead_chacha20_poly1305())
		rp->aead_ctx->xor_fixed_nonce = 1;

	if (iv_len > sizeof(rp->aead_ctx->fixed_nonce))
		return 0;

	memcpy(rp->aead_ctx->fixed_nonce, iv, iv_len);
	rp->aead_ctx->fixed_nonce_len = iv_len;
	rp->aead_ctx->tag_len = EVP_AEAD_max_overhead(rl->aead);
	rp->aead_ctx->variable_nonce_len = 8;

	aead_nonce_len = EVP_AEAD_nonce_length(rl->aead);

	if (rp->aead_ctx->xor_fixed_nonce) {
		/* Fixed nonce length must match, variable must not exceed. */
		if (rp->aead_ctx->fixed_nonce_len != aead_nonce_len)
			return 0;
		if (rp->aead_ctx->variable_nonce_len > aead_nonce_len)
			return 0;
	} else {
		/* Concatenated nonce length must equal AEAD nonce length. */
		if (rp->aead_ctx->fixed_nonce_len +
		    rp->aead_ctx->variable_nonce_len != aead_nonce_len)
			return 0;
	}

	if (!EVP_AEAD_CTX_init(&rp->aead_ctx->ctx, rl->aead, key, key_len,
	    EVP_AEAD_DEFAULT_TAG_LENGTH, NULL))
		return 0;

	return 1;
}
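/*
 * The settings above correspond to the two TLS 1.2 AEAD nonce schemes:
 * AES-GCM (RFC 5288) pairs a four byte implicit nonce from the key block
 * with an eight byte explicit nonce carried in each record, whereas
 * ChaCha20-Poly1305 (RFC 7905) XORs the padded sequence number into a
 * twelve byte fixed nonce and sends nothing extra on the wire.
 */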
static int
tls12_record_layer_ccs_cipher(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	EVP_PKEY *mac_pkey = NULL;
	int gost_param_nid;
	int mac_type;
	int ret = 0;

	if (!tls12_record_protection_unused(rp))
		goto err;

	mac_type = EVP_PKEY_HMAC;
	rp->stream_mac = 0;

	if (iv_len > INT_MAX || key_len > INT_MAX)
		goto err;
	if (EVP_CIPHER_iv_length(rl->cipher) != iv_len)
		goto err;
	if (EVP_CIPHER_key_length(rl->cipher) != key_len)
		goto err;

	/* Special handling for GOST... */
	if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
		if (mac_key_len != 32)
			goto err;
		mac_type = EVP_PKEY_GOSTIMIT;
		rp->stream_mac = 1;
	} else {
		if (mac_key_len > INT_MAX)
			goto err;
		if (EVP_MD_size(rl->mac_hash) != mac_key_len)
			goto err;
	}

	if ((rp->cipher_ctx = EVP_CIPHER_CTX_new()) == NULL)
		goto err;
	if ((rp->hash_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;

	if (!tls12_record_layer_set_mac_key(rp, mac_key, mac_key_len))
		goto err;

	if ((mac_pkey = EVP_PKEY_new_mac_key(mac_type, NULL, mac_key,
	    mac_key_len)) == NULL)
		goto err;

	if (!EVP_CipherInit_ex(rp->cipher_ctx, rl->cipher, NULL, key, iv,
	    is_write))
		goto err;

	if (EVP_DigestSignInit(rp->hash_ctx, NULL, rl->mac_hash, NULL,
	    mac_pkey) <= 0)
		goto err;

	/* More special handling for GOST... */
	if (EVP_CIPHER_type(rl->cipher) == NID_gost89_cnt) {
		gost_param_nid = NID_id_tc26_gost_28147_param_Z;
		if (EVP_MD_type(rl->handshake_hash) == NID_id_GostR3411_94)
			gost_param_nid = NID_id_Gost28147_89_CryptoPro_A_ParamSet;

		if (EVP_CIPHER_CTX_ctrl(rp->cipher_ctx, EVP_CTRL_GOST_SET_SBOX,
		    gost_param_nid, 0) <= 0)
			goto err;

		if (EVP_MD_type(rl->mac_hash) == NID_id_Gost28147_89_MAC) {
			if (EVP_MD_CTX_ctrl(rp->hash_ctx, EVP_MD_CTRL_GOST_SET_SBOX,
			    gost_param_nid, 0) <= 0)
				goto err;
		}
	}

	ret = 1;

 err:
	EVP_PKEY_free(mac_pkey);

	return ret;
}

static int
tls12_record_layer_change_cipher_state(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	if (rl->aead != NULL)
		return tls12_record_layer_ccs_aead(rl, rp, is_write, mac_key,
		    mac_key_len, key, key_len, iv, iv_len);

	return tls12_record_layer_ccs_cipher(rl, rp, is_write, mac_key,
	    mac_key_len, key, key_len, iv, iv_len);
}

int
tls12_record_layer_change_read_cipher_state(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len, const uint8_t *key,
    size_t key_len, const uint8_t *iv, size_t iv_len)
{
	struct tls12_record_protection *read_new = NULL;
	int ret = 0;

	if ((read_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Read sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, read_new, 0,
	    mac_key, mac_key_len, key, key_len, iv, iv_len))
		goto err;

	tls12_record_protection_free(rl->read_current);
	rl->read = rl->read_current = read_new;
	read_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(read_new);

	return ret;
}
int
tls12_record_layer_change_write_cipher_state(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len, const uint8_t *key,
    size_t key_len, const uint8_t *iv, size_t iv_len)
{
	struct tls12_record_protection *write_new;
	int ret = 0;

	if ((write_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Write sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, write_new, 1,
	    mac_key, mac_key_len, key, key_len, iv, iv_len))
		goto err;

	if (rl->dtls) {
		tls12_record_protection_free(rl->write_previous);
		rl->write_previous = rl->write_current;
		rl->write_current = NULL;
	}
	tls12_record_protection_free(rl->write_current);
	rl->write = rl->write_current = write_new;
	write_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(write_new);

	return ret;
}

static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, CBS *seq_num, uint8_t **out,
    size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!CBB_add_bytes(&cbb, CBS_data(seq_num), CBS_len(seq_num)))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}
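/*
 * The resulting pseudo-header is always 13 bytes - it serves as the MAC
 * input prefix for CBC/stream cipher suites and as the additional
 * authenticated data for AEAD cipher suites:
 *
 *   sequence number (8) || content type (1) || version (2) || length (2)
 */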
static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, CBS *seq_num, uint8_t content_type,
    const uint8_t *content, size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len = 0;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;
	if (mac_len == 0)
		goto err;

	if (stream_mac) {
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac_cbc(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t mac_len, size_t padding_len)
{
	uint8_t *header = NULL;
	size_t header_len = 0;
	uint8_t *mac = NULL;
	size_t out_mac_len = 0;
	int ret = 0;

	/*
	 * Must be constant time to avoid leaking details about CBC padding.
	 */

	if (!ssl3_cbc_record_digest_supported(rl->read->hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (!ssl3_cbc_digest_record(rl->read->hash_ctx, mac, &out_mac_len, header,
	    content, content_len + mac_len, content_len + mac_len + padding_len,
	    rl->read->mac_key, rl->read->mac_key_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	size_t out_len;

	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		return 0;

	return tls12_record_layer_mac(rl, cbb, rl->read->hash_ctx,
	    rl->read->stream_mac, seq_num, content_type, content, content_len,
	    &out_len);
}

static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write->hash_ctx,
	    rl->write->stream_mac, seq_num, content_type, content, content_len,
	    out_len);
}

static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, aead->fixed_nonce,
	    aead->fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;
	if (aead->fixed_nonce_len < aead->variable_nonce_len)
		return 0;

	/*
	 * Variable nonce (sequence number) is right padded, before the fixed
	 * nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    aead->fixed_nonce_len - aead->variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < aead->fixed_nonce_len; i++)
		nonce[i] ^= aead->fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}
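/*
 * For example, with ChaCha20-Poly1305 and record sequence number
 * 00 00 00 00 00 00 00 2a, the eight byte value is padded out to
 * 00 00 00 00 00 00 00 00 00 00 00 2a and then XORed with the twelve byte
 * fixed nonce. For AES-GCM the four byte fixed nonce is instead simply
 * prepended to the eight byte explicit nonce.
 */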
static int
tls12_record_layer_open_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	if (tls12_record_protection_engaged(rl->read))
		return 0;

	/* XXX - decrypt/process in place for now. */
	*out = (uint8_t *)CBS_data(fragment);
	*out_len = CBS_len(fragment);

	return 1;
}

static int
tls12_record_layer_open_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	const SSL_AEAD_CTX *aead = rl->read->aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	uint8_t *plain;
	size_t plain_len;
	CBS var_nonce;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else if (aead->variable_nonce_in_record) {
		if (!CBS_get_bytes(fragment, &var_nonce,
		    aead->variable_nonce_len))
			goto err;
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(&var_nonce), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	if (CBS_len(fragment) < aead->tag_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment) - aead->tag_len;

	if (!tls12_record_layer_pseudo_header(rl, content_type, plain_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!EVP_AEAD_CTX_open(&aead->ctx, plain, out_len, plain_len,
	    nonce, nonce_len, CBS_data(fragment), CBS_len(fragment),
	    header, header_len)) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (*out_len > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	if (*out_len != plain_len)
		goto err;

	*out = plain;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}
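/*
 * On the wire an AEAD protected fragment is therefore:
 *
 *   [explicit nonce (8, only when carried in the record)] || ciphertext || tag
 *
 * The explicit nonce, if present, is stripped before decryption and the tag
 * length is taken from the negotiated AEAD.
 */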
static int
tls12_record_layer_open_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	SSL3_RECORD_INTERNAL rrec;
	size_t block_size, eiv_len;
	uint8_t *mac = NULL;
	size_t mac_len = 0;
	uint8_t *out_mac = NULL;
	size_t out_mac_len = 0;
	uint8_t *plain;
	size_t plain_len;
	size_t min_len;
	CBB cbb_mac;
	int ret = 0;

	memset(&cbb_mac, 0, sizeof(cbb_mac));
	memset(&rrec, 0, sizeof(rrec));

	if (!tls12_record_protection_block_size(rl->read, &block_size))
		goto err;

	/* Determine explicit IV length. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->read, &eiv_len))
			goto err;
	}

	mac_len = 0;
	if (rl->read->hash_ctx != NULL) {
		if (!tls12_record_protection_mac_len(rl->read, &mac_len))
			goto err;
	}

	/* CBC has at least one padding byte. */
	min_len = eiv_len + mac_len;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		min_len += 1;

	if (CBS_len(fragment) < min_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}
	if (CBS_len(fragment) % block_size != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment);

	if (!EVP_Cipher(enc, plain, CBS_data(fragment), plain_len))
		goto err;

	rrec.data = plain;
	rrec.input = plain;
	rrec.length = plain_len;

	/*
	 * We now have to remove padding, extract MAC, calculate MAC
	 * and compare MAC in constant time.
	 */
	if (block_size > 1)
		ssl3_cbc_remove_padding(&rrec, eiv_len, mac_len);

	if ((mac = calloc(1, mac_len)) == NULL)
		goto err;

	if (!CBB_init(&cbb_mac, EVP_MAX_MD_SIZE))
		goto err;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE) {
		ssl3_cbc_copy_mac(mac, &rrec, mac_len, rrec.length +
		    rrec.padding_length);
		rrec.length -= mac_len;
		if (!tls12_record_layer_read_mac_cbc(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length, mac_len,
		    rrec.padding_length))
			goto err;
	} else {
		rrec.length -= mac_len;
		memcpy(mac, rrec.data + rrec.length, mac_len);
		if (!tls12_record_layer_read_mac(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length))
			goto err;
	}
	if (!CBB_finish(&cbb_mac, &out_mac, &out_mac_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	if (timingsafe_memcmp(mac, out_mac, mac_len) != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (rrec.length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	*out = rrec.data;
	*out_len = rrec.length;

	ret = 1;

 err:
	CBB_cleanup(&cbb_mac);
	freezero(mac, mac_len);
	freezero(out_mac, out_mac_len);

	return ret;
}
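/*
 * A CBC protected fragment decrypts to:
 *
 *   [explicit IV (TLS 1.1+)] || content || MAC || padding || padding length
 *
 * Padding removal, MAC extraction and the MAC comparison above are all done
 * in constant time so that padding errors are indistinguishable from MAC
 * failures (Lucky Thirteen style attacks).
 */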
int
tls12_record_layer_open_record(struct tls12_record_layer *rl, uint8_t *buf,
    size_t buf_len, uint8_t **out, size_t *out_len)
{
	CBS cbs, fragment, seq_num;
	uint16_t version;
	uint8_t content_type;

	CBS_init(&cbs, buf, buf_len);
	CBS_init(&seq_num, rl->read->seq_num, sizeof(rl->read->seq_num));

	if (!CBS_get_u8(&cbs, &content_type))
		return 0;
	if (!CBS_get_u16(&cbs, &version))
		return 0;
	if (rl->dtls) {
		/*
		 * The DTLS sequence number is split into a 16 bit epoch and
		 * 48 bit sequence number, however for the purposes of record
		 * processing it is treated the same as a TLS 64 bit sequence
		 * number. DTLS also uses explicit read sequence numbers, which
		 * we need to extract from the DTLS record header.
		 */
		if (!CBS_get_bytes(&cbs, &seq_num, SSL3_SEQUENCE_SIZE))
			return 0;
		if (!CBS_write_bytes(&seq_num, rl->read->seq_num,
		    sizeof(rl->read->seq_num), NULL))
			return 0;
	}
	if (!CBS_get_u16_length_prefixed(&cbs, &fragment))
		return 0;

	if (rl->read->aead_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_aead(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else if (rl->read->cipher_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_cipher(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else {
		if (!tls12_record_layer_open_record_plaintext(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	}

	if (!rl->dtls) {
		if (!tls12_record_layer_inc_seq_num(rl, rl->read->seq_num))
			return 0;
	}

	return 1;
}

static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (tls12_record_protection_engaged(rl->write))
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	const SSL_AEAD_CTX *aead = rl->write->aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	if (aead->variable_nonce_in_record) {
		/* XXX - length check? */
		if (!CBB_add_bytes(out, CBS_data(seq_num),
		    aead->variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + aead->tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(&aead->ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}
static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write->cipher_ctx;
	size_t block_size, eiv_len, mac_len, pad_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
			goto err;
	}
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write->hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    seq_num, content, content_len, &mac_len))
			goto err;
	}

	plain_len = eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	if (!tls12_record_protection_block_size(rl->write, &block_size))
		goto err;
	if (block_size > 1) {
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	uint8_t *seq_num_data = NULL;
	size_t seq_num_len = 0;
	CBB fragment, seq_num_cbb;
	CBS seq_num;
	int ret = 0;

	/*
	 * Construct the effective sequence number - this is used in both
	 * the DTLS header and for MAC calculations.
	 */
	if (!CBB_init(&seq_num_cbb, SSL3_SEQUENCE_SIZE))
		goto err;
	if (!tls12_record_layer_build_seq_num(rl, &seq_num_cbb, rl->write->epoch,
	    rl->write->seq_num, sizeof(rl->write->seq_num)))
		goto err;
	if (!CBB_finish(&seq_num_cbb, &seq_num_data, &seq_num_len))
		goto err;
	CBS_init(&seq_num, seq_num_data, seq_num_len);

	if (!CBB_add_u8(cbb, content_type))
		goto err;
	if (!CBB_add_u16(cbb, rl->version))
		goto err;
	if (rl->dtls) {
		if (!CBB_add_bytes(cbb, CBS_data(&seq_num), CBS_len(&seq_num)))
			goto err;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		goto err;

	if (rl->write->aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else if (rl->write->cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			goto err;
	}

	if (!CBB_flush(cbb))
		goto err;

	if (!tls12_record_layer_inc_seq_num(rl, rl->write->seq_num))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&seq_num_cbb);
	free(seq_num_data);

	return ret;
}