/* $OpenBSD: tls12_record_layer.c,v 1.17 2021/01/28 18:32:46 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <limits.h>
#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

struct tls12_record_protection {
	uint16_t epoch;
	uint8_t seq_num[SSL3_SEQUENCE_SIZE];

	SSL_AEAD_CTX *aead_ctx;

	int stream_mac;

	uint8_t *mac_key;
	size_t mac_key_len;

	/*
	 * XXX - for now these are just pointers to externally managed
	 * structs/memory. These should eventually be owned by the record layer.
	 */
	EVP_CIPHER_CTX *cipher_ctx;
	EVP_MD_CTX *hash_ctx;
};

static struct tls12_record_protection *
tls12_record_protection_new(void)
{
	return calloc(1, sizeof(struct tls12_record_protection));
}

static void
tls12_record_protection_clear(struct tls12_record_protection *rp)
{
	memset(rp->seq_num, 0, sizeof(rp->seq_num));

	if (rp->aead_ctx != NULL) {
		EVP_AEAD_CTX_cleanup(&rp->aead_ctx->ctx);
		freezero(rp->aead_ctx, sizeof(*rp->aead_ctx));
		rp->aead_ctx = NULL;
	}

	freezero(rp->mac_key, rp->mac_key_len);
	rp->mac_key = NULL;
	rp->mac_key_len = 0;
}

static void
tls12_record_protection_free(struct tls12_record_protection *rp)
{
	if (rp == NULL)
		return;

	tls12_record_protection_clear(rp);

	freezero(rp, sizeof(struct tls12_record_protection));
}

static int
tls12_record_protection_engaged(struct tls12_record_protection *rp)
{
	return rp->aead_ctx != NULL || rp->cipher_ctx != NULL;
}

static int
tls12_record_protection_eiv_len(struct tls12_record_protection *rp,
    size_t *out_eiv_len)
{
	int eiv_len;

	*out_eiv_len = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	eiv_len = 0;
	if (EVP_CIPHER_CTX_mode(rp->cipher_ctx) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(rp->cipher_ctx);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		return 0;

	*out_eiv_len = eiv_len;

	return 1;
}

static int
tls12_record_protection_block_size(struct tls12_record_protection *rp,
    size_t *out_block_size)
{
	int block_size;

	*out_block_size = 0;

	if (rp->cipher_ctx == NULL)
		return 0;

	block_size = EVP_CIPHER_CTX_block_size(rp->cipher_ctx);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		return 0;

	*out_block_size = block_size;

	return 1;
}

static int
tls12_record_protection_mac_len(struct tls12_record_protection *rp,
    size_t *out_mac_len)
{
	int mac_len;

	*out_mac_len = 0;

	if (rp->hash_ctx == NULL)
		return 0;

	mac_len = EVP_MD_CTX_size(rp->hash_ctx);
	if (mac_len <= 0 || mac_len > EVP_MAX_MD_SIZE)
		return 0;

	*out_mac_len = mac_len;

	return 1;
}

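/*
 * A record layer tracks one read and one write protection state. The
 * read/write pointers refer to the protection currently in use, while
 * read_current, write_current and write_previous own the memory. For DTLS,
 * write_previous retains the prior epoch's write state so records for that
 * epoch can still be protected until the epoch is marked done.
 */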
struct tls12_record_layer {
	uint16_t version;
	int dtls;

	uint8_t alert_desc;

	const EVP_AEAD *aead;

	/* Pointers to active record protection (memory is not owned). */
	struct tls12_record_protection *read;
	struct tls12_record_protection *write;

	struct tls12_record_protection *read_current;
	struct tls12_record_protection *write_current;
	struct tls12_record_protection *write_previous;
};

struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		goto err;
	if ((rl->read_current = tls12_record_protection_new()) == NULL)
		goto err;
	if ((rl->write_current = tls12_record_protection_new()) == NULL)
		goto err;

	rl->read = rl->read_current;
	rl->write = rl->write_current;

	return rl;

 err:
	tls12_record_layer_free(rl);

	return NULL;
}

void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	if (rl == NULL)
		return;

	tls12_record_protection_free(rl->read_current);
	tls12_record_protection_free(rl->write_current);
	tls12_record_protection_free(rl->write_previous);

	freezero(rl, sizeof(struct tls12_record_layer));
}

void
tls12_record_layer_alert(struct tls12_record_layer *rl, uint8_t *alert_desc)
{
	*alert_desc = rl->alert_desc;
}

int
tls12_record_layer_write_overhead(struct tls12_record_layer *rl,
    size_t *overhead)
{
	size_t block_size, eiv_len, mac_len;

	*overhead = 0;

	if (rl->write->aead_ctx != NULL) {
		*overhead = rl->write->aead_ctx->tag_len;
	} else if (rl->write->cipher_ctx != NULL) {
		eiv_len = 0;
		if (rl->version != TLS1_VERSION) {
			if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
				return 0;
		}
		if (!tls12_record_protection_block_size(rl->write, &block_size))
			return 0;
		if (!tls12_record_protection_mac_len(rl->write, &mac_len))
			return 0;

		*overhead = eiv_len + block_size + mac_len;
	}

	return 1;
}

int
tls12_record_layer_read_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->read);
}

int
tls12_record_layer_write_protected(struct tls12_record_layer *rl)
{
	return tls12_record_protection_engaged(rl->write);
}

void
tls12_record_layer_set_aead(struct tls12_record_layer *rl, const EVP_AEAD *aead)
{
	rl->aead = aead;
}

void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = (version == DTLS1_VERSION);
}

void
tls12_record_layer_set_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->write->epoch = epoch;
}

int
tls12_record_layer_use_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write->epoch == epoch)
		return 1;

	if (rl->write_current->epoch == epoch) {
		rl->write = rl->write_current;
		return 1;
	}

	if (rl->write_previous != NULL && rl->write_previous->epoch == epoch) {
		rl->write = rl->write_previous;
		return 1;
	}

	return 0;
}

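/*
 * Discard the previous write protection once the given epoch is no longer
 * needed (e.g. its handshake flight can no longer be retransmitted) and
 * revert to using the current write protection.
 */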
void
tls12_record_layer_write_epoch_done(struct tls12_record_layer *rl, uint16_t epoch)
{
	if (rl->write_previous == NULL || rl->write_previous->epoch != epoch)
		return;

	rl->write = rl->write_current;

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

static void
tls12_record_layer_set_read_state(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	rl->read->cipher_ctx = cipher_ctx;
	rl->read->hash_ctx = hash_ctx;
	rl->read->stream_mac = stream_mac;
}

static void
tls12_record_layer_set_write_state(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	rl->write->cipher_ctx = cipher_ctx;
	rl->write->hash_ctx = hash_ctx;
	rl->write->stream_mac = stream_mac;
}

void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_read_state(rl, NULL, NULL, 0);
	tls12_record_protection_clear(rl->read);
}

void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_write_state(rl, NULL, NULL, 0);
	tls12_record_protection_clear(rl->write);

	tls12_record_protection_free(rl->write_previous);
	rl->write_previous = NULL;
}

void
tls12_record_layer_reflect_seq_num(struct tls12_record_layer *rl)
{
	memcpy(rl->write->seq_num, rl->read->seq_num,
	    sizeof(rl->write->seq_num));
}

int
tls12_record_layer_set_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_read_state(rl, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

int
tls12_record_layer_set_write_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_write_state(rl, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

int
tls12_record_layer_set_read_mac_key(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len)
{
	freezero(rl->read->mac_key, rl->read->mac_key_len);
	rl->read->mac_key = NULL;
	rl->read->mac_key_len = 0;

	if (mac_key == NULL || mac_key_len == 0)
		return 1;

	if ((rl->read->mac_key = calloc(1, mac_key_len)) == NULL)
		return 0;

	memcpy(rl->read->mac_key, mac_key, mac_key_len);
	rl->read->mac_key_len = mac_key_len;

	return 1;
}

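/*
 * Nonce construction for TLSv1.2 AEAD cipher suites:
 *
 *   AES-GCM (RFC 5288): the 4 byte implicit part taken from the key block is
 *   concatenated with an 8 byte explicit nonce carried in each record.
 *
 *   ChaCha20-Poly1305 (RFC 7905): the 8 byte sequence number, left padded
 *   with zeroes to the AEAD nonce length, is XOR'd with the implicit nonce;
 *   nothing is carried in the record.
 */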
static int
tls12_record_layer_ccs_aead(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	size_t aead_nonce_len = EVP_AEAD_nonce_length(rl->aead);

	if ((rp->aead_ctx = calloc(1, sizeof(*rp->aead_ctx))) == NULL)
		return 0;

	/* AES GCM cipher suites use variable nonce in record. */
	if (rl->aead == EVP_aead_aes_128_gcm() ||
	    rl->aead == EVP_aead_aes_256_gcm())
		rp->aead_ctx->variable_nonce_in_record = 1;

	/* ChaCha20 Poly1305 XORs the fixed and variable nonces. */
	if (rl->aead == EVP_aead_chacha20_poly1305())
		rp->aead_ctx->xor_fixed_nonce = 1;

	if (iv_len > sizeof(rp->aead_ctx->fixed_nonce))
		return 0;

	memcpy(rp->aead_ctx->fixed_nonce, iv, iv_len);
	rp->aead_ctx->fixed_nonce_len = iv_len;
	rp->aead_ctx->tag_len = EVP_AEAD_max_overhead(rl->aead);
	rp->aead_ctx->variable_nonce_len = 8;

	if (rp->aead_ctx->xor_fixed_nonce) {
		/* Fixed nonce length must match, variable must not exceed. */
		if (rp->aead_ctx->fixed_nonce_len != aead_nonce_len)
			return 0;
		if (rp->aead_ctx->variable_nonce_len > aead_nonce_len)
			return 0;
	} else {
		/* Concatenated nonce length must equal AEAD nonce length. */
		if (rp->aead_ctx->fixed_nonce_len +
		    rp->aead_ctx->variable_nonce_len != aead_nonce_len)
			return 0;
	}

	if (!EVP_AEAD_CTX_init(&rp->aead_ctx->ctx, rl->aead, key, key_len,
	    EVP_AEAD_DEFAULT_TAG_LENGTH, NULL))
		return 0;

	return 1;
}

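/*
 * Only AEAD cipher suites are handled here for now; non-AEAD cipher and hash
 * states are installed directly via
 * tls12_record_layer_set_{read,write}_cipher_hash() and
 * tls12_record_layer_set_read_mac_key().
 */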
static int
tls12_record_layer_change_cipher_state(struct tls12_record_layer *rl,
    struct tls12_record_protection *rp, int is_write, const uint8_t *mac_key,
    size_t mac_key_len, const uint8_t *key, size_t key_len, const uint8_t *iv,
    size_t iv_len)
{
	/* Require unused record protection. */
	if (rp->cipher_ctx != NULL || rp->aead_ctx != NULL)
		return 0;

	if (mac_key_len > INT_MAX || key_len > INT_MAX || iv_len > INT_MAX)
		return 0;

	/* XXX - only aead for now. */
	if (rl->aead == NULL)
		return 1;

	return tls12_record_layer_ccs_aead(rl, rp, is_write, mac_key,
	    mac_key_len, key, key_len, iv, iv_len);
}

int
tls12_record_layer_change_read_cipher_state(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len, const uint8_t *key,
    size_t key_len, const uint8_t *iv, size_t iv_len)
{
	struct tls12_record_protection *read_new = NULL;
	int ret = 0;

	if ((read_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Read sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, read_new, 0,
	    mac_key, mac_key_len, key, key_len, iv, iv_len))
		goto err;

	tls12_record_protection_free(rl->read_current);
	rl->read = rl->read_current = read_new;
	read_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(read_new);

	return ret;
}

int
tls12_record_layer_change_write_cipher_state(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len, const uint8_t *key,
    size_t key_len, const uint8_t *iv, size_t iv_len)
{
	struct tls12_record_protection *write_new;
	int ret = 0;

	if ((write_new = tls12_record_protection_new()) == NULL)
		goto err;

	/* Write sequence number gets reset to zero. */

	if (!tls12_record_layer_change_cipher_state(rl, write_new, 1,
	    mac_key, mac_key_len, key, key_len, iv, iv_len))
		goto err;

	if (rl->dtls) {
		tls12_record_protection_free(rl->write_previous);
		rl->write_previous = rl->write_current;
		rl->write_current = NULL;
	}
	tls12_record_protection_free(rl->write_current);
	rl->write = rl->write_current = write_new;
	write_new = NULL;

	ret = 1;

 err:
	tls12_record_protection_free(write_new);

	return ret;
}

static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

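/*
 * The MAC (and AEAD additional data) is computed over a 13 byte
 * pseudo-header:
 *
 *   sequence number (8 bytes) || content type (1) || version (2) || length (2)
 *
 * For DTLS the sequence number field is the 16 bit epoch followed by the
 * 48 bit record sequence number.
 */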
static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, CBS *seq_num, uint8_t **out,
    size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!CBB_add_bytes(&cbb, CBS_data(seq_num), CBS_len(seq_num)))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, CBS *seq_num, uint8_t content_type,
    const uint8_t *content, size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len = 0;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;
	if (mac_len == 0)
		goto err;

	if (stream_mac) {
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac_cbc(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t mac_len, size_t padding_len)
{
	uint8_t *header = NULL;
	size_t header_len = 0;
	uint8_t *mac = NULL;
	size_t out_mac_len = 0;
	int ret = 0;

	/*
	 * Must be constant time to avoid leaking details about CBC padding.
	 */

	if (!ssl3_cbc_record_digest_supported(rl->read->hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (!ssl3_cbc_digest_record(rl->read->hash_ctx, mac, &out_mac_len, header,
	    content, content_len + mac_len, content_len + mac_len + padding_len,
	    rl->read->mac_key, rl->read->mac_key_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);

	return ret;
}

static int
tls12_record_layer_read_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	size_t out_len;

	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		return 0;

	return tls12_record_layer_mac(rl, cbb, rl->read->hash_ctx,
	    rl->read->stream_mac, seq_num, content_type, content, content_len,
	    &out_len);
}

static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write->hash_ctx,
	    rl->write->stream_mac, seq_num, content_type, content, content_len,
	    out_len);
}

static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, aead->fixed_nonce,
	    aead->fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;
	if (aead->fixed_nonce_len < aead->variable_nonce_len)
		return 0;

	/*
	 * The variable nonce (sequence number) is left padded with zeroes
	 * before the fixed nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    aead->fixed_nonce_len - aead->variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < aead->fixed_nonce_len; i++)
		nonce[i] ^= aead->fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}

static int
tls12_record_layer_open_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	if (rl->read->aead_ctx != NULL || rl->read->cipher_ctx != NULL)
		return 0;

	/* XXX - decrypt/process in place for now. */
	*out = (uint8_t *)CBS_data(fragment);
	*out_len = CBS_len(fragment);

	return 1;
}

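/*
 * Protected AEAD records have the form:
 *
 *   [explicit nonce (8 bytes, AES-GCM only)] || ciphertext || tag
 *
 * The pseudo-header computed over the plaintext length is passed to the AEAD
 * as additional authenticated data.
 */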
static int
tls12_record_layer_open_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	const SSL_AEAD_CTX *aead = rl->read->aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	uint8_t *plain;
	size_t plain_len;
	CBS var_nonce;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else if (aead->variable_nonce_in_record) {
		if (!CBS_get_bytes(fragment, &var_nonce,
		    aead->variable_nonce_len))
			goto err;
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(&var_nonce), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	if (CBS_len(fragment) < aead->tag_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment) - aead->tag_len;

	if (!tls12_record_layer_pseudo_header(rl, content_type, plain_len,
	    seq_num, &header, &header_len))
		goto err;

	if (!EVP_AEAD_CTX_open(&aead->ctx, plain, out_len, plain_len,
	    nonce, nonce_len, CBS_data(fragment), CBS_len(fragment),
	    header, header_len)) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (*out_len > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	if (*out_len != plain_len)
		goto err;

	*out = plain;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

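/*
 * Legacy (non-AEAD) cipher suites use MAC-then-encrypt. For CBC mode the
 * plaintext has the form:
 *
 *   [explicit IV (TLSv1.1+)] || content || MAC || padding
 *
 * where every padding byte holds the padding length. Padding removal and MAC
 * verification must be constant time to avoid leaking details about the CBC
 * padding.
 */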
static int
tls12_record_layer_open_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, CBS *fragment, uint8_t **out,
    size_t *out_len)
{
	EVP_CIPHER_CTX *enc = rl->read->cipher_ctx;
	SSL3_RECORD_INTERNAL rrec;
	size_t block_size, eiv_len;
	uint8_t *mac = NULL;
	size_t mac_len = 0;
	uint8_t *out_mac = NULL;
	size_t out_mac_len = 0;
	uint8_t *plain;
	size_t plain_len;
	size_t min_len;
	CBB cbb_mac;
	int ret = 0;

	memset(&cbb_mac, 0, sizeof(cbb_mac));

	if (!tls12_record_protection_block_size(rl->read, &block_size))
		goto err;

	/* Determine explicit IV length. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->read, &eiv_len))
			goto err;
	}

	mac_len = 0;
	if (rl->read->hash_ctx != NULL) {
		if (!tls12_record_protection_mac_len(rl->read, &mac_len))
			goto err;
	}

	/* CBC has at least one padding byte. */
	min_len = eiv_len + mac_len;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		min_len += 1;

	if (CBS_len(fragment) < min_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}
	if (CBS_len(fragment) % block_size != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment);

	if (!EVP_Cipher(enc, plain, CBS_data(fragment), plain_len))
		goto err;

	rrec.data = plain;
	rrec.input = plain;
	rrec.length = plain_len;

	/*
	 * We now have to remove padding, extract MAC, calculate MAC
	 * and compare MAC in constant time.
	 */
	if (block_size > 1)
		ssl3_cbc_remove_padding(&rrec, eiv_len, mac_len);

	if ((mac = calloc(1, mac_len)) == NULL)
		goto err;

	if (!CBB_init(&cbb_mac, EVP_MAX_MD_SIZE))
		goto err;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE) {
		ssl3_cbc_copy_mac(mac, &rrec, mac_len, rrec.length +
		    rrec.padding_length);
		rrec.length -= mac_len;
		if (!tls12_record_layer_read_mac_cbc(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length, mac_len,
		    rrec.padding_length))
			goto err;
	} else {
		rrec.length -= mac_len;
		memcpy(mac, rrec.data + rrec.length, mac_len);
		if (!tls12_record_layer_read_mac(rl, &cbb_mac, content_type,
		    seq_num, rrec.input, rrec.length))
			goto err;
	}
	if (!CBB_finish(&cbb_mac, &out_mac, &out_mac_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	if (timingsafe_memcmp(mac, out_mac, mac_len) != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (rrec.length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	*out = rrec.data;
	*out_len = rrec.length;

	ret = 1;

 err:
	CBB_cleanup(&cbb_mac);
	freezero(mac, mac_len);
	freezero(out_mac, out_mac_len);

	return ret;
}

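/*
 * A TLSv1.2 record consists of:
 *
 *   content type (1 byte) || version (2) || length (2) || fragment
 *
 * DTLS additionally carries the 16 bit epoch and 48 bit sequence number
 * between the version and length fields.
 */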
int
tls12_record_layer_open_record(struct tls12_record_layer *rl, uint8_t *buf,
    size_t buf_len, uint8_t **out, size_t *out_len)
{
	CBS cbs, fragment, seq_num;
	uint16_t version;
	uint8_t content_type;

	CBS_init(&cbs, buf, buf_len);
	CBS_init(&seq_num, rl->read->seq_num, sizeof(rl->read->seq_num));

	if (!CBS_get_u8(&cbs, &content_type))
		return 0;
	if (!CBS_get_u16(&cbs, &version))
		return 0;
	if (rl->dtls) {
		/*
		 * The DTLS sequence number is split into a 16 bit epoch and
		 * 48 bit sequence number, however for the purposes of record
		 * processing it is treated the same as a TLS 64 bit sequence
		 * number. DTLS also uses explicit read sequence numbers, which
		 * we need to extract from the DTLS record header.
		 */
		if (!CBS_get_bytes(&cbs, &seq_num, SSL3_SEQUENCE_SIZE))
			return 0;
		if (!CBS_write_bytes(&seq_num, rl->read->seq_num,
		    sizeof(rl->read->seq_num), NULL))
			return 0;
	}
	if (!CBS_get_u16_length_prefixed(&cbs, &fragment))
		return 0;

	if (rl->read->aead_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_aead(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else if (rl->read->cipher_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_cipher(rl,
		    content_type, &seq_num, &fragment, out, out_len))
			return 0;
	} else {
		if (!tls12_record_layer_open_record_plaintext(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	}

	if (!rl->dtls)
		tls1_record_sequence_increment(rl->read->seq_num);

	return 1;
}

static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (rl->write->aead_ctx != NULL || rl->write->cipher_ctx != NULL)
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	const SSL_AEAD_CTX *aead = rl->write->aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(seq_num), &nonce, &nonce_len))
			goto err;
	}

	if (aead->variable_nonce_in_record) {
		/* XXX - length check? */
		if (!CBB_add_bytes(out, CBS_data(seq_num),
		    aead->variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    seq_num, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + aead->tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(&aead->ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

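/*
 * Seal a record using a legacy (non-AEAD) cipher: an explicit IV is generated
 * for TLSv1.1 and later, the MAC is appended to the content and, for block
 * ciphers, the result is padded to the cipher block size before encryption.
 */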
static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *seq_num, const uint8_t *content,
    size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write->cipher_ctx;
	size_t block_size, eiv_len, mac_len, pad_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION) {
		if (!tls12_record_protection_eiv_len(rl->write, &eiv_len))
			goto err;
	}
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write->hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    seq_num, content, content_len, &mac_len))
			goto err;
	}

	plain_len = eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	if (!tls12_record_protection_block_size(rl->write, &block_size))
		goto err;
	if (block_size > 1) {
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	uint8_t *seq_num_data = NULL;
	size_t seq_num_len = 0;
	CBB fragment, seq_num_cbb;
	CBS seq_num;
	int ret = 0;

	/*
	 * Construct the effective sequence number - this is used in both
	 * the DTLS header and for MAC calculations.
	 */
	if (!CBB_init(&seq_num_cbb, SSL3_SEQUENCE_SIZE))
		goto err;
	if (!tls12_record_layer_build_seq_num(rl, &seq_num_cbb, rl->write->epoch,
	    rl->write->seq_num, sizeof(rl->write->seq_num)))
		goto err;
	if (!CBB_finish(&seq_num_cbb, &seq_num_data, &seq_num_len))
		goto err;
	CBS_init(&seq_num, seq_num_data, seq_num_len);

	if (!CBB_add_u8(cbb, content_type))
		goto err;
	if (!CBB_add_u16(cbb, rl->version))
		goto err;
	if (rl->dtls) {
		if (!CBB_add_bytes(cbb, CBS_data(&seq_num), CBS_len(&seq_num)))
			goto err;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		goto err;

	if (rl->write->aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else if (rl->write->cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, &seq_num, content, content_len, &fragment))
			goto err;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			goto err;
	}

	if (!CBB_flush(cbb))
		goto err;

	tls1_record_sequence_increment(rl->write->seq_num);

	ret = 1;

 err:
	CBB_cleanup(&seq_num_cbb);
	free(seq_num_data);

	return ret;
}