/* $OpenBSD: tls12_record_layer.c,v 1.5 2020/10/03 17:35:17 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

/*
 * Record protection state for the TLSv1.0-TLSv1.2 (and DTLS) record layer.
 * Holds the negotiated protocol version, the current read/write epochs
 * (DTLS only) and the cryptographic state used to protect and unprotect
 * records - either an AEAD context, or a cipher context paired with a
 * hash (MAC) context.
 */
struct tls12_record_layer {
	uint16_t version;
	int dtls;			/* Non-zero when version == DTLS1_VERSION. */

	/* Alert description recorded when open/seal fails; see _alert(). */
	uint8_t alert_desc;

	uint16_t read_epoch;
	uint16_t write_epoch;

	/* Non-zero for stream MACs whose hash state carries across records. */
	int read_stream_mac;
	int write_stream_mac;

	/*
	 * XXX - for now these are just pointers to externally managed
	 * structs/memory. These should eventually be owned by the record layer.
	 */
	SSL_AEAD_CTX *read_aead_ctx;
	SSL_AEAD_CTX *write_aead_ctx;

	EVP_CIPHER_CTX *read_cipher_ctx;
	EVP_MD_CTX *read_hash_ctx;
	EVP_CIPHER_CTX *write_cipher_ctx;
	EVP_MD_CTX *write_hash_ctx;

	/* MAC key for constant-time CBC verification (read side only). */
	const uint8_t *read_mac_key;
	size_t read_mac_key_len;

	/* Externally managed sequence numbers (SSL3_SEQUENCE_SIZE bytes). */
	uint8_t *read_seq_num;
	uint8_t *write_seq_num;
};

/* Allocate a zeroed record layer. Returns NULL on allocation failure. */
struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		return NULL;

	return rl;
}

/*
 * Free the record layer, zeroing its contents. Does not free the
 * externally owned contexts/keys/sequence numbers it points at.
 */
void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	freezero(rl, sizeof(struct tls12_record_layer));
}

/*
 * Report the alert description recorded by the most recent failed
 * open/seal operation.
 */
void
tls12_record_layer_alert(struct tls12_record_layer *rl, uint8_t *alert_desc)
{
	*alert_desc = rl->alert_desc;
}

/* Set the protocol version placed in record headers and pseudo-headers. */
void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = (version == DTLS1_VERSION);
}

void
tls12_record_layer_set_read_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->read_epoch = epoch;
}

void
tls12_record_layer_set_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->write_epoch = epoch;
}

/*
 * Install the read-side protection state. Exactly one of aead_ctx or
 * cipher_ctx/hash_ctx is expected to be in use; callers pass NULL for the
 * unused contexts (see the set_read_aead/set_read_cipher_hash wrappers).
 */
static void
tls12_record_layer_set_read_state(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx, EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx,
    int stream_mac)
{
	rl->read_aead_ctx = aead_ctx;

	rl->read_cipher_ctx = cipher_ctx;
	rl->read_hash_ctx = hash_ctx;
	rl->read_stream_mac = stream_mac;
}

/* Install the write-side protection state; see set_read_state above. */
static void
tls12_record_layer_set_write_state(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx, EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx,
    int stream_mac)
{
	rl->write_aead_ctx = aead_ctx;

	rl->write_cipher_ctx = cipher_ctx;
	rl->write_hash_ctx = hash_ctx;
	rl->write_stream_mac = stream_mac;
}

/* Drop all read-side protection state, returning to plaintext records. */
void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_read_state(rl, NULL, NULL, NULL, 0);
	tls12_record_layer_set_read_mac_key(rl, NULL, 0);
	rl->read_seq_num = NULL;
}

/* Drop all write-side protection state, returning to plaintext records. */
void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_write_state(rl, NULL, NULL, NULL, 0);
	rl->write_seq_num = NULL;
}

void
tls12_record_layer_set_read_seq_num(struct tls12_record_layer *rl,
    uint8_t *seq_num)
{
	rl->read_seq_num = seq_num;
}

void
tls12_record_layer_set_write_seq_num(struct tls12_record_layer *rl,
    uint8_t *seq_num)
{
	rl->write_seq_num = seq_num;
}

int
tls12_record_layer_set_read_aead(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx)
{
	tls12_record_layer_set_read_state(rl, aead_ctx, NULL, NULL, 0);

	return 1;
}

int
tls12_record_layer_set_write_aead(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx)
{
	tls12_record_layer_set_write_state(rl, aead_ctx, NULL, NULL, 0);

	return 1;
}

int
tls12_record_layer_set_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_read_state(rl, NULL, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

int
tls12_record_layer_set_write_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_write_state(rl, NULL, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

/*
 * Retain a pointer to the read MAC key (not copied); used only by the
 * constant-time CBC record digest path.
 */
int
tls12_record_layer_set_read_mac_key(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len)
{
	rl->read_mac_key = mac_key;
	rl->read_mac_key_len = mac_key_len;

	return 1;
}

/*
 * Append the record sequence number to cbb. For DTLS the explicit epoch is
 * written first and the leading two bytes of seq_num are skipped (the DTLS
 * sequence buffer presumably embeds the epoch there - note the parsing in
 * tls12_record_layer_open_record, which reads a 2-byte epoch followed by a
 * 6-byte sequence number).
 */
static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

/*
 * Build the 13-byte pseudo-header used as MAC input and AEAD additional
 * data: sequence number (with epoch for DTLS), content type, version and
 * record length. On success *out is allocated and owned by the caller.
 */
static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, uint16_t epoch, uint8_t *seq_num,
    size_t seq_num_len, uint8_t **out, size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!tls12_record_layer_build_seq_num(rl, &cbb, epoch,
	    seq_num, seq_num_len))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

/*
 * Compute the record MAC over pseudo-header || content, appending it to
 * cbb and returning its length in *out_len. Works on a copy of hash_ctx;
 * for stream MACs the updated state is copied back so it carries forward
 * to the next record.
 */
static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, uint16_t epoch, uint8_t *seq_num,
    size_t seq_num_len, uint8_t content_type, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len = 0;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    epoch, seq_num, seq_num_len, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	/* First call sizes the MAC, second call writes it into cbb. */
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;
	if (mac_len == 0)
		goto err;

	if (stream_mac) {
		/* Carry the updated hash state forward across records. */
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	freezero(header, header_len);

	return ret;
}

/*
 * Compute the expected MAC for a received CBC-protected record, appending
 * it to cbb. Uses the constant-time CBC record digest so that timing does
 * not leak padding validity (Lucky 13 style attacks).
 */
static int
tls12_record_layer_read_mac_cbc(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, const uint8_t *content, size_t content_len,
    size_t mac_len, size_t padding_len)
{
	uint8_t *header = NULL;
	size_t header_len = 0;
	uint8_t *mac = NULL;
	size_t out_mac_len = 0;
	int ret = 0;

	/*
	 * Must be constant time to avoid leaking details about CBC padding.
	 */

	if (!ssl3_cbc_record_digest_supported(rl->read_hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    rl->read_epoch, rl->read_seq_num, SSL3_SEQUENCE_SIZE,
	    &header, &header_len))
		goto err;

	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (!ssl3_cbc_digest_record(rl->read_hash_ctx, mac, &out_mac_len, header,
	    content, content_len + mac_len, content_len + mac_len + padding_len,
	    rl->read_mac_key, rl->read_mac_key_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);

	return ret;
}

/*
 * Compute the expected MAC for a received non-CBC record. CBC mode must go
 * through tls12_record_layer_read_mac_cbc instead, hence the refusal here.
 */
static int
tls12_record_layer_read_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, const uint8_t *content, size_t content_len)
{
	EVP_CIPHER_CTX *enc = rl->read_cipher_ctx;
	size_t out_len;

	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		return 0;

	return tls12_record_layer_mac(rl, cbb, rl->read_hash_ctx,
	    rl->read_stream_mac, rl->read_epoch, rl->read_seq_num,
	    SSL3_SEQUENCE_SIZE, content_type, content, content_len, &out_len);
}

/* Compute and append the MAC for an outgoing record. */
static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, const uint8_t *content, size_t content_len,
    size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write_hash_ctx,
	    rl->write_stream_mac, rl->write_epoch, rl->write_seq_num,
	    SSL3_SEQUENCE_SIZE, content_type, content, content_len, out_len);
}

/*
 * Build an AEAD nonce as fixed_nonce || seq_num (e.g. the AES-GCM style
 * construction). On success *out is allocated and owned by the caller.
 */
static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, aead->fixed_nonce,
	    aead->fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

/*
 * Build an AEAD nonce by left-padding seq_num to fixed_nonce_len and
 * XOR'ing the fixed nonce into it (e.g. the ChaCha20-Poly1305 style
 * construction). On success *out is allocated and owned by the caller.
 */
static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;
	if (aead->fixed_nonce_len < aead->variable_nonce_len)
		return 0;

	/*
	 * Variable nonce (sequence number) is right padded, before the fixed
	 * nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    aead->fixed_nonce_len - aead->variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < aead->fixed_nonce_len; i++)
		nonce[i] ^= aead->fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}

/*
 * "Open" an unprotected record: only valid before any read cipher state
 * has been installed. Returns the fragment as-is (no copy).
 */
static int
tls12_record_layer_open_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	if (rl->read_aead_ctx != NULL || rl->read_cipher_ctx != NULL)
		return 0;

	/* XXX - decrypt/process in place for now. */
	*out = (uint8_t *)CBS_data(fragment);
	*out_len = CBS_len(fragment);

	return 1;
}

/*
 * Open (decrypt and authenticate) an AEAD-protected record. On failure,
 * rl->alert_desc records the alert to send. Decrypts in place; *out points
 * into the fragment buffer.
 */
static int
tls12_record_layer_open_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	const SSL_AEAD_CTX *aead = rl->read_aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	uint8_t *plain;
	size_t plain_len;
	uint16_t epoch = 0;	/* NOTE(review): always 0, not rl->read_epoch -
				 * only matters for DTLS pseudo-headers; confirm
				 * against DTLS AEAD callers. */
	CBS var_nonce;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    rl->read_seq_num, &nonce, &nonce_len))
			goto err;
	} else if (aead->variable_nonce_in_record) {
		/* Explicit nonce travels at the front of the record. */
		if (!CBS_get_bytes(fragment, &var_nonce,
		    aead->variable_nonce_len))
			goto err;
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(&var_nonce), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    rl->read_seq_num, &nonce, &nonce_len))
			goto err;
	}

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	if (CBS_len(fragment) < aead->tag_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment) - aead->tag_len;

	/* Additional data covers the plaintext length, not the ciphertext. */
	if (!tls12_record_layer_pseudo_header(rl, content_type, plain_len,
	    epoch, rl->read_seq_num, SSL3_SEQUENCE_SIZE, &header, &header_len))
		goto err;

	if (!EVP_AEAD_CTX_open(&aead->ctx, plain, out_len, plain_len,
	    nonce, nonce_len, CBS_data(fragment), CBS_len(fragment),
	    header, header_len)) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (*out_len > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	if (*out_len != plain_len)
		goto err;

	*out = plain;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

/*
 * Open (decrypt and verify) a cipher+MAC protected record (stream or CBC).
 * Padding removal, MAC extraction and MAC comparison are performed in
 * constant time to avoid CBC padding oracles. On failure, rl->alert_desc
 * records the alert to send. Decrypts in place.
 */
static int
tls12_record_layer_open_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	EVP_CIPHER_CTX *enc = rl->read_cipher_ctx;
	SSL3_RECORD_INTERNAL rrec;
	int block_size, eiv_len;
	uint8_t *mac = NULL;
	int mac_len = 0;
	uint8_t *out_mac = NULL;
	size_t out_mac_len = 0;
	uint8_t *plain;
	size_t plain_len;
	size_t min_len;
	CBB cbb_mac;
	int ret = 0;

	memset(&cbb_mac, 0, sizeof(cbb_mac));

	block_size = EVP_CIPHER_CTX_block_size(enc);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		goto err;

	/* Determine explicit IV length. TLSv1.0 CBC has no explicit IV. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION &&
	    EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(enc);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		goto err;

	mac_len = 0;
	if (rl->read_hash_ctx != NULL) {
		mac_len = EVP_MD_CTX_size(rl->read_hash_ctx);
		if (mac_len <= 0 || mac_len > EVP_MAX_MD_SIZE)
			goto err;
	}

	/* CBC has at least one padding byte. */
	min_len = eiv_len + mac_len;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		min_len += 1;

	if (CBS_len(fragment) < min_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}
	if (CBS_len(fragment) % block_size != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment);

	if (!EVP_Cipher(enc, plain, CBS_data(fragment), plain_len))
		goto err;

	rrec.data = plain;
	rrec.input = plain;
	rrec.length = plain_len;

	/*
	 * We now have to remove padding, extract MAC, calculate MAC
	 * and compare MAC in constant time.
	 */
	if (block_size > 1)
		ssl3_cbc_remove_padding(&rrec, eiv_len, mac_len);

	if ((mac = calloc(1, mac_len)) == NULL)
		goto err;

	if (!CBB_init(&cbb_mac, EVP_MAX_MD_SIZE))
		goto err;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE) {
		/* Constant-time MAC copy hides the padding length. */
		ssl3_cbc_copy_mac(mac, &rrec, mac_len, rrec.length +
		    rrec.padding_length);
		rrec.length -= mac_len;
		if (!tls12_record_layer_read_mac_cbc(rl, &cbb_mac, content_type,
		    rrec.input, rrec.length, mac_len, rrec.padding_length))
			goto err;
	} else {
		rrec.length -= mac_len;
		memcpy(mac, rrec.data + rrec.length, mac_len);
		if (!tls12_record_layer_read_mac(rl, &cbb_mac, content_type,
		    rrec.input, rrec.length))
			goto err;
	}
	if (!CBB_finish(&cbb_mac, &out_mac, &out_mac_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	/* Constant-time comparison of received vs computed MAC. */
	if (timingsafe_memcmp(mac, out_mac, mac_len) != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (rrec.length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	*out = rrec.data;
	*out_len = rrec.length;

	ret = 1;

 err:
	CBB_cleanup(&cbb_mac);
	freezero(mac, mac_len);
	freezero(out_mac, out_mac_len);

	return ret;
}

/*
 * Parse a record from buf and open it with the current read state. On
 * success *out/*out_len reference the plaintext (within buf - no copy).
 * For TLS, the read sequence number is incremented here; for DTLS it is
 * not (the DTLS epoch/sequence parsed from the header are currently not
 * validated here - presumably handled by the caller; confirm).
 */
int
tls12_record_layer_open_record(struct tls12_record_layer *rl, uint8_t *buf,
    size_t buf_len, uint8_t **out, size_t *out_len)
{
	CBS cbs, fragment, seq_no;
	uint16_t epoch, version;
	uint8_t content_type;

	CBS_init(&cbs, buf, buf_len);

	if (!CBS_get_u8(&cbs, &content_type))
		return 0;
	if (!CBS_get_u16(&cbs, &version))
		return 0;
	if (rl->dtls) {
		/* DTLS record headers carry an explicit epoch and sequence. */
		if (!CBS_get_u16(&cbs, &epoch))
			return 0;
		if (!CBS_get_bytes(&cbs, &seq_no, 6))
			return 0;
	}
	if (!CBS_get_u16_length_prefixed(&cbs, &fragment))
		return 0;

	if (rl->read_aead_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_aead(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	} else if (rl->read_cipher_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_cipher(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	} else {
		if (!tls12_record_layer_open_record_plaintext(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	}

	if (!rl->dtls)
		tls1_record_sequence_increment(rl->read_seq_num);

	return 1;
}

/*
 * Seal an unprotected record: only valid before any write cipher state
 * has been installed.
 */
static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (rl->write_aead_ctx != NULL || rl->write_cipher_ctx != NULL)
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

/*
 * Seal (encrypt and authenticate) a record with the write AEAD, appending
 * the protected record body (explicit nonce, if any, plus ciphertext and
 * tag) to out.
 */
static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	const SSL_AEAD_CTX *aead = rl->write_aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint16_t epoch = 0;	/* NOTE(review): always 0, not rl->write_epoch -
				 * only matters for DTLS pseudo-headers; confirm
				 * against DTLS AEAD callers. */
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    rl->write_seq_num, &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    rl->write_seq_num, &nonce, &nonce_len))
			goto err;
	}

	if (aead->variable_nonce_in_record) {
		/* XXX - length check? */
		if (!CBB_add_bytes(out, rl->write_seq_num, aead->variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    epoch, rl->write_seq_num, SSL3_SEQUENCE_SIZE, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + aead->tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(&aead->ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

/*
 * Seal a record with the write cipher and MAC, appending the protected
 * record body to out. Layout is: explicit IV (CBC, TLSv1.1+) || content ||
 * MAC || padding (CBC only), encrypted as a whole.
 */
static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write_cipher_ctx;
	size_t mac_len, pad_len;
	int block_size, eiv_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. TLSv1.0 CBC uses an implicit IV. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION &&
	    EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(enc);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		goto err;
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write_hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    content, content_len, &mac_len))
			goto err;
	}

	plain_len = (size_t)eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	block_size = EVP_CIPHER_CTX_block_size(enc);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		goto err;
	if (block_size > 1) {
		/* TLS CBC padding: pad_len bytes, each valued pad_len - 1. */
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

/*
 * Seal a full record: write the record header (content type, version and,
 * for DTLS, epoch plus sequence number), then the length-prefixed protected
 * fragment produced by the current write state. Increments the write
 * sequence number on success.
 */
int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	CBB fragment;

	if (!CBB_add_u8(cbb, content_type))
		return 0;
	if (!CBB_add_u16(cbb, rl->version))
		return 0;
	if (rl->dtls) {
		if (!tls12_record_layer_build_seq_num(rl, cbb,
		    rl->write_epoch, rl->write_seq_num,
		    SSL3_SEQUENCE_SIZE))
			return 0;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		return 0;

	if (rl->write_aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	} else if (rl->write_cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	}

	if (!CBB_flush(cbb))
		return 0;

	tls1_record_sequence_increment(rl->write_seq_num);

	return 1;
}