/* $OpenBSD: tls12_record_layer.c,v 1.7 2021/01/07 15:37:19 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

struct tls12_record_layer {
	uint16_t version;
	int dtls;

	uint8_t alert_desc;

	uint16_t read_epoch;
	uint16_t write_epoch;

	int read_stream_mac;
	int write_stream_mac;

	uint8_t *read_mac_key;
	size_t read_mac_key_len;

	/*
	 * XXX - for now these are just pointers to externally managed
	 * structs/memory. These should eventually be owned by the record layer.
	 */
	SSL_AEAD_CTX *read_aead_ctx;
	SSL_AEAD_CTX *write_aead_ctx;

	EVP_CIPHER_CTX *read_cipher_ctx;
	EVP_MD_CTX *read_hash_ctx;
	EVP_CIPHER_CTX *write_cipher_ctx;
	EVP_MD_CTX *write_hash_ctx;

	uint8_t *read_seq_num;
	uint8_t *write_seq_num;
};

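/*
 * Typical usage (a sketch, not taken from a caller in this file): allocate a
 * record layer with tls12_record_layer_new(), configure it with
 * tls12_record_layer_set_version() and one of the set_{read,write}_aead() or
 * set_{read,write}_cipher_hash() functions, point it at the sequence number
 * storage with set_{read,write}_seq_num(), then protect and unprotect records
 * with tls12_record_layer_seal_record() and tls12_record_layer_open_record().
 * Release it with tls12_record_layer_free().
 */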
struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		return NULL;

	return rl;
}

void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	if (rl == NULL)
		return;

	freezero(rl->read_mac_key, rl->read_mac_key_len);

	freezero(rl, sizeof(struct tls12_record_layer));
}

void
tls12_record_layer_alert(struct tls12_record_layer *rl, uint8_t *alert_desc)
{
	*alert_desc = rl->alert_desc;
}

void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = (version == DTLS1_VERSION);
}

void
tls12_record_layer_set_read_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->read_epoch = epoch;
}

void
tls12_record_layer_set_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->write_epoch = epoch;
}

static void
tls12_record_layer_set_read_state(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx, EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx,
    int stream_mac)
{
	rl->read_aead_ctx = aead_ctx;

	rl->read_cipher_ctx = cipher_ctx;
	rl->read_hash_ctx = hash_ctx;
	rl->read_stream_mac = stream_mac;
}

static void
tls12_record_layer_set_write_state(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx, EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx,
    int stream_mac)
{
	rl->write_aead_ctx = aead_ctx;

	rl->write_cipher_ctx = cipher_ctx;
	rl->write_hash_ctx = hash_ctx;
	rl->write_stream_mac = stream_mac;
}

void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_read_state(rl, NULL, NULL, NULL, 0);
	tls12_record_layer_set_read_mac_key(rl, NULL, 0);
	rl->read_seq_num = NULL;
}

void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_write_state(rl, NULL, NULL, NULL, 0);
	rl->write_seq_num = NULL;
}

void
tls12_record_layer_set_read_seq_num(struct tls12_record_layer *rl,
    uint8_t *seq_num)
{
	rl->read_seq_num = seq_num;
}

void
tls12_record_layer_set_write_seq_num(struct tls12_record_layer *rl,
    uint8_t *seq_num)
{
	rl->write_seq_num = seq_num;
}

int
tls12_record_layer_set_read_aead(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx)
{
	tls12_record_layer_set_read_state(rl, aead_ctx, NULL, NULL, 0);

	return 1;
}

int
tls12_record_layer_set_write_aead(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx)
{
	tls12_record_layer_set_write_state(rl, aead_ctx, NULL, NULL, 0);

	return 1;
}

int
tls12_record_layer_set_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_read_state(rl, NULL, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

int
tls12_record_layer_set_write_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_write_state(rl, NULL, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

int
tls12_record_layer_set_read_mac_key(struct tls12_record_layer *rl,
    const uint8_t *mac_key, size_t mac_key_len)
{
	freezero(rl->read_mac_key, rl->read_mac_key_len);
	rl->read_mac_key = NULL;
	rl->read_mac_key_len = 0;

	if (mac_key == NULL || mac_key_len == 0)
		return 1;

	if ((rl->read_mac_key = calloc(1, mac_key_len)) == NULL)
		return 0;

	memcpy(rl->read_mac_key, mac_key, mac_key_len);
	rl->read_mac_key_len = mac_key_len;

	return 1;
}

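/*
 * Append the record sequence number to cbb. For DTLS the given 16-bit epoch
 * is written in place of the first two bytes of seq_num, followed by the
 * remaining six bytes; for TLS the full eight byte sequence number is used.
 */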
static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

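/*
 * Build the 13 byte pseudo-header that is used as MAC input and as AEAD
 * additional data: sequence number (with epoch for DTLS), content type,
 * record version and record length.
 */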
static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, uint16_t epoch, uint8_t *seq_num,
    size_t seq_num_len, uint8_t **out, size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!tls12_record_layer_build_seq_num(rl, &cbb, epoch,
	    seq_num, seq_num_len))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

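/*
 * Compute the MAC over the pseudo-header and record content, using a copy of
 * the provided hash context. When stream_mac is set, the updated context is
 * copied back so that MAC state carries forward across records.
 */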
static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, uint16_t epoch, uint8_t *seq_num,
    size_t seq_num_len, uint8_t content_type, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len = 0;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    epoch, seq_num, seq_num_len, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;
	if (mac_len == 0)
		goto err;

	if (stream_mac) {
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	freezero(header, header_len);

	return ret;
}

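/*
 * Compute the MAC for a received CBC record via the constant time
 * ssl3_cbc_digest_record() path, so that MAC processing does not leak
 * information about the CBC padding.
 */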
static int
tls12_record_layer_read_mac_cbc(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, const uint8_t *content, size_t content_len,
    size_t mac_len, size_t padding_len)
{
	uint8_t *header = NULL;
	size_t header_len = 0;
	uint8_t *mac = NULL;
	size_t out_mac_len = 0;
	int ret = 0;

	/*
	 * Must be constant time to avoid leaking details about CBC padding.
	 */

	if (!ssl3_cbc_record_digest_supported(rl->read_hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    rl->read_epoch, rl->read_seq_num, SSL3_SEQUENCE_SIZE,
	    &header, &header_len))
		goto err;

	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (!ssl3_cbc_digest_record(rl->read_hash_ctx, mac, &out_mac_len, header,
	    content, content_len + mac_len, content_len + mac_len + padding_len,
	    rl->read_mac_key, rl->read_mac_key_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);

	return ret;
}

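/*
 * Compute the MAC for a received record. CBC records must instead go through
 * tls12_record_layer_read_mac_cbc(), hence the explicit mode check below.
 */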
static int
tls12_record_layer_read_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, const uint8_t *content, size_t content_len)
{
	EVP_CIPHER_CTX *enc = rl->read_cipher_ctx;
	size_t out_len;

	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		return 0;

	return tls12_record_layer_mac(rl, cbb, rl->read_hash_ctx,
	    rl->read_stream_mac, rl->read_epoch, rl->read_seq_num,
	    SSL3_SEQUENCE_SIZE, content_type, content, content_len, &out_len);
}

static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, const uint8_t *content, size_t content_len,
    size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write_hash_ctx,
	    rl->write_stream_mac, rl->write_epoch, rl->write_seq_num,
	    SSL3_SEQUENCE_SIZE, content_type, content, content_len, out_len);
}

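/*
 * Construct an AEAD nonce by concatenating the fixed (implicit) nonce with
 * the variable (explicit) nonce, which is normally the sequence number.
 */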
static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, aead->fixed_nonce,
	    aead->fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

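/*
 * Construct an AEAD nonce by padding the sequence number to the fixed nonce
 * length and XORing it with the fixed nonce.
 */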
static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, const uint8_t *seq_num,
    uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;
	if (aead->fixed_nonce_len < aead->variable_nonce_len)
		return 0;

	/*
	 * The variable nonce (sequence number) is padded on the left to the
	 * fixed nonce length, before the fixed nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    aead->fixed_nonce_len - aead->variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < aead->fixed_nonce_len; i++)
		nonce[i] ^= aead->fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}

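/*
 * Pass through an unprotected record - only valid while no read cipher
 * state has been configured.
 */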
static int
tls12_record_layer_open_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	if (rl->read_aead_ctx != NULL || rl->read_cipher_ctx != NULL)
		return 0;

	/* XXX - decrypt/process in place for now. */
	*out = (uint8_t *)CBS_data(fragment);
	*out_len = CBS_len(fragment);

	return 1;
}

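/*
 * Authenticate and decrypt an AEAD protected record, after reconstructing
 * the nonce and checking the record length limits. Decryption is performed
 * in place and the tag is excluded from the returned plaintext length.
 */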
static int
tls12_record_layer_open_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	const SSL_AEAD_CTX *aead = rl->read_aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	uint8_t *plain;
	size_t plain_len;
	uint16_t epoch = 0;
	CBS var_nonce;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    rl->read_seq_num, &nonce, &nonce_len))
			goto err;
	} else if (aead->variable_nonce_in_record) {
		if (!CBS_get_bytes(fragment, &var_nonce,
		    aead->variable_nonce_len))
			goto err;
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    CBS_data(&var_nonce), &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    rl->read_seq_num, &nonce, &nonce_len))
			goto err;
	}

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	if (CBS_len(fragment) < aead->tag_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment) - aead->tag_len;

	if (!tls12_record_layer_pseudo_header(rl, content_type, plain_len,
	    epoch, rl->read_seq_num, SSL3_SEQUENCE_SIZE, &header, &header_len))
		goto err;

	if (!EVP_AEAD_CTX_open(&aead->ctx, plain, out_len, plain_len,
	    nonce, nonce_len, CBS_data(fragment), CBS_len(fragment),
	    header, header_len)) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (*out_len > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	if (*out_len != plain_len)
		goto err;

	*out = plain;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

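/*
 * Decrypt a block or stream cipher protected record, then remove any CBC
 * padding, extract the MAC and verify it in constant time.
 */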
static int
tls12_record_layer_open_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, CBS *fragment, uint8_t **out, size_t *out_len)
{
	EVP_CIPHER_CTX *enc = rl->read_cipher_ctx;
	SSL3_RECORD_INTERNAL rrec;
	int block_size, eiv_len;
	uint8_t *mac = NULL;
	int mac_len = 0;
	uint8_t *out_mac = NULL;
	size_t out_mac_len = 0;
	uint8_t *plain;
	size_t plain_len;
	size_t min_len;
	CBB cbb_mac;
	int ret = 0;

	memset(&cbb_mac, 0, sizeof(cbb_mac));

	block_size = EVP_CIPHER_CTX_block_size(enc);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		goto err;

	/* Determine explicit IV length. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION &&
	    EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(enc);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		goto err;

	mac_len = 0;
	if (rl->read_hash_ctx != NULL) {
		mac_len = EVP_MD_CTX_size(rl->read_hash_ctx);
		if (mac_len <= 0 || mac_len > EVP_MAX_MD_SIZE)
			goto err;
	}

	/* CBC has at least one padding byte. */
	min_len = eiv_len + mac_len;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		min_len += 1;

	if (CBS_len(fragment) < min_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (CBS_len(fragment) > SSL3_RT_MAX_ENCRYPTED_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}
	if (CBS_len(fragment) % block_size != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	/* XXX - decrypt/process in place for now. */
	plain = (uint8_t *)CBS_data(fragment);
	plain_len = CBS_len(fragment);

	if (!EVP_Cipher(enc, plain, CBS_data(fragment), plain_len))
		goto err;

	rrec.data = plain;
	rrec.input = plain;
	rrec.length = plain_len;

	/*
	 * We now have to remove padding, extract MAC, calculate MAC
	 * and compare MAC in constant time.
	 */
	if (block_size > 1)
		ssl3_cbc_remove_padding(&rrec, eiv_len, mac_len);

	if ((mac = calloc(1, mac_len)) == NULL)
		goto err;

	if (!CBB_init(&cbb_mac, EVP_MAX_MD_SIZE))
		goto err;
	if (EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE) {
		ssl3_cbc_copy_mac(mac, &rrec, mac_len, rrec.length +
		    rrec.padding_length);
		rrec.length -= mac_len;
		if (!tls12_record_layer_read_mac_cbc(rl, &cbb_mac, content_type,
		    rrec.input, rrec.length, mac_len, rrec.padding_length))
			goto err;
	} else {
		rrec.length -= mac_len;
		memcpy(mac, rrec.data + rrec.length, mac_len);
		if (!tls12_record_layer_read_mac(rl, &cbb_mac, content_type,
		    rrec.input, rrec.length))
			goto err;
	}
	if (!CBB_finish(&cbb_mac, &out_mac, &out_mac_len))
		goto err;
	if (mac_len != out_mac_len)
		goto err;

	if (timingsafe_memcmp(mac, out_mac, mac_len) != 0) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}

	if (rrec.length > SSL3_RT_MAX_COMPRESSED_LENGTH + mac_len) {
		rl->alert_desc = SSL_AD_BAD_RECORD_MAC;
		goto err;
	}
	if (rrec.length > SSL3_RT_MAX_PLAIN_LENGTH) {
		rl->alert_desc = SSL_AD_RECORD_OVERFLOW;
		goto err;
	}

	*out = rrec.data;
	*out_len = rrec.length;

	ret = 1;

 err:
	CBB_cleanup(&cbb_mac);
	freezero(mac, mac_len);
	freezero(out_mac, out_mac_len);

	return ret;
}

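/*
 * Parse a TLS/DTLS record from buf, remove the record protection and return
 * a pointer to the plaintext content. On failure an alert description may be
 * set, which the caller can retrieve via tls12_record_layer_alert().
 */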
int
tls12_record_layer_open_record(struct tls12_record_layer *rl, uint8_t *buf,
    size_t buf_len, uint8_t **out, size_t *out_len)
{
	CBS cbs, fragment, seq_no;
	uint16_t epoch, version;
	uint8_t content_type;

	CBS_init(&cbs, buf, buf_len);

	if (!CBS_get_u8(&cbs, &content_type))
		return 0;
	if (!CBS_get_u16(&cbs, &version))
		return 0;
	if (rl->dtls) {
		if (!CBS_get_u16(&cbs, &epoch))
			return 0;
		if (!CBS_get_bytes(&cbs, &seq_no, 6))
			return 0;
	}
	if (!CBS_get_u16_length_prefixed(&cbs, &fragment))
		return 0;

	if (rl->read_aead_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_aead(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	} else if (rl->read_cipher_ctx != NULL) {
		if (!tls12_record_layer_open_record_protected_cipher(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	} else {
		if (!tls12_record_layer_open_record_plaintext(rl,
		    content_type, &fragment, out, out_len))
			return 0;
	}

	if (!rl->dtls)
		tls1_record_sequence_increment(rl->read_seq_num);

	return 1;
}

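/*
 * Write out an unprotected record - only valid while no write cipher state
 * has been configured.
 */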
static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (rl->write_aead_ctx != NULL || rl->write_cipher_ctx != NULL)
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

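/*
 * Encrypt a record with the configured AEAD: reconstruct the nonce, write
 * the explicit nonce to the record if required, then seal the content with
 * the pseudo-header as additional data.
 */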
static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	const SSL_AEAD_CTX *aead = rl->write_aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint16_t epoch = 0;
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    rl->write_seq_num, &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    rl->write_seq_num, &nonce, &nonce_len))
			goto err;
	}

	if (aead->variable_nonce_in_record) {
		/* XXX - length check? */
		if (!CBB_add_bytes(out, rl->write_seq_num, aead->variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    epoch, rl->write_seq_num, SSL3_SEQUENCE_SIZE, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + aead->tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(&aead->ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

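/*
 * Encrypt a record with the configured cipher and MAC: prepend an explicit
 * IV where needed, append the MAC and CBC padding, then encrypt the result.
 */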
static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write_cipher_ctx;
	size_t mac_len, pad_len;
	int block_size, eiv_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION &&
	    EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(enc);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		goto err;
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write_hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    content, content_len, &mac_len))
			goto err;
	}

	plain_len = (size_t)eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	block_size = EVP_CIPHER_CTX_block_size(enc);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		goto err;
	if (block_size > 1) {
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

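/*
 * Seal a single record: write the record header (content type, version and,
 * for DTLS, epoch plus sequence number), protect the content into the length
 * prefixed fragment and increment the write sequence number.
 */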
int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	CBB fragment;

	if (!CBB_add_u8(cbb, content_type))
		return 0;
	if (!CBB_add_u16(cbb, rl->version))
		return 0;
	if (rl->dtls) {
		if (!tls12_record_layer_build_seq_num(rl, cbb,
		    rl->write_epoch, rl->write_seq_num,
		    SSL3_SEQUENCE_SIZE))
			return 0;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		return 0;

	if (rl->write_aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	} else if (rl->write_cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	}

	if (!CBB_flush(cbb))
		return 0;

	tls1_record_sequence_increment(rl->write_seq_num);

	return 1;
}
896