/*
 * Copyright (c) 2011-2012 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@dragonflybsd.org>
 * by Venkatesh Srinivas <vsrinivas@dragonflybsd.org>
 * by Alex Hornung <alexh@dragonflybsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "dmsg_local.h"

/*
 * Setup crypto for pthreads
 */
static pthread_mutex_t *crypto_locks;
int crypto_count;

static int dmsg_crypto_gcm_init(dmsg_ioq_t *, char *, int, char *, int, int);
static void dmsg_crypto_gcm_uninit(dmsg_ioq_t *);
static int dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t *, char *, char *, int, int *);
static int dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t *, char *, char *, int, int *);

/*
 * NOTE: the order of this table needs to match the DMSG_CRYPTO_ALGO_*_IDX
 *	 defines in network.h.
 */
static struct crypto_algo crypto_algos[] = {
	{
		.name = "aes-256-gcm",
		.keylen = DMSG_CRYPTO_GCM_KEY_SIZE,
		.unused01 = 0,
		.init = dmsg_crypto_gcm_init,
		.uninit = dmsg_crypto_gcm_uninit,
		.enc_chunk = dmsg_crypto_gcm_encrypt_chunk,
		.dec_chunk = dmsg_crypto_gcm_decrypt_chunk
	},
	{ NULL, 0, 0, NULL, NULL, NULL, NULL }
};

static
unsigned long
dmsg_crypto_id_callback(void)
{
	return ((unsigned long)(uintptr_t)pthread_self());
}

static
void
dmsg_crypto_locking_callback(int mode, int type,
			     const char *file __unused, int line __unused)
{
	assert(type >= 0 && type < crypto_count);
	if (mode & CRYPTO_LOCK) {
		pthread_mutex_lock(&crypto_locks[type]);
	} else {
		pthread_mutex_unlock(&crypto_locks[type]);
	}
}

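/*
 * Register the OpenSSL thread-id and locking callbacks, allocating one
 * mutex per lock slot the library asks for.  This is needed for
 * thread-safe use of OpenSSL's legacy (pre-1.1.0) locking API.
 */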
void
dmsg_crypto_setup(void)
{
	crypto_count = CRYPTO_num_locks();
	crypto_locks = calloc(crypto_count, sizeof(crypto_locks[0]));
	CRYPTO_set_id_callback(dmsg_crypto_id_callback);
	CRYPTO_set_locking_callback(dmsg_crypto_locking_callback);
}

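/*
 * Initialize an AES-256-GCM cipher context for one direction of an ioq,
 * using the session key and the fixed (session-specific) portion of the
 * 96-bit IV taken from the negotiated handshake material.
 */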
static
int
dmsg_crypto_gcm_init(dmsg_ioq_t *ioq, char *key, int klen,
		     char *iv_fixed, int ivlen, int enc)
{
	int i, ok;

	if (klen < DMSG_CRYPTO_GCM_KEY_SIZE ||
	    ivlen < DMSG_CRYPTO_GCM_IV_FIXED_SIZE) {
		dm_printf(1, "%s\n", "Not enough key or iv material");
		return -1;
	}

	dm_printf(6, "%s key: ", enc ? "Encryption" : "Decryption");
	for (i = 0; i < DMSG_CRYPTO_GCM_KEY_SIZE; ++i)
		dmx_printf(6, "%02x", (unsigned char)key[i]);
	dmx_printf(6, "%s\n", "");

	dm_printf(6, "%s iv: ", enc ? "Encryption" : "Decryption");
	for (i = 0; i < DMSG_CRYPTO_GCM_IV_FIXED_SIZE; ++i)
		dmx_printf(6, "%02x", (unsigned char)iv_fixed[i]);
	dmx_printf(6, "%s\n", " (fixed part only)");

	memset(ioq->iv, 0, DMSG_CRYPTO_GCM_IV_SIZE);
	memcpy(ioq->iv, iv_fixed, DMSG_CRYPTO_GCM_IV_FIXED_SIZE);

	ioq->ctx = EVP_CIPHER_CTX_new();

	if (enc)
		ok = EVP_EncryptInit_ex(ioq->ctx, EVP_aes_256_gcm(), NULL,
					(unsigned char*)key, ioq->iv);
	else
		ok = EVP_DecryptInit_ex(ioq->ctx, EVP_aes_256_gcm(), NULL,
					(unsigned char*)key, ioq->iv);
	if (!ok)
		goto fail;

	/*
	 * According to the original Galois/Counter Mode of Operation (GCM)
	 * proposal, only IVs that are exactly 96 bits get used without any
	 * further processing. Other IV sizes cause the GHASH() operation
	 * to be applied to the IV, which is more costly.
	 *
	 * The NIST SP 800-38D also recommends using a 96 bit IV for the same
	 * reasons. We actually follow the deterministic construction
	 * recommended in NIST SP 800-38D with a 64 bit invocation field as an
	 * integer counter and a random, session-specific fixed field.
	 *
	 * This means that we can essentially use the same session key and
	 * IV fixed field for up to 2^64 invocations of the authenticated
	 * encryption or decryption.
	 *
	 * With a chunk size of 64 bytes, this adds up to 1 zettabyte of
	 * traffic.
	 */
	ok = EVP_CIPHER_CTX_ctrl(ioq->ctx, EVP_CTRL_GCM_SET_IVLEN,
				 DMSG_CRYPTO_GCM_IV_SIZE, NULL);
	if (!ok)
		goto fail;

	/*
	 * Strictly speaking, padding is irrelevant with a counter mode
	 * encryption.
	 *
	 * However, setting padding to 0, even if using a counter mode such
	 * as GCM, will cause an error in _finish if the pt/ct size is not
	 * a multiple of the cipher block size.
	 */
	EVP_CIPHER_CTX_set_padding(ioq->ctx, 0);

	return 0;

fail:
	dm_printf(1, "%s\n", "Error during _gcm_init");
	return -1;
}

static
void
dmsg_crypto_gcm_uninit(dmsg_ioq_t *ioq)
{
	EVP_CIPHER_CTX_free(ioq->ctx);
	ioq->ctx = NULL;
}

static
int
_gcm_iv_increment(char *iv)
{
	/*
	 * Deterministic construction according to NIST SP 800-38D, with
	 * 64 bit invocation field as integer counter.
	 *
	 * In other words, our 96 bit IV consists of a 32 bit fixed field
	 * unique to the session and a 64 bit integer counter.
	 */

	uint64_t *c = (uint64_t *)(&iv[DMSG_CRYPTO_GCM_IV_FIXED_SIZE]);

	/* Increment invocation field integer counter */
	*c = htobe64(be64toh(*c)+1);

	/*
	 * Detect wrap-around, which means it is time to renegotiate
	 * the session to get a new key and/or fixed field.
	 */
	return (*c == 0) ? 0 : 1;
}

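/*
 * Encrypt a single plaintext chunk (pt, in_size bytes) into ct using the
 * current running IV, then advance the IV's invocation counter for the
 * next chunk.  *out_size is set to the number of ciphertext bytes produced.
 */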
static
int
dmsg_crypto_gcm_encrypt_chunk(dmsg_ioq_t *ioq, char *ct, char *pt,
			      int in_size, int *out_size)
{
	int ok;
	int u_len;

	*out_size = 0;

	/*
	 * Change running IV for each block
	 */
	ok = EVP_CIPHER_CTX_set_iv(ioq->ctx, (unsigned char *)ioq->iv,
				   DMSG_CRYPTO_GCM_IV_SIZE);

	if (!ok)
		goto fail;

	u_len = 0;	/* safety */
	ok = EVP_EncryptUpdate(ioq->ctx, (unsigned char*)ct, &u_len,
			       (unsigned char*)pt, in_size);
	if (!ok)
		goto fail;

	ok = _gcm_iv_increment(ioq->iv);
	if (!ok) {
		ioq->error = DMSG_IOQ_ERROR_IVWRAP;
		goto fail_out;
	}

	*out_size = u_len;

	return 0;

fail:
	ioq->error = DMSG_IOQ_ERROR_ALGO;
fail_out:
	dm_printf(1, "%s\n", "error during encrypt_chunk");
	return -1;
}

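/*
 * Decrypt a single ciphertext chunk (ct, out_size bytes) into pt using
 * the current running IV, then advance the IV's invocation counter.
 * *consume_size is set to the number of bytes decrypted.
 */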
static
int
dmsg_crypto_gcm_decrypt_chunk(dmsg_ioq_t *ioq, char *ct, char *pt,
			      int out_size, int *consume_size)
{
	int ok;
	int u_len;

	*consume_size = 0;

	/*
	 * Change running IV for each block
	 */
	ok = EVP_CIPHER_CTX_set_iv(ioq->ctx, (unsigned char *)ioq->iv,
				   DMSG_CRYPTO_GCM_IV_SIZE);
	if (!ok) {
		ioq->error = DMSG_IOQ_ERROR_ALGO;
		goto fail_out;
	}

	ok = EVP_DecryptUpdate(ioq->ctx, (unsigned char*)pt, &u_len,
			       (unsigned char*)ct, out_size);
	if (!ok)
		goto fail;

	ok = _gcm_iv_increment(ioq->iv);
	if (!ok) {
		ioq->error = DMSG_IOQ_ERROR_IVWRAP;
		goto fail_out;
	}

	*consume_size = u_len;

	return 0;

fail:
	ioq->error = DMSG_IOQ_ERROR_MACFAIL;
fail_out:
	dm_printf(1, "%s\n",
		  "error during decrypt_chunk "
		  "(likely authentication error)");
	return -1;
}

/*
 * Synchronously negotiate crypto for a new session. This must occur
 * within 10 seconds or the connection is error'd out.
 *
 * We work off the IP address and/or reverse DNS. The IP address is
 * checked first, followed by the IP address at various levels of granularity,
 * followed by the full domain name and domain names at various levels of
 * granularity.
 *
 *	/etc/hammer2/remote/<name>.pub	- Contains a public key
 *	/etc/hammer2/remote/<name>.none	- Indicates no encryption (empty file)
 *					  (e.g. localhost.none).
 *
 * We first attempt to locate a public key file based on the peer address or
 * peer FQDN.
 *
 *	<name>.none	- No further negotiation is needed. We simply return.
 *			  All communication proceeds without encryption.
 *			  No public key handshake occurs in this situation.
 *			  (both ends must match).
 *
 *	<name>.pub	- We have located the public key for the peer. Both
 *			  sides transmit a block encrypted with their private
 *			  keys and the peer's public key.
 *
 *			  Both sides receive a block and decrypt it.
 *
 *			  Both sides formulate a reply using the decrypted
 *			  block and transmit it.
 *
 *			  Communication proceeds with the negotiated session
 *			  key (typically AES-256-GCM).
 *
 * If we fail to locate the appropriate file and no floating.db exists the
 * connection is terminated without further action.
 *
 * If floating.db exists the connection proceeds with a floating negotiation.
 */
typedef union {
	struct sockaddr sa;
	struct sockaddr_in sa_in;
	struct sockaddr_in6 sa_in6;
} sockaddr_any_t;

void
dmsg_crypto_negotiate(dmsg_iocom_t *iocom)
{
	sockaddr_any_t sa;
	socklen_t salen = sizeof(sa);
	char peername[128];
	char realname[128];
	dmsg_handshake_t handtx;
	dmsg_handshake_t handrx;
	char buf1[sizeof(handtx)];
	char buf2[sizeof(handtx)];
	char *ptr;
	char *path = NULL;
	struct stat st;
	FILE *fp;
	RSA *keys[3] = { NULL, NULL, NULL };
	size_t i;
	size_t blksize;
	size_t blkmask;
	ssize_t n;
	int fd;
	int error;

	/*
	 * Get the peer IP address for the connection as a string.
	 */
	if (getpeername(iocom->sock_fd, &sa.sa, &salen) < 0) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOPEER;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "accept: getpeername() failed");
		goto done;
	}
	if (getnameinfo(&sa.sa, salen, peername, sizeof(peername),
			NULL, 0, NI_NUMERICHOST) < 0) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOPEER;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "accept: cannot decode sockaddr");
		goto done;
	}
	if (DMsgDebugOpt) {
		if (realhostname_sa(realname, sizeof(realname),
				    &sa.sa, salen) == HOSTNAME_FOUND) {
			dm_printf(1, "accept from %s (%s)\n",
				  peername, realname);
		} else {
			dm_printf(1, "accept from %s\n", peername);
		}
	}

	/*
	 * Find the remote host's public key
	 *
	 * If the link is not to be encrypted (<ip>.none located) we shortcut
	 * the handshake entirely. No buffers are exchanged.
	 */
	asprintf(&path, "%s/%s.pub", DMSG_PATH_REMOTE, peername);
	if ((fp = fopen(path, "r")) == NULL) {
		free(path);
		asprintf(&path, "%s/%s.none",
			 DMSG_PATH_REMOTE, peername);
		if (stat(path, &st) < 0) {
			iocom->ioq_rx.error = DMSG_IOQ_ERROR_NORKEY;
			atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
			dm_printf(1, "%s\n", "auth failure: unknown host");
			goto done;
		}
		dm_printf(1, "%s\n", "auth succeeded, unencrypted link");
		goto done;
	}
	if (fp) {
		keys[0] = PEM_read_RSA_PUBKEY(fp, NULL, NULL, NULL);
		fclose(fp);
		if (keys[0] == NULL) {
			iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
			atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
			dm_printf(1, "%s\n", "auth failure: bad key format");
			goto done;
		}
	}

	/*
	 * Get our public and private keys
	 */
	free(path);
	asprintf(&path, DMSG_DEFAULT_DIR "/rsa.pub");
	if ((fp = fopen(path, "r")) == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOLKEY;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		goto done;
	}
	keys[1] = PEM_read_RSA_PUBKEY(fp, NULL, NULL, NULL);
	fclose(fp);
	if (keys[1] == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: bad host key format");
		goto done;
	}

	free(path);
	asprintf(&path, DMSG_DEFAULT_DIR "/rsa.prv");
	if ((fp = fopen(path, "r")) == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_NOLKEY;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: bad host key format");
		goto done;
	}
	keys[2] = PEM_read_RSAPrivateKey(fp, NULL, NULL, NULL);
	fclose(fp);
	if (keys[2] == NULL) {
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: bad host key format");
		goto done;
	}
	free(path);
	path = NULL;

	/*
	 * public key encrypt/decrypt block size.
	 */
	if (keys[0]) {
		blksize = (size_t)RSA_size(keys[0]);
		if (blksize != (size_t)RSA_size(keys[1]) ||
		    blksize != (size_t)RSA_size(keys[2]) ||
		    sizeof(handtx) % blksize != 0) {
			iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYFMT;
			atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
			dm_printf(1, "%s\n",
				  "auth failure: key size mismatch");
			goto done;
		}
	} else {
		blksize = sizeof(handtx);
	}
	blkmask = blksize - 1;

	bzero(&handrx, sizeof(handrx));
	bzero(&handtx, sizeof(handtx));

	/*
	 * Fill all unused fields (particularly all junk fields) with random
	 * data, and also set the session key.
	 */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0 ||
	    fstat(fd, &st) < 0 ||	/* something wrong */
	    S_ISREG(st.st_mode) ||	/* supposed to be a RNG dev! */
	    read(fd, &handtx, sizeof(handtx)) != sizeof(handtx)) {
urandfail:
		if (fd >= 0)
			close(fd);
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_BADURANDOM;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dm_printf(1, "%s\n", "auth failure: bad rng");
		goto done;
	}
	if (bcmp(&handrx, &handtx, sizeof(handtx)) == 0)
		goto urandfail;		/* read all zeros */
	close(fd);
	/* ERR_load_crypto_strings(); openssl debugging */

	/*
	 * Handshake with the remote.
	 *
	 *	Encrypt with my private and remote's public
	 *	Decrypt with my private and remote's public
	 *
	 * When encrypting we have to make sure our buffer fits within the
	 * modulus, which typically requires bit 7 of the first byte to be
	 * zero. To be safe make sure that bit 7 and bit 6 are zero.
	 */
	snprintf(handtx.quickmsg, sizeof(handtx.quickmsg), "Testing 1 2 3");
	handtx.magic = DMSG_HDR_MAGIC;
	handtx.version = 1;
	handtx.flags = 0;
	assert(sizeof(handtx.verf) * 4 == sizeof(handtx.sess));
	bzero(handtx.verf, sizeof(handtx.verf));

	handtx.pad1[0] &= 0x3f;	/* message must fit within modulus */
	handtx.pad2[0] &= 0x3f;	/* message must fit within modulus */

	for (i = 0; i < sizeof(handtx.sess); ++i)
		handtx.verf[i / 4] ^= handtx.sess[i];

	/*
	 * Write handshake buffer to remote
	 */
	for (i = 0; i < sizeof(handtx); i += blksize) {
		ptr = (char *)&handtx + i;
		if (keys[0]) {
			/*
			 * Since we are double-encrypting we have to make
			 * sure that the result of the first stage does
			 * not blow out the modulus for the second stage.
			 *
			 * The pointer is pointing to the pad*[] area so
			 * we can mess with that until the first stage
			 * is legal.
			 */
			do {
				++*(int *)(ptr + 4);
				if (RSA_private_encrypt(blksize,
					    (unsigned char*)ptr,
					    (unsigned char*)buf1,
					    keys[2], RSA_NO_PADDING) < 0) {
					iocom->ioq_rx.error =
						DMSG_IOQ_ERROR_KEYXCHGFAIL;
				}
			} while (buf1[0] & 0xC0);

			if (RSA_public_encrypt(blksize,
				    (unsigned char*)buf1,
				    (unsigned char*)buf2,
				    keys[0], RSA_NO_PADDING) < 0) {
				iocom->ioq_rx.error =
					DMSG_IOQ_ERROR_KEYXCHGFAIL;
			}
		}
		if (write(iocom->sock_fd, buf2, blksize) != (ssize_t)blksize) {
			dmio_printf(iocom, 1, "%s\n", "WRITE ERROR");
		}
	}
	if (iocom->ioq_rx.error) {
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dmio_printf(iocom, 1, "%s\n",
			    "auth failure: key exchange failure "
			    "during encryption");
		goto done;
	}

	/*
	 * Read handshake buffer from remote
	 */
	i = 0;
	while (i < sizeof(handrx)) {
		ptr = (char *)&handrx + i;
		n = read(iocom->sock_fd, ptr, blksize - (i & blkmask));
		if (n <= 0)
			break;
		ptr -= (i & blkmask);
		i += n;
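		/*
		 * Once a full RSA block has accumulated, decrypt it in
		 * place: first with our private key, then with the peer's
		 * public key, reversing the sender's two-stage encryption.
		 */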
		if (keys[0] && (i & blkmask) == 0) {
			if (RSA_private_decrypt(blksize,
				    (unsigned char*)ptr,
				    (unsigned char*)buf1,
				    keys[2], RSA_NO_PADDING) < 0)
				iocom->ioq_rx.error =
					DMSG_IOQ_ERROR_KEYXCHGFAIL;
			if (RSA_public_decrypt(blksize,
				    (unsigned char*)buf1,
				    (unsigned char*)ptr,
				    keys[0], RSA_NO_PADDING) < 0)
				iocom->ioq_rx.error =
					DMSG_IOQ_ERROR_KEYXCHGFAIL;
		}
	}
	if (iocom->ioq_rx.error) {
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dmio_printf(iocom, 1, "%s\n",
			    "auth failure: key exchange failure "
			    "during decryption");
		goto done;
	}

	/*
	 * Validate the received data. Try to make this a constant-time
	 * algorithm.
	 */
	if (i != sizeof(handrx)) {
keyxchgfail:
		iocom->ioq_rx.error = DMSG_IOQ_ERROR_KEYXCHGFAIL;
		atomic_set_int(&iocom->flags, DMSG_IOCOMF_EOF);
		dmio_printf(iocom, 1, "%s\n",
			    "auth failure: key exchange failure");
		goto done;
	}

	if (handrx.magic == DMSG_HDR_MAGIC_REV) {
		handrx.version = bswap16(handrx.version);
		handrx.flags = bswap32(handrx.flags);
	}
	for (i = 0; i < sizeof(handrx.sess); ++i)
		handrx.verf[i / 4] ^= handrx.sess[i];
	n = 0;
	for (i = 0; i < sizeof(handrx.verf); ++i)
		n += handrx.verf[i];
	if (handrx.version != 1)
		++n;
	if (n != 0)
		goto keyxchgfail;

	/*
	 * Use separate session keys and session fixed IVs for receive and
	 * transmit.
	 */
	error = crypto_algos[DMSG_CRYPTO_ALGO].init(&iocom->ioq_rx,
		    (char*)handrx.sess,
		    crypto_algos[DMSG_CRYPTO_ALGO].keylen,
		    (char*)handrx.sess + crypto_algos[DMSG_CRYPTO_ALGO].keylen,
		    sizeof(handrx.sess) - crypto_algos[DMSG_CRYPTO_ALGO].keylen,
		    0 /* decryption */);
	if (error)
		goto keyxchgfail;

	error = crypto_algos[DMSG_CRYPTO_ALGO].init(&iocom->ioq_tx,
		    (char*)handtx.sess,
		    crypto_algos[DMSG_CRYPTO_ALGO].keylen,
		    (char*)handtx.sess + crypto_algos[DMSG_CRYPTO_ALGO].keylen,
		    sizeof(handtx.sess) - crypto_algos[DMSG_CRYPTO_ALGO].keylen,
		    1 /* encryption */);
	if (error)
		goto keyxchgfail;

	atomic_set_int(&iocom->flags, DMSG_IOCOMF_CRYPTED);

	dmio_printf(iocom, 1, "auth success: %s\n", handrx.quickmsg);
done:
	if (path)
		free(path);
	if (keys[0])
		RSA_free(keys[0]);
	if (keys[1])
		RSA_free(keys[1]);
	if (keys[2])
		RSA_free(keys[2]);
}

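/*
 * Tear down the per-session cipher state, releasing the cipher contexts
 * for both the receive and transmit ioqs.
 */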
void
dmsg_crypto_terminate(dmsg_iocom_t *iocom)
{
	crypto_algos[DMSG_CRYPTO_ALGO].uninit(&iocom->ioq_rx);
	crypto_algos[DMSG_CRYPTO_ALGO].uninit(&iocom->ioq_tx);
}

/*
 * Decrypt pending data in the ioq's fifo. The data is decrypted in-place.
 */
void
dmsg_crypto_decrypt(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq)
{
	int p_len;
	int used;
	__unused int error;	/* XXX */
	char buf[512];

	/*
	 * fifo_beg to fifo_cdx is data already decrypted.
	 * fifo_cdn to fifo_end is data not yet decrypted.
	 */
	p_len = ioq->fifo_end - ioq->fifo_cdn;	/* data not yet decrypted */

	if (p_len == 0)
		return;

	while (p_len >= DMSG_CRYPTO_CHUNK_SIZE) {
		bcopy(ioq->buf + ioq->fifo_cdn, buf, DMSG_CRYPTO_CHUNK_SIZE);
		error = crypto_algos[DMSG_CRYPTO_ALGO].dec_chunk(
				ioq, buf,
				ioq->buf + ioq->fifo_cdx,
				DMSG_CRYPTO_CHUNK_SIZE,
				&used);
#ifdef CRYPTO_DEBUG
		dmio_printf(iocom, 5,
			    "dec: p_len: %d, used: %d, "
			    "fifo_cdn: %ju, fifo_cdx: %ju\n",
			    p_len, used,
			    ioq->fifo_cdn, ioq->fifo_cdx);
#endif
		p_len -= used;
		ioq->fifo_cdn += used;
		ioq->fifo_cdx += DMSG_CRYPTO_CHUNK_SIZE;
#ifdef CRYPTO_DEBUG
		dmio_printf(iocom, 5,
			    "dec: p_len: %d, used: %d, "
			    "fifo_cdn: %ju, fifo_cdx: %ju\n",
			    p_len, used, ioq->fifo_cdn, ioq->fifo_cdx);
#endif
	}
}

/*
 * *nactp is set to the number of ORIGINAL bytes consumed by the encrypter.
 * The FIFO may contain more data.
 */
int
dmsg_crypto_encrypt(dmsg_iocom_t *iocom __unused, dmsg_ioq_t *ioq,
		    struct iovec *iov, int n, size_t *nactp)
{
	int p_len, used, ct_used;
	int i;
	__unused int error;	/* XXX */
	size_t nmax;

	nmax = sizeof(ioq->buf) - ioq->fifo_end;	/* max new bytes */

	*nactp = 0;
	for (i = 0; i < n && nmax; ++i) {
		used = 0;
		p_len = iov[i].iov_len;
		assert((p_len & DMSG_ALIGNMASK) == 0);

		while (p_len >= DMSG_CRYPTO_CHUNK_SIZE &&
		       nmax >= DMSG_CRYPTO_CHUNK_SIZE)
		{
			error = crypto_algos[DMSG_CRYPTO_ALGO].enc_chunk(
					ioq,
					ioq->buf + ioq->fifo_cdx,
					(char *)iov[i].iov_base + used,
					DMSG_CRYPTO_CHUNK_SIZE, &ct_used);
#ifdef CRYPTO_DEBUG
			dmio_printf(iocom, 5,
				    "nactp: %ju, p_len: %d, "
				    "ct_used: %d, used: %d, nmax: %ju\n",
				    *nactp, p_len, ct_used, used, nmax);
#endif

			*nactp += (size_t)DMSG_CRYPTO_CHUNK_SIZE; /* plaintext count */
			used += DMSG_CRYPTO_CHUNK_SIZE;
			p_len -= DMSG_CRYPTO_CHUNK_SIZE;

			/*
			 * NOTE: crypted count will eventually differ from
			 *	 nmax, but for now we have not yet introduced
			 *	 random armor.
			 */
			ioq->fifo_cdx += (size_t)ct_used;
			ioq->fifo_cdn += (size_t)ct_used;
			ioq->fifo_end += (size_t)ct_used;
			nmax -= (size_t)ct_used;
#ifdef CRYPTO_DEBUG
			dmio_printf(iocom, 5,
				    "nactp: %ju, p_len: %d, "
				    "ct_used: %d, used: %d, nmax: %ju\n",
				    *nactp, p_len, ct_used, used, nmax);
#endif
		}
	}
	iov[0].iov_base = ioq->buf + ioq->fifo_beg;
	iov[0].iov_len = ioq->fifo_cdx - ioq->fifo_beg;

	return (1);
}