1 /* $NetBSD: cfx.c,v 1.3 2023/06/19 21:41:43 christos Exp $ */
2
3 /*
4 * Copyright (c) 2003, PADL Software Pty Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 *
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * 3. Neither the name of PADL Software nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34
35 #include "gsskrb5_locl.h"
36
37 /*
38 * Implementation of RFC 4121
39 */
40
41 #define CFXSentByAcceptor (1 << 0)
42 #define CFXSealed (1 << 1)
43 #define CFXAcceptorSubkey (1 << 2)
44
45 krb5_error_code
_gsskrb5cfx_wrap_length_cfx(krb5_context context,krb5_crypto crypto,int conf_req_flag,int dce_style,size_t input_length,size_t * output_length,size_t * cksumsize,uint16_t * padlength)46 _gsskrb5cfx_wrap_length_cfx(krb5_context context,
47 krb5_crypto crypto,
48 int conf_req_flag,
49 int dce_style,
50 size_t input_length,
51 size_t *output_length,
52 size_t *cksumsize,
53 uint16_t *padlength)
54 {
55 krb5_error_code ret;
56 krb5_cksumtype type;
57
58 /* 16-byte header is always first */
59 *output_length = sizeof(gss_cfx_wrap_token_desc);
60 *padlength = 0;
61
62 ret = krb5_crypto_get_checksum_type(context, crypto, &type);
63 if (ret)
64 return ret;
65
66 ret = krb5_checksumsize(context, type, cksumsize);
67 if (ret)
68 return ret;
69
70 if (conf_req_flag) {
71 size_t padsize;
72
73 /* Header is concatenated with data before encryption */
74 input_length += sizeof(gss_cfx_wrap_token_desc);
75
76 if (dce_style) {
77 ret = krb5_crypto_getblocksize(context, crypto, &padsize);
78 } else {
79 ret = krb5_crypto_getpadsize(context, crypto, &padsize);
80 }
81 if (ret) {
82 return ret;
83 }
84 if (padsize > 1) {
85 /* XXX check this */
86 *padlength = padsize - (input_length % padsize);
87
88 /* We add the pad ourselves (noted here for completeness only) */
89 input_length += *padlength;
90 }
91
92 *output_length += krb5_get_wrapped_length(context,
93 crypto, input_length);
94 } else {
95 /* Checksum is concatenated with data */
96 *output_length += input_length + *cksumsize;
97 }
98
99 assert(*output_length > input_length);
100
101 return 0;
102 }
103
104 OM_uint32
_gssapi_wrap_size_cfx(OM_uint32 * minor_status,const gsskrb5_ctx ctx,krb5_context context,int conf_req_flag,gss_qop_t qop_req,OM_uint32 req_output_size,OM_uint32 * max_input_size)105 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
106 const gsskrb5_ctx ctx,
107 krb5_context context,
108 int conf_req_flag,
109 gss_qop_t qop_req,
110 OM_uint32 req_output_size,
111 OM_uint32 *max_input_size)
112 {
113 krb5_error_code ret;
114
115 *max_input_size = 0;
116
117 /* 16-byte header is always first */
118 if (req_output_size < 16)
119 return 0;
120 req_output_size -= 16;
121
122 if (conf_req_flag) {
123 size_t wrapped_size, sz;
124
125 wrapped_size = req_output_size + 1;
126 do {
127 wrapped_size--;
128 sz = krb5_get_wrapped_length(context,
129 ctx->crypto, wrapped_size);
130 } while (wrapped_size && sz > req_output_size);
131 if (wrapped_size == 0)
132 return 0;
133
134 /* inner header */
135 if (wrapped_size < 16)
136 return 0;
137
138 wrapped_size -= 16;
139
140 *max_input_size = wrapped_size;
141 } else {
142 krb5_cksumtype type;
143 size_t cksumsize;
144
145 ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
146 if (ret)
147 return ret;
148
149 ret = krb5_checksumsize(context, type, &cksumsize);
150 if (ret)
151 return ret;
152
153 if (req_output_size < cksumsize)
154 return 0;
155
156 /* Checksum is concatenated with data */
157 *max_input_size = req_output_size - cksumsize;
158 }
159
160 return 0;
161 }
162
163 /*
164 * Rotate "rrc" bytes to the front or back
165 */
166
167 static krb5_error_code
rrc_rotate(void * data,size_t len,uint16_t rrc,krb5_boolean unrotate)168 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
169 {
170 u_char *tmp, buf[256];
171 size_t left;
172
173 if (len == 0)
174 return 0;
175
176 rrc %= len;
177
178 if (rrc == 0)
179 return 0;
180
181 left = len - rrc;
182
183 if (rrc <= sizeof(buf)) {
184 tmp = buf;
185 } else {
186 tmp = malloc(rrc);
187 if (tmp == NULL)
188 return ENOMEM;
189 }
190
191 if (unrotate) {
192 memcpy(tmp, data, rrc);
193 memmove(data, (u_char *)data + rrc, left);
194 memcpy((u_char *)data + left, tmp, rrc);
195 } else {
196 memcpy(tmp, (u_char *)data + left, rrc);
197 memmove((u_char *)data + rrc, data, left);
198 memcpy(data, tmp, rrc);
199 }
200
201 if (rrc > sizeof(buf))
202 free(tmp);
203
204 return 0;
205 }
206
207 gss_iov_buffer_desc *
_gk_find_buffer(gss_iov_buffer_desc * iov,int iov_count,OM_uint32 type)208 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
209 {
210 int i;
211 gss_iov_buffer_t iovp = GSS_C_NO_IOV_BUFFER;
212
213 if (iov == GSS_C_NO_IOV_BUFFER)
214 return GSS_C_NO_IOV_BUFFER;
215
216 /*
217 * This function is used to find header, padding or trailer buffers
218 * which are singletons; return NULL if multiple instances are found.
219 */
220 for (i = 0; i < iov_count; i++) {
221 if (type == GSS_IOV_BUFFER_TYPE(iov[i].type)) {
222 if (iovp == GSS_C_NO_IOV_BUFFER)
223 iovp = &iov[i];
224 else
225 return GSS_C_NO_IOV_BUFFER;
226 }
227 }
228
229 /*
230 * For compatibility with SSPI, an empty padding buffer is treated
231 * equivalent to an absent padding buffer (unless the caller is
232 * requesting that a padding buffer be allocated).
233 */
234 if (iovp &&
235 iovp->buffer.length == 0 &&
236 type == GSS_IOV_BUFFER_TYPE_PADDING &&
237 (GSS_IOV_BUFFER_FLAGS(iovp->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) == 0)
238 iovp = NULL;
239
240 return iovp;
241 }
242
243 OM_uint32
_gk_allocate_buffer(OM_uint32 * minor_status,gss_iov_buffer_desc * buffer,size_t size)244 _gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
245 {
246 if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
247 if (buffer->buffer.length == size)
248 return GSS_S_COMPLETE;
249 free(buffer->buffer.value);
250 }
251
252 buffer->buffer.value = malloc(size);
253 buffer->buffer.length = size;
254 if (buffer->buffer.value == NULL) {
255 *minor_status = ENOMEM;
256 return GSS_S_FAILURE;
257 }
258 buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
259
260 return GSS_S_COMPLETE;
261 }
262
263
264 OM_uint32
_gk_verify_buffers(OM_uint32 * minor_status,const gsskrb5_ctx ctx,const gss_iov_buffer_desc * header,const gss_iov_buffer_desc * padding,const gss_iov_buffer_desc * trailer,int block_cipher)265 _gk_verify_buffers(OM_uint32 *minor_status,
266 const gsskrb5_ctx ctx,
267 const gss_iov_buffer_desc *header,
268 const gss_iov_buffer_desc *padding,
269 const gss_iov_buffer_desc *trailer,
270 int block_cipher)
271 {
272 if (header == NULL) {
273 *minor_status = EINVAL;
274 return GSS_S_FAILURE;
275 }
276
277 if (IS_DCE_STYLE(ctx)) {
278 /*
279 * In DCE style mode we reject having a padding or trailer buffer
280 */
281 if (padding) {
282 *minor_status = EINVAL;
283 return GSS_S_FAILURE;
284 }
285 if (trailer) {
286 *minor_status = EINVAL;
287 return GSS_S_FAILURE;
288 }
289 } else {
290 /*
291 * In non-DCE style mode we require having a padding buffer for
292 * encryption types that do not behave as stream ciphers. This
293 * check is superfluous for now, as only RC4 and RFC4121 enctypes
294 * are presently implemented for the IOV APIs; be defensive.
295 */
296 if (block_cipher && padding == NULL) {
297 *minor_status = EINVAL;
298 return GSS_S_FAILURE;
299 }
300 }
301
302 *minor_status = 0;
303 return GSS_S_COMPLETE;
304 }
305
/*
 * Wrap (seal or sign) a message supplied as an IOV array, producing an
 * RFC 4121 CFX wrap token.
 *
 * The HEADER buffer receives the 16-byte CFX token header (plus, with
 * confidentiality, the krb5 crypto header; plus the whole GSS trailer
 * when no TRAILER buffer is supplied, i.e. DCE style, recorded via the
 * RRC field).  DATA buffers are encrypted or checksummed in place;
 * SIGN_ONLY buffers are integrity protected but not encrypted.
 *
 * minor_status  - mechanism-specific status code (out)
 * ctx           - established gsskrb5 context
 * context       - krb5 context
 * conf_req_flag - non-zero to request confidentiality (encryption)
 * conf_state    - optional out: whether confidentiality was applied
 * iov/iov_count - caller's buffer vector
 *
 * Returns a GSS major status; on failure, buffers this call allocated
 * are released via gss_release_iov_buffer().
 */
OM_uint32
_gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
                     gsskrb5_ctx ctx,
                     krb5_context context,
                     int conf_req_flag,
                     int *conf_state,
                     gss_iov_buffer_desc *iov,
                     int iov_count)
{
    OM_uint32 major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    size_t gsshsize, k5hsize;
    size_t gsstsize, k5tsize;
    size_t rrc = 0, ec = 0;
    int i;
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    int32_t seq_number;
    unsigned usage;
    krb5_crypto_iov *data = NULL;

    header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    /* RFC 4121 pads via the EC field, so any padding buffer is emptied. */
    padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL) {
        padding->buffer.length = 0;
    }

    trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header,
                                      padding, trailer, FALSE);
    if (major_status != GSS_S_COMPLETE) {
        return major_status;
    }

    if (conf_req_flag) {
        size_t k5psize = 0;
        size_t k5pbase = 0;
        size_t k5bsize = 0;
        size_t size = 0;

        /* Total plaintext length across all DATA buffers. */
        for (i = 0; i < iov_count; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                size += iov[i].buffer.length;
                break;
            default:
                break;
            }
        }

        /* The token header is appended to the plaintext before encryption. */
        size += sizeof(gss_cfx_wrap_token_desc);

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_HEADER,
                                           &k5hsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_TRAILER,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_PADDING,
                                           &k5pbase);
        if (*minor_status)
            return GSS_S_FAILURE;

        /* ec = "extra count": number of filler bytes before E"header". */
        if (k5pbase > 1) {
            k5psize = k5pbase - (size % k5pbase);
        } else {
            k5psize = 0;
        }

        if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
            /* DCE style: pad a full cipher block when no padding is due. */
            *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
                                                     &k5bsize);
            if (*minor_status)
                return GSS_S_FAILURE;
            ec = k5bsize;
        } else {
            ec = k5psize;
        }

        gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
        gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
        /* DCE style without confidentiality is not supported. */
        if (IS_DCE_STYLE(ctx)) {
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }

        k5hsize = 0;
        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_CHECKSUM,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        gsshsize = sizeof(gss_cfx_wrap_token_desc);
        gsstsize = k5tsize;
    }

    /*
     * Size (or allocate) the trailer buffer.  With no trailer buffer
     * the GSS trailer is rotated into the header instead, and RRC
     * records by how much.
     */

    if (trailer == NULL) {
        rrc = gsstsize;
        if (IS_DCE_STYLE(ctx))
            rrc -= ec;
        gsshsize += gsstsize;
    } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
        major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
        if (major_status)
            goto failure;
    } else if (trailer->buffer.length < gsstsize) {
        *minor_status = KRB5_BAD_MSIZE;
        major_status = GSS_S_FAILURE;
        goto failure;
    } else
        trailer->buffer.length = gsstsize;

    /*
     * Size (or allocate) the header buffer.
     */

    if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
        major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
        if (major_status != GSS_S_COMPLETE)
            goto failure;
    } else if (header->buffer.length < gsshsize) {
        *minor_status = KRB5_BAD_MSIZE;
        major_status = GSS_S_FAILURE;
        goto failure;
    } else
        header->buffer.length = gsshsize;

    /* Fill in the RFC 4121 section 4.2.6.2 wrap token header. */
    token = (gss_cfx_wrap_token)header->buffer.value;

    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags = 0;
    token->Filler = 0xFF;

    if ((ctx->more_flags & LOCAL) == 0)
        token->Flags |= CFXSentByAcceptor;

    if (ctx->more_flags & ACCEPTOR_SUBKEY)
        token->Flags |= CFXAcceptorSubkey;

    if (ctx->more_flags & LOCAL)
        usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    else
        usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;

    if (conf_req_flag) {
        /*
         * In Wrap tokens with confidentiality, the EC field is
         * used to encode the size (in bytes) of the random filler.
         */
        token->Flags |= CFXSealed;
        token->EC[0] = (ec >> 8) & 0xFF;
        token->EC[1] = (ec >> 0) & 0xFF;

    } else {
        /*
         * In Wrap tokens without confidentiality, the EC field is
         * used to encode the size (in bytes) of the trailing
         * checksum.
         *
         * This is not used in the checksum calcuation itself,
         * because the checksum length could potentially vary
         * depending on the data length.
         */
        token->EC[0] = 0;
        token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    /* Stamp and advance the 64-bit send sequence number (upper half 0). */
    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    krb5_auth_con_getlocalseqnumber(context,
                                    ctx->auth_context,
                                    &seq_number);
    _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
    _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
    krb5_auth_con_setlocalseqnumber(context,
                                    ctx->auth_context,
                                    ++seq_number);
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /* +3 slots: krb5 header, E"header" (with ec filler), krb5 trailer. */
    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
        *minor_status = ENOMEM;
        major_status = GSS_S_FAILURE;
        goto failure;
    }

    if (conf_req_flag) {
        /*
          plain packet:

          {"header" | encrypt(plaintext-data | ec-padding | E"header")}

          Expanded, this is with with RRC = 0:

          {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }

          In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)

          {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data }
        */

        /* krb5 crypto header lives at the tail of the GSS header buffer. */
        i = 0;
        data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
        data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
        data[i].data.length = k5hsize;

        /* Map caller buffers 1:1 onto krb5 crypto iov slots. */
        for (i = 1; i < iov_count + 1; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[i - 1].buffer.length;
            data[i].data.data = iov[i - 1].buffer.value;
        }

        /*
         * Any necessary padding is added here to ensure that the
         * encrypted token header is always at the end of the
         * ciphertext.
         */

        /* encrypted CFX header in trailer (or after the header if in
           DCE mode). Copy in header into E"header"
        */
        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        if (trailer)
            data[i].data.data = trailer->buffer.value;
        else
            data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);

        data[i].data.length = ec + sizeof(*token);
        /* ec bytes of 0xFF filler, then a copy of the token header. */
        memset(data[i].data.data, 0xFF, ec);
        memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
        i++;

        /* Kerberos trailer comes after the gss trailer */
        data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
        data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
        data[i].data.length = k5tsize;
        i++;

        ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
        if (ret != 0) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        /* RRC goes into the cleartext header after encryption. */
        if (rrc) {
            token->RRC[0] = (rrc >> 8) & 0xFF;
            token->RRC[1] = (rrc >> 0) & 0xFF;
        }

    } else {
        /*
          plain packet:

          {data | "header" | gss-trailer (krb5 checksum)

          don't do RRC != 0

        */

        for (i = 0; i < iov_count; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[i].buffer.length;
            data[i].data.data = iov[i].buffer.value;
        }

        /* The token header itself is covered by the checksum. */
        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        data[i].data.data = header->buffer.value;
        data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
        i++;

        /* Checksum lands in the trailer, or right after the header. */
        data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
        if (trailer) {
            data[i].data.data = trailer->buffer.value;
        } else {
            data[i].data.data = (uint8_t *)header->buffer.value +
                                sizeof(gss_cfx_wrap_token_desc);
        }
        data[i].data.length = k5tsize;
        i++;

        ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
        if (ret) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        if (rrc) {
            token->RRC[0] = (rrc >> 8) & 0xFF;
            token->RRC[1] = (rrc >> 0) & 0xFF;
        }

        /* For integrity-only tokens, EC carries the checksum length. */
        token->EC[0] = (k5tsize >> 8) & 0xFF;
        token->EC[1] = (k5tsize >> 0) & 0xFF;
    }

    if (conf_state != NULL)
        *conf_state = conf_req_flag;

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

failure:
    if (data)
        free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}
671
/* This is slowpath */
/*
 * Undo an RRC rotation spread across an IOV array: conceptually
 * concatenate all DATA/PADDING/TRAILER buffers (in order), rotate the
 * whole region left by rrc bytes, and scatter the result back into
 * the same buffers.  HEADER and SIGN_ONLY buffers are untouched.
 *
 * Allocates a temporary copy of the whole region; returns
 * GSS_S_FAILURE/ENOMEM on allocation failure.
 */
static OM_uint32
unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
{
    uint8_t *p, *q;
    size_t len = 0, skip;
    int i;

    /* Total length of the rotatable region. */
    for (i = 0; i < iov_count; i++)
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
            len += iov[i].buffer.length;

    p = malloc(len);
    if (p == NULL) {
        *minor_status = ENOMEM;
        return GSS_S_FAILURE;
    }
    q = p;

    /* copy up */
    /* Gather the region into the contiguous scratch buffer p. */
    for (i = 0; i < iov_count; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
        {
            memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
            q += iov[i].buffer.length;
        }
    }
    assert((size_t)(q - p) == len);

    /* unrotate first part */
    /* Scatter p[rrc..len) back, starting rrc bytes into the region. */
    q = p + rrc;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
        {
            if (iov[i].buffer.length <= skip) {
                /* This buffer lies entirely within the first rrc bytes. */
                skip -= iov[i].buffer.length;
            } else {
                /* copy back to original buffer */
                memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
                q += iov[i].buffer.length - skip;
                skip = 0;
            }
        }
    }
    /* copy trailer */
    /* The first rrc bytes of p wrap around to the front of the region. */
    q = p;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
        if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
            GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
        {
            memcpy(iov[i].buffer.value, q, min(iov[i].buffer.length, skip));
            if (iov[i].buffer.length > skip)
                break;
            skip -= iov[i].buffer.length;
            q += iov[i].buffer.length;
        }
    }
    free(p);
    return GSS_S_COMPLETE;
}
742
743
/*
 * Unwrap (verify and optionally decrypt) an RFC 4121 CFX wrap token
 * supplied as an IOV array, in place.
 *
 * The HEADER buffer must contain the 16-byte CFX token header (and, in
 * DCE style, the rotated GSS trailer).  Sealed tokens are decrypted;
 * unsealed tokens have their checksum verified.
 *
 * minor_status  - mechanism-specific status code (out)
 * ctx           - established gsskrb5 context
 * context       - krb5 context
 * conf_state    - optional out: whether the token was sealed
 * qop_state     - optional out: always GSS_C_QOP_DEFAULT on success
 * iov/iov_count - caller's buffer vector
 */
OM_uint32
_gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
                       gsskrb5_ctx ctx,
                       krb5_context context,
                       int *conf_state,
                       gss_qop_t *qop_state,
                       gss_iov_buffer_desc *iov,
                       int iov_count)
{
    OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    gss_cfx_wrap_token token, ttoken;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    uint16_t ec, rrc;
    krb5_crypto_iov *data = NULL;
    int i, j;

    *minor_status = 0;

    header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    if (header->buffer.length < sizeof(*token)) /* we check exact below */
        return GSS_S_DEFECTIVE_TOKEN;

    /* RFC 4121 never emits padding; a non-empty padding buffer is an error. */
    padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL && padding->buffer.length != 0) {
        *minor_status = EINVAL;
        return GSS_S_FAILURE;
    }

    trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header,
                                      padding, trailer, FALSE);
    if (major_status != GSS_S_COMPLETE) {
        return major_status;
    }

    token = (gss_cfx_wrap_token)header->buffer.value;

    if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
        return GSS_S_DEFECTIVE_TOKEN;

    /* Ignore unknown flags */
    token_flags = token->Flags &
        (CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    /* A token marked as sent by the acceptor must come from the peer. */
    if (token_flags & CFXSentByAcceptor) {
        if ((ctx->more_flags & LOCAL) == 0)
            return GSS_S_DEFECTIVE_TOKEN;
    }

    /* The acceptor-subkey flag must match what was negotiated. */
    if (ctx->more_flags & ACCEPTOR_SUBKEY) {
        if ((token_flags & CFXAcceptorSubkey) == 0)
            return GSS_S_DEFECTIVE_TOKEN;
    } else {
        if (token_flags & CFXAcceptorSubkey)
            return GSS_S_DEFECTIVE_TOKEN;
    }

    if (token->Filler != 0xFF)
        return GSS_S_DEFECTIVE_TOKEN;

    if (conf_state != NULL)
        *conf_state = (token_flags & CFXSealed) ? 1 : 0;

    /* EC (extra count) and RRC (right rotation count), big-endian. */
    ec = (token->EC[0] << 8) | token->EC[1];
    rrc = (token->RRC[0] << 8) | token->RRC[1];

    /*
     * Check sequence number
     */
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
        /* no support for 64-bit sequence numbers */
        *minor_status = ERANGE;
        return GSS_S_UNSEQ_TOKEN;
    }

    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
        *minor_status = 0;
        HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
        return ret;
    }
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * Decrypt and/or verify checksum
     */

    /* Key usage mirrors the peer's role: they sealed, we unseal. */
    if (ctx->more_flags & LOCAL) {
        usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    } else {
        usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    }

    /* +3 slots: krb5 header, E"header" (with ec filler), krb5 trailer. */
    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
        *minor_status = ENOMEM;
        major_status = GSS_S_FAILURE;
        goto failure;
    }

    if (token_flags & CFXSealed) {
        size_t k5tsize, k5hsize;

        krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
        krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);

        /* Rotate by RRC; bogus to do this in-place XXX */
        /* Check RRC */

        if (trailer == NULL) {
            /* DCE style: the whole rotated trailer lives in the header. */
            size_t gsstsize = k5tsize + sizeof(*token);
            size_t gsshsize = k5hsize + sizeof(*token);

            if (rrc != gsstsize) {
                major_status = GSS_S_DEFECTIVE_TOKEN;
                goto failure;
            }

            if (IS_DCE_STYLE(ctx))
                gsstsize += ec;

            gsshsize += gsstsize;

            if (header->buffer.length != gsshsize) {
                major_status = GSS_S_DEFECTIVE_TOKEN;
                goto failure;
            }
        } else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        } else if (header->buffer.length != sizeof(*token) + k5hsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        } else if (rrc != 0) {
            /* go though slowpath */
            major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
            if (major_status)
                goto failure;
        }

        /* krb5 crypto header lives at the tail of the GSS header buffer. */
        i = 0;
        data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
        data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
        data[i].data.length = k5hsize;
        i++;

        /* Map caller buffers 1:1 onto krb5 crypto iov slots. */
        for (j = 0; j < iov_count; i++, j++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[j].buffer.length;
            data[i].data.data = iov[j].buffer.value;
        }

        /* encrypted CFX header in trailer (or after the header if in
           DCE mode). Copy in header into E"header"
        */
        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        if (trailer) {
            data[i].data.data = trailer->buffer.value;
        } else {
            data[i].data.data = ((uint8_t *)header->buffer.value) +
                header->buffer.length - k5hsize - k5tsize - ec - sizeof(*token);
        }

        data[i].data.length = ec + sizeof(*token);
        /* ttoken: where the decrypted copy of the token header will appear. */
        ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
        i++;

        /* Kerberos trailer comes after the gss trailer */
        data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
        data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
        data[i].data.length = k5tsize;
        i++;

        ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
        if (ret != 0) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        /* The inner copy was encrypted with RRC = 0; patch before compare. */
        ttoken->RRC[0] = token->RRC[0];
        ttoken->RRC[1] = token->RRC[1];

        /* Check the integrity of the header */
        if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
            major_status = GSS_S_BAD_MIC;
            goto failure;
        }
    } else {
        /* Unsealed token: EC encodes the trailing checksum length. */
        size_t gsstsize = ec;
        size_t gsshsize = sizeof(*token);

        if (trailer == NULL) {
            /* Check RRC */
            if (rrc != gsstsize) {
                *minor_status = EINVAL;
                major_status = GSS_S_FAILURE;
                goto failure;
            }

            gsshsize += gsstsize;
        } else if (trailer->buffer.length != gsstsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        } else if (rrc != 0) {
            /* Check RRC */
            *minor_status = EINVAL;
            major_status = GSS_S_FAILURE;
            goto failure;
        }

        if (header->buffer.length != gsshsize) {
            major_status = GSS_S_DEFECTIVE_TOKEN;
            goto failure;
        }

        for (i = 0; i < iov_count; i++) {
            switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
            case GSS_IOV_BUFFER_TYPE_DATA:
                data[i].flags = KRB5_CRYPTO_TYPE_DATA;
                break;
            case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
                data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
                break;
            default:
                data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
                break;
            }
            data[i].data.length = iov[i].buffer.length;
            data[i].data.data = iov[i].buffer.value;
        }

        /* The token header itself is covered by the checksum. */
        data[i].flags = KRB5_CRYPTO_TYPE_DATA;
        data[i].data.data = header->buffer.value;
        data[i].data.length = sizeof(*token);
        i++;

        data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
        if (trailer) {
            data[i].data.data = trailer->buffer.value;
        } else {
            data[i].data.data = (uint8_t *)header->buffer.value +
                sizeof(*token);
        }
        data[i].data.length = ec;
        i++;

        /* EC and RRC were 00 00 when the checksum was computed. */
        token = (gss_cfx_wrap_token)header->buffer.value;
        token->EC[0] = 0;
        token->EC[1] = 0;
        token->RRC[0] = 0;
        token->RRC[1] = 0;

        ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
        if (ret) {
            *minor_status = ret;
            major_status = GSS_S_FAILURE;
            goto failure;
        }
    }

    if (qop_state != NULL) {
        *qop_state = GSS_C_QOP_DEFAULT;
    }

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

failure:
    if (data)
        free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}
1044
/*
 * Compute the buffer lengths _gssapi_wrap_cfx_iov() will need, without
 * performing any cryptographic work: on success the HEADER (and
 * TRAILER, if present) buffer lengths are filled in; a PADDING buffer
 * is always set to zero length (RFC 4121 pads via the EC field).
 *
 * The computation mirrors the sizing logic in _gssapi_wrap_cfx_iov();
 * the two must stay in agreement.
 *
 * NOTE(review): duplicate header/trailer/padding buffers fail with
 * *minor_status = 0, while unknown buffer types fail with EINVAL —
 * preserved as-is since callers may depend on the distinction.
 */
OM_uint32
_gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
                            gsskrb5_ctx ctx,
                            krb5_context context,
                            int conf_req_flag,
                            gss_qop_t qop_req,
                            int *conf_state,
                            gss_iov_buffer_desc *iov,
                            int iov_count)
{
    OM_uint32 major_status;
    size_t size;
    int i;
    gss_iov_buffer_desc *header = NULL;
    gss_iov_buffer_desc *padding = NULL;
    gss_iov_buffer_desc *trailer = NULL;
    size_t gsshsize = 0;
    size_t gsstsize = 0;
    size_t k5hsize = 0;
    size_t k5tsize = 0;

    GSSAPI_KRB5_INIT (&context);
    *minor_status = 0;

    /* Classify the buffers and total up the DATA payload length. */
    for (size = 0, i = 0; i < iov_count; i++) {
        switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
        case GSS_IOV_BUFFER_TYPE_EMPTY:
            break;
        case GSS_IOV_BUFFER_TYPE_DATA:
            size += iov[i].buffer.length;
            break;
        case GSS_IOV_BUFFER_TYPE_HEADER:
            if (header != NULL) {
                *minor_status = 0;
                return GSS_S_FAILURE;
            }
            header = &iov[i];
            break;
        case GSS_IOV_BUFFER_TYPE_TRAILER:
            if (trailer != NULL) {
                *minor_status = 0;
                return GSS_S_FAILURE;
            }
            trailer = &iov[i];
            break;
        case GSS_IOV_BUFFER_TYPE_PADDING:
            if (padding != NULL) {
                *minor_status = 0;
                return GSS_S_FAILURE;
            }
            padding = &iov[i];
            break;
        case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
            break;
        default:
            *minor_status = EINVAL;
            return GSS_S_FAILURE;
        }
    }

    major_status = _gk_verify_buffers(minor_status, ctx, header,
                                      padding, trailer, FALSE);
    if (major_status != GSS_S_COMPLETE) {
        return major_status;
    }

    if (conf_req_flag) {
        size_t k5psize = 0;
        size_t k5pbase = 0;
        size_t k5bsize = 0;
        size_t ec = 0;

        /* The token header is appended to the plaintext before encryption. */
        size += sizeof(gss_cfx_wrap_token_desc);

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_HEADER,
                                           &k5hsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_TRAILER,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_PADDING,
                                           &k5pbase);
        if (*minor_status)
            return GSS_S_FAILURE;

        /* ec = "extra count": filler bytes before the encrypted header. */
        if (k5pbase > 1) {
            k5psize = k5pbase - (size % k5pbase);
        } else {
            k5psize = 0;
        }

        if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
            /* DCE style pads a full cipher block when no padding is due. */
            *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
                                                     &k5bsize);
            if (*minor_status)
                return GSS_S_FAILURE;

            ec = k5bsize;
        } else {
            ec = k5psize;
        }

        gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
        gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
        /* Integrity only: trailer holds just the checksum. */
        *minor_status = krb5_crypto_length(context, ctx->crypto,
                                           KRB5_CRYPTO_TYPE_CHECKSUM,
                                           &k5tsize);
        if (*minor_status)
            return GSS_S_FAILURE;

        gsshsize = sizeof(gss_cfx_wrap_token_desc);
        gsstsize = k5tsize;
    }

    if (trailer != NULL) {
        trailer->buffer.length = gsstsize;
    } else {
        /* No trailer buffer: the trailer is rotated into the header. */
        gsshsize += gsstsize;
    }

    header->buffer.length = gsshsize;

    if (padding) {
        /* padding is done via EC and is contained in the header or trailer */
        padding->buffer.length = 0;
    }

    if (conf_state) {
        *conf_state = conf_req_flag;
    }

    return GSS_S_COMPLETE;
}
1186
1187
1188
1189
/*
 * Wrap a message into an RFC 4121 (CFX) Wrap token (TOK_ID 05 04).
 *
 * With confidentiality (conf_req_flag != 0) the result is
 *   {"header" | encrypt(plaintext | pad | "header")}
 * and RRC is set so the encrypted trailer is rotated up to sit right
 * after the 16-byte header.  Without confidentiality the result is
 *   {"header" | plaintext | checksum(plaintext | "header")}
 * rotated the same way.
 *
 * On success the caller owns output_message_buffer and *conf_state
 * (if non-NULL) reports whether encryption was applied.  On failure
 * *minor_status is set and the output buffer is released.
 */
OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
			   const gsskrb5_ctx ctx,
			   krb5_context context,
			   int conf_req_flag,
			   const gss_buffer_t input_message_buffer,
			   int *conf_state,
			   gss_buffer_t output_message_buffer)
{
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    unsigned usage;
    krb5_data cipher;
    size_t wrapped_len, cksumsize;
    uint16_t padlength, rrc = 0;
    int32_t seq_number;
    u_char *p;

    /* Work out total token length, checksum size and pad length. */
    ret = _gsskrb5cfx_wrap_length_cfx(context,
				      ctx->crypto, conf_req_flag,
				      IS_DCE_STYLE(ctx),
				      input_message_buffer->length,
				      &wrapped_len, &cksumsize, &padlength);
    if (ret != 0) {
	*minor_status = ret;
	return GSS_S_FAILURE;
    }

    /* Always rotate encrypted token (if any) and checksum to header */
    rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;

    output_message_buffer->length = wrapped_len;
    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }

    /* Build the token header at the front of the output buffer. */
    p = output_message_buffer->value;
    token = (gss_cfx_wrap_token)p;
    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags = 0;
    token->Filler = 0xFF;
    if ((ctx->more_flags & LOCAL) == 0)
	token->Flags |= CFXSentByAcceptor;
    if (ctx->more_flags & ACCEPTOR_SUBKEY)
	token->Flags |= CFXAcceptorSubkey;
    if (conf_req_flag) {
	/*
	 * In Wrap tokens with confidentiality, the EC field is
	 * used to encode the size (in bytes) of the random filler.
	 */
	token->Flags |= CFXSealed;
	token->EC[0] = (padlength >> 8) & 0xFF;
	token->EC[1] = (padlength >> 0) & 0xFF;
    } else {
	/*
	 * In Wrap tokens without confidentiality, the EC field is
	 * used to encode the size (in bytes) of the trailing
	 * checksum.
	 *
	 * This is not used in the checksum calculation itself,
	 * because the checksum length could potentially vary
	 * depending on the data length.
	 */
	token->EC[0] = 0;
	token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    /* Stamp and advance the sequence number (upper 32 bits are zero). */
    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    krb5_auth_con_getlocalseqnumber(context,
				    ctx->auth_context,
				    &seq_number);
    _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
    _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
    krb5_auth_con_setlocalseqnumber(context,
				    ctx->auth_context,
				    ++seq_number);
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * If confidentiality is requested, the token header is
     * appended to the plaintext before encryption; the resulting
     * token is {"header" | encrypt(plaintext | pad | "header")}.
     *
     * If no confidentiality is requested, the checksum is
     * calculated over the plaintext concatenated with the
     * token header.
     */
    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    }

    if (conf_req_flag) {
	/*
	 * Any necessary padding is added here to ensure that the
	 * encrypted token header is always at the end of the
	 * ciphertext.
	 *
	 * The specification does not require that the padding
	 * bytes are initialized.
	 */
	p += sizeof(*token);
	memcpy(p, input_message_buffer->value, input_message_buffer->length);
	memset(p + input_message_buffer->length, 0xFF, padlength);
	memcpy(p + input_message_buffer->length + padlength,
	       token, sizeof(*token));

	ret = krb5_encrypt(context, ctx->crypto,
			   usage, p,
			   input_message_buffer->length + padlength +
				sizeof(*token),
			   &cipher);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	assert(sizeof(*token) + cipher.length == wrapped_len);
	token->RRC[0] = (rrc >> 8) & 0xFF;
	token->RRC[1] = (rrc >> 0) & 0xFF;

	/*
	 * this is really ugly, but needed against windows
	 * for DCERPC, as windows rotates by EC+RRC.
	 */
	if (IS_DCE_STYLE(ctx)) {
	    ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
	} else {
	    ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
	}
	if (ret != 0) {
	    *minor_status = ret;
	    /* BUGFIX: free the ciphertext; it was leaked on this path. */
	    krb5_data_free(&cipher);
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	memcpy(p, cipher.data, cipher.length);
	krb5_data_free(&cipher);
    } else {
	char *buf;
	Checksum cksum;

	/* Checksum input is (plaintext-data | "header"). */
	buf = malloc(input_message_buffer->length + sizeof(*token));
	if (buf == NULL) {
	    *minor_status = ENOMEM;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	memcpy(buf, input_message_buffer->value, input_message_buffer->length);
	memcpy(buf + input_message_buffer->length, token, sizeof(*token));

	ret = krb5_create_checksum(context, ctx->crypto,
				   usage, 0, buf,
				   input_message_buffer->length +
					sizeof(*token),
				   &cksum);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    free(buf);
	    return GSS_S_FAILURE;
	}

	free(buf);

	assert(cksum.checksum.length == cksumsize);
	token->EC[0] = (cksum.checksum.length >> 8) & 0xFF;
	token->EC[1] = (cksum.checksum.length >> 0) & 0xFF;
	token->RRC[0] = (rrc >> 8) & 0xFF;
	token->RRC[1] = (rrc >> 0) & 0xFF;

	p += sizeof(*token);
	memcpy(p, input_message_buffer->value, input_message_buffer->length);
	memcpy(p + input_message_buffer->length,
	       cksum.checksum.data, cksum.checksum.length);

	/* Rotate the trailing checksum up behind the header. */
	ret = rrc_rotate(p,
	    input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    free_Checksum(&cksum);
	    return GSS_S_FAILURE;
	}
	free_Checksum(&cksum);
    }

    if (conf_state != NULL) {
	*conf_state = conf_req_flag;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}
1399
/*
 * Unwrap an RFC 4121 (CFX) Wrap token (TOK_ID 05 04).
 *
 * For sealed tokens the ciphertext is rotated back by RRC, decrypted,
 * and the embedded trailing copy of the token header is compared
 * (constant-time) against the outer header.  For unsealed tokens the
 * trailing checksum (length taken from the EC field) is verified over
 * (plaintext | "header" with EC/RRC zeroed).
 *
 * On success output_message_buffer holds the plaintext (caller frees),
 * *conf_state (if non-NULL) reports whether the token was sealed, and
 * *qop_state (if non-NULL) is GSS_C_QOP_DEFAULT.
 *
 * NOTE(review): both paths rotate the caller's input buffer in place
 * via rrc_rotate(), so input_message_buffer is modified.
 */
OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
			     const gsskrb5_ctx ctx,
			     krb5_context context,
			     const gss_buffer_t input_message_buffer,
			     gss_buffer_t output_message_buffer,
			     int *conf_state,
			     gss_qop_t *qop_state)
{
    gss_cfx_wrap_token token;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    krb5_data data;
    uint16_t ec, rrc;
    OM_uint32 seq_number_lo, seq_number_hi;
    size_t len;
    u_char *p;

    *minor_status = 0;

    /* Token must at least contain the fixed 16-byte header. */
    if (input_message_buffer->length < sizeof(*token)) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    p = input_message_buffer->value;

    token = (gss_cfx_wrap_token)p;

    if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Ignore unknown flags */
    token_flags = token->Flags &
	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    /* A token flagged as acceptor-sent must not arrive at the acceptor. */
    if (token_flags & CFXSentByAcceptor) {
	if ((ctx->more_flags & LOCAL) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    /* The token's subkey flag must match the context's negotiated state. */
    if (ctx->more_flags & ACCEPTOR_SUBKEY) {
	if ((token_flags & CFXAcceptorSubkey) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    } else {
	if (token_flags & CFXAcceptorSubkey)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    if (token->Filler != 0xFF) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    if (conf_state != NULL) {
	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
    }

    /* EC = extra count (pad or checksum size), RRC = right rotation count. */
    ec = (token->EC[0] << 8) | token->EC[1];
    rrc = (token->RRC[0] << 8) | token->RRC[1];

    /*
     * Check sequence number
     */
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	/* no support for 64-bit sequence numbers */
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
	/*
	 * NOTE(review): this function has not allocated
	 * output_message_buffer yet; this release assumes the caller
	 * passed in an initialized (empty) buffer — confirm callers.
	 */
	_gsskrb5_release_buffer(minor_status, output_message_buffer);
	return ret;
    }
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * Decrypt and/or verify checksum
     */

    /* Key usage mirrors the peer's sealing direction. */
    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    }

    /* len = bytes following the header (ciphertext or plaintext+cksum). */
    p += sizeof(*token);
    len = input_message_buffer->length;
    len -= (p - (u_char *)input_message_buffer->value);

    if (token_flags & CFXSealed) {
	/*
	 * this is really ugly, but needed against windows
	 * for DCERPC, as windows rotates by EC+RRC.
	 */
	if (IS_DCE_STYLE(ctx)) {
	    *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
	} else {
	    *minor_status = rrc_rotate(p, len, rrc, TRUE);
	}
	if (*minor_status != 0) {
	    return GSS_S_FAILURE;
	}

	ret = krb5_decrypt(context, ctx->crypto, usage,
	    p, len, &data);
	if (ret != 0) {
	    *minor_status = ret;
	    return GSS_S_BAD_MIC;
	}

	/* Check that there is room for the pad and token header */
	if (data.length < ec + sizeof(*token)) {
	    krb5_data_free(&data);
	    return GSS_S_DEFECTIVE_TOKEN;
	}
	/* p now points at the encrypted copy of the header inside data. */
	p = data.data;
	p += data.length - sizeof(*token);

	/* RRC is unprotected; don't modify input buffer */
	((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
	((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];

	/* Check the integrity of the header */
	if (ct_memcmp(p, token, sizeof(*token)) != 0) {
	    krb5_data_free(&data);
	    return GSS_S_BAD_MIC;
	}

	/* Plaintext is data minus the trailing pad and header copy. */
	output_message_buffer->value = data.data;
	output_message_buffer->length = data.length - ec - sizeof(*token);
    } else {
	Checksum cksum;

	/* Rotate by RRC; bogus to do this in-place XXX */
	*minor_status = rrc_rotate(p, len, rrc, TRUE);
	if (*minor_status != 0) {
	    return GSS_S_FAILURE;
	}

	/* Determine checksum type */
	ret = krb5_crypto_get_checksum_type(context,
					    ctx->crypto,
					    &cksum.cksumtype);
	if (ret != 0) {
	    *minor_status = ret;
	    return GSS_S_FAILURE;
	}

	/* For unsealed tokens EC encodes the checksum length. */
	cksum.checksum.length = ec;

	/* Check we have at least as much data as the checksum */
	if (len < cksum.checksum.length) {
	    *minor_status = ERANGE;
	    return GSS_S_BAD_MIC;
	}

	/* Length now is of the plaintext only, no checksum */
	len -= cksum.checksum.length;
	cksum.checksum.data = p + len;

	output_message_buffer->length = len; /* for later */
	output_message_buffer->value = malloc(len + sizeof(*token));
	if (output_message_buffer->value == NULL) {
	    *minor_status = ENOMEM;
	    return GSS_S_FAILURE;
	}

	/* Checksum is over (plaintext-data | "header") */
	memcpy(output_message_buffer->value, p, len);
	memcpy((u_char *)output_message_buffer->value + len,
	       token, sizeof(*token));

	/* EC is not included in checksum calculation */
	token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
				     len);
	token->EC[0] = 0;
	token->EC[1] = 0;
	token->RRC[0] = 0;
	token->RRC[1] = 0;

	ret = krb5_verify_checksum(context, ctx->crypto,
				   usage,
				   output_message_buffer->value,
				   len + sizeof(*token),
				   &cksum);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_BAD_MIC;
	}
    }

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}
1605
/*
 * Produce an RFC 4121 MIC token (TOK_ID 04 04) for message_buffer.
 *
 * The MIC is computed over (plaintext-data | "header"); the emitted
 * token is { "header" | checksum }.  On success the caller owns
 * message_token.  qop_req is accepted for interface compatibility but
 * not consulted.
 */
OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
			  const gsskrb5_ctx ctx,
			  krb5_context context,
			  gss_qop_t qop_req,
			  const gss_buffer_t message_buffer,
			  gss_buffer_t message_token)
{
    gss_cfx_mic_token token;
    krb5_error_code ret;
    unsigned usage;
    Checksum cksum;
    u_char *concat;
    size_t concat_len;
    int32_t seq;

    /* Build the checksum input: plaintext followed by the header. */
    concat_len = message_buffer->length + sizeof(*token);
    concat = malloc(concat_len);
    if (concat == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }

    memcpy(concat, message_buffer->value, message_buffer->length);

    /* The token header is assembled in place at the tail. */
    token = (gss_cfx_mic_token)(concat + message_buffer->length);
    token->TOK_ID[0] = 0x04;
    token->TOK_ID[1] = 0x04;
    token->Flags = 0;
    if ((ctx->more_flags & LOCAL) == 0)
	token->Flags |= CFXSentByAcceptor;
    if (ctx->more_flags & ACCEPTOR_SUBKEY)
	token->Flags |= CFXAcceptorSubkey;
    memset(token->Filler, 0xFF, 5);

    /* Stamp and advance the sequence number (upper 32 bits are zero). */
    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    krb5_auth_con_getlocalseqnumber(context,
				    ctx->auth_context,
				    &seq);
    _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]);
    _gsskrb5_encode_be_om_uint32(seq, &token->SND_SEQ[4]);
    krb5_auth_con_setlocalseqnumber(context,
				    ctx->auth_context,
				    ++seq);
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /* Signing key usage depends on which side we are. */
    usage = (ctx->more_flags & LOCAL) ?
	KRB5_KU_USAGE_INITIATOR_SIGN : KRB5_KU_USAGE_ACCEPTOR_SIGN;

    ret = krb5_create_checksum(context, ctx->crypto,
			       usage, 0, concat, concat_len, &cksum);
    if (ret != 0) {
	*minor_status = ret;
	free(concat);
	return GSS_S_FAILURE;
    }

    /* Emit { "header" | get_mic("header" | plaintext-data) }. */
    message_token->length = sizeof(*token) + cksum.checksum.length;
    message_token->value = malloc(message_token->length);
    if (message_token->value == NULL) {
	*minor_status = ENOMEM;
	free_Checksum(&cksum);
	free(concat);
	return GSS_S_FAILURE;
    }

    memcpy(message_token->value, token, sizeof(*token));
    memcpy((u_char *)message_token->value + sizeof(*token),
	   cksum.checksum.data, cksum.checksum.length);

    free_Checksum(&cksum);
    free(concat);

    *minor_status = 0;
    return GSS_S_COMPLETE;
}
1686
/*
 * Verify an RFC 4121 MIC token (TOK_ID 04 04) against message_buffer.
 *
 * The checksum carried in token_buffer is verified over the
 * concatenation (plaintext-data | "header").  On success *qop_state
 * (if non-NULL) is set to GSS_C_QOP_DEFAULT.
 */
OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
				 const gsskrb5_ctx ctx,
				 krb5_context context,
				 const gss_buffer_t message_buffer,
				 const gss_buffer_t token_buffer,
				 gss_qop_t *qop_state)
{
    gss_cfx_mic_token token;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    OM_uint32 seq_lo, seq_hi;
    u_char *signed_data, *tp;
    Checksum cksum;

    *minor_status = 0;

    /* The token must at least hold the fixed header. */
    if (token_buffer->length < sizeof(*token)) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    tp = token_buffer->value;
    token = (gss_cfx_mic_token)tp;

    if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Unknown flag bits are ignored. */
    token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);

    /* An acceptor-sent token must not arrive at the acceptor. */
    if ((token_flags & CFXSentByAcceptor) &&
	(ctx->more_flags & LOCAL) == 0) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Subkey flag must agree with the context's negotiated state. */
    if (ctx->more_flags & ACCEPTOR_SUBKEY) {
	if ((token_flags & CFXAcceptorSubkey) == 0) {
	    return GSS_S_DEFECTIVE_TOKEN;
	}
    } else if (token_flags & CFXAcceptorSubkey) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /*
     * Sequence number: only 32-bit values are supported, the upper
     * half must be zero.
     */
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_hi);
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_lo);
    if (seq_hi) {
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    ret = _gssapi_msg_order_check(ctx->order, seq_lo);
    if (ret != 0) {
	*minor_status = 0;
	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
	return ret;
    }
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * Verify the checksum over (plaintext-data | "header").
     */
    ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
					&cksum.cksumtype);
    if (ret != 0) {
	*minor_status = ret;
	return GSS_S_FAILURE;
    }

    /* The checksum bytes follow the header in the received token. */
    cksum.checksum.data = tp + sizeof(*token);
    cksum.checksum.length = token_buffer->length - sizeof(*token);

    /* Verification uses the peer's signing key usage. */
    usage = (ctx->more_flags & LOCAL) ?
	KRB5_KU_USAGE_ACCEPTOR_SIGN : KRB5_KU_USAGE_INITIATOR_SIGN;

    signed_data = malloc(message_buffer->length + sizeof(*token));
    if (signed_data == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }
    memcpy(signed_data, message_buffer->value, message_buffer->length);
    memcpy(signed_data + message_buffer->length, token, sizeof(*token));

    ret = krb5_verify_checksum(context, ctx->crypto,
			       usage,
			       signed_data,
			       sizeof(*token) + message_buffer->length,
			       &cksum);
    if (ret != 0) {
	*minor_status = ret;
	free(signed_data);
	return GSS_S_BAD_MIC;
    }

    free(signed_data);

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    return GSS_S_COMPLETE;
}
1800