xref: /netbsd-src/crypto/external/bsd/heimdal/dist/lib/gssapi/krb5/cfx.c (revision ba65fde2d7fefa7d39838fa5fa855e62bd606b5e)
1 /*	$NetBSD: cfx.c,v 1.1.1.1 2011/04/13 18:14:44 elric Exp $	*/
2 
3 /*
4  * Copyright (c) 2003, PADL Software Pty Ltd.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * 3. Neither the name of PADL Software nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include "gsskrb5_locl.h"
36 
37 /*
38  * Implementation of RFC 4121
39  */
40 
41 #define CFXSentByAcceptor	(1 << 0)
42 #define CFXSealed		(1 << 1)
43 #define CFXAcceptorSubkey	(1 << 2)
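/*
 * RFC 4121 section 4.2.6 defines the two 16-octet token headers built and
 * parsed below (gss_cfx_wrap_token_desc / gss_cfx_mic_token_desc):
 *
 *   Wrap: TOK_ID (2, 0x05 0x04) | Flags (1) | Filler (1, 0xFF) |
 *         EC (2, big-endian) | RRC (2, big-endian) | SND_SEQ (8, big-endian)
 *   MIC:  TOK_ID (2, 0x04 0x04) | Flags (1) | Filler (5, 0xFF) |
 *         SND_SEQ (8, big-endian)
 *
 * Flags is a bitmask of the CFX* values above.
 */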
44 
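/*
 * Compute the total size of a CFX Wrap token for input_length bytes of
 * payload, and return the checksum size and (for confidentiality) the
 * number of padding bytes that will be needed.
 */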
45 krb5_error_code
46 _gsskrb5cfx_wrap_length_cfx(krb5_context context,
47 			    krb5_crypto crypto,
48 			    int conf_req_flag,
49 			    int dce_style,
50 			    size_t input_length,
51 			    size_t *output_length,
52 			    size_t *cksumsize,
53 			    uint16_t *padlength)
54 {
55     krb5_error_code ret;
56     krb5_cksumtype type;
57 
58     /* 16-byte header is always first */
59     *output_length = sizeof(gss_cfx_wrap_token_desc);
60     *padlength = 0;
61 
62     ret = krb5_crypto_get_checksum_type(context, crypto, &type);
63     if (ret)
64 	return ret;
65 
66     ret = krb5_checksumsize(context, type, cksumsize);
67     if (ret)
68 	return ret;
69 
70     if (conf_req_flag) {
71 	size_t padsize;
72 
73 	/* Header is concatenated with data before encryption */
74 	input_length += sizeof(gss_cfx_wrap_token_desc);
75 
76 	if (dce_style) {
77 		ret = krb5_crypto_getblocksize(context, crypto, &padsize);
78 	} else {
79 		ret = krb5_crypto_getpadsize(context, crypto, &padsize);
80 	}
81 	if (ret) {
82 	    return ret;
83 	}
84 	if (padsize > 1) {
85 	    /* XXX check this */
86 	    *padlength = padsize - (input_length % padsize);
87 
88 	    /* We add the pad ourselves (noted here for completeness only) */
89 	    input_length += *padlength;
90 	}
91 
92 	*output_length += krb5_get_wrapped_length(context,
93 						  crypto, input_length);
94     } else {
95 	/* Checksum is concatenated with data */
96 	*output_length += input_length + *cksumsize;
97     }
98 
99     assert(*output_length > input_length);
100 
101     return 0;
102 }
103 
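/*
 * Given a maximum token size (req_output_size), compute the largest payload
 * (max_input_size) whose wrapped form still fits: in the confidentiality
 * case by searching downwards with krb5_get_wrapped_length(), otherwise by
 * subtracting the checksum size.
 */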
104 OM_uint32
105 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
106 		      const gsskrb5_ctx ctx,
107 		      krb5_context context,
108 		      int conf_req_flag,
109 		      gss_qop_t qop_req,
110 		      OM_uint32 req_output_size,
111 		      OM_uint32 *max_input_size)
112 {
113     krb5_error_code ret;
114 
115     *max_input_size = 0;
116 
117     /* 16-byte header is always first */
118     if (req_output_size < 16)
119 	return 0;
120     req_output_size -= 16;
121 
122     if (conf_req_flag) {
123 	size_t wrapped_size, sz;
124 
125 	wrapped_size = req_output_size + 1;
126 	do {
127 	    wrapped_size--;
128 	    sz = krb5_get_wrapped_length(context,
129 					 ctx->crypto, wrapped_size);
130 	} while (wrapped_size && sz > req_output_size);
131 	if (wrapped_size == 0)
132 	    return 0;
133 
134 	/* inner header */
135 	if (wrapped_size < 16)
136 	    return 0;
137 
138 	wrapped_size -= 16;
139 
140 	*max_input_size = wrapped_size;
141     } else {
142 	krb5_cksumtype type;
143 	size_t cksumsize;
144 
145 	ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
146 	if (ret) {
147 	    *minor_status = ret;
	    return GSS_S_FAILURE;
	}
148 
149 	ret = krb5_checksumsize(context, type, &cksumsize);
150 	if (ret) {
151 	    *minor_status = ret;
	    return GSS_S_FAILURE;
	}
152 
153 	if (req_output_size < cksumsize)
154 	    return 0;
155 
156 	/* Checksum is concatenated with data */
157 	*max_input_size = req_output_size - cksumsize;
158     }
159 
160     return 0;
161 }
162 
163 /*
164  * Rotate "rrc" bytes to the front or back
165  */
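/*
 * RFC 4121 section 4.2.5 example: with RRC = 3 the token
 * {"header" | aa | bb | cc | dd | ee | ff | gg | hh} becomes
 * {"header" | ff | gg | hh | aa | bb | cc | dd | ee} after rotation.
 * Only the part following the 16-octet header is passed in here, so the
 * rotation applies to the whole buffer.
 */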
166 
167 static krb5_error_code
168 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
169 {
170     u_char *tmp, buf[256];
171     size_t left;
172 
173     if (len == 0)
174 	return 0;
175 
176     rrc %= len;
177 
178     if (rrc == 0)
179 	return 0;
180 
181     left = len - rrc;
182 
183     if (rrc <= sizeof(buf)) {
184 	tmp = buf;
185     } else {
186 	tmp = malloc(rrc);
187 	if (tmp == NULL)
188 	    return ENOMEM;
189     }
190 
191     if (unrotate) {
192 	memcpy(tmp, data, rrc);
193 	memmove(data, (u_char *)data + rrc, left);
194 	memcpy((u_char *)data + left, tmp, rrc);
195     } else {
196 	memcpy(tmp, (u_char *)data + left, rrc);
197 	memmove((u_char *)data + rrc, data, left);
198 	memcpy(data, tmp, rrc);
199     }
200 
201     if (rrc > sizeof(buf))
202 	free(tmp);
203 
204     return 0;
205 }
206 
207 gss_iov_buffer_desc *
208 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
209 {
210     int i;
211 
212     for (i = 0; i < iov_count; i++)
213 	if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
214 	    return &iov[i];
215     return NULL;
216 }
217 
218 OM_uint32
219 _gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
220 {
221     if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
222 	if (buffer->buffer.length == size)
223 	    return GSS_S_COMPLETE;
224 	free(buffer->buffer.value);
225     }
226 
227     buffer->buffer.value = malloc(size);
228     buffer->buffer.length = size;
229     if (buffer->buffer.value == NULL) {
230 	*minor_status = ENOMEM;
231 	return GSS_S_FAILURE;
232     }
233     buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
234 
235     return GSS_S_COMPLETE;
236 }
237 
238 
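/*
 * Validate the caller-supplied buffer combination: a header buffer is
 * mandatory, DCE style forbids padding and trailer buffers, and the
 * non-DCE case requires a padding buffer.
 */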
239 OM_uint32
240 _gk_verify_buffers(OM_uint32 *minor_status,
241 		   const gsskrb5_ctx ctx,
242 		   const gss_iov_buffer_desc *header,
243 		   const gss_iov_buffer_desc *padding,
244 		   const gss_iov_buffer_desc *trailer)
245 {
246     if (header == NULL) {
247 	*minor_status = EINVAL;
248 	return GSS_S_FAILURE;
249     }
250 
251     if (IS_DCE_STYLE(ctx)) {
252 	/*
253 	 * In DCE style mode we reject having a padding or trailer buffer
254 	 */
255 	if (padding) {
256 	    *minor_status = EINVAL;
257 	    return GSS_S_FAILURE;
258 	}
259 	if (trailer) {
260 	    *minor_status = EINVAL;
261 	    return GSS_S_FAILURE;
262 	}
263     } else {
264 	/*
265 	 * In non-DCE style mode we require having a padding buffer
266 	 */
267 	if (padding == NULL) {
268 	    *minor_status = EINVAL;
269 	    return GSS_S_FAILURE;
270 	}
271     }
272 
273     *minor_status = 0;
274     return GSS_S_COMPLETE;
275 }
276 
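/*
 * Wrap an IOV message (RFC 4121).  With conf_req_flag the DATA buffers are
 * encrypted in place and the gss header/trailer buffers carry the krb5
 * crypto header, the EC padding, the encrypted copy of the token header and
 * the krb5 trailer; without confidentiality only a checksum over the data
 * and the token header is appended.
 */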
277 OM_uint32
278 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
279 		     gsskrb5_ctx ctx,
280 		     krb5_context context,
281 		     int conf_req_flag,
282 		     int *conf_state,
283 		     gss_iov_buffer_desc *iov,
284 		     int iov_count)
285 {
286     OM_uint32 major_status, junk;
287     gss_iov_buffer_desc *header, *trailer, *padding;
288     size_t gsshsize, k5hsize;
289     size_t gsstsize, k5tsize;
290     size_t i, rrc = 0, ec = 0;
291     gss_cfx_wrap_token token;
292     krb5_error_code ret;
293     int32_t seq_number;
294     unsigned usage;
295     krb5_crypto_iov *data = NULL;
296 
297     header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
298     if (header == NULL) {
299 	*minor_status = EINVAL;
300 	return GSS_S_FAILURE;
301     }
302 
303     padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
304     if (padding != NULL) {
305 	padding->buffer.length = 0;
306     }
307 
308     trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
309 
310     major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
311     if (major_status != GSS_S_COMPLETE) {
312 	    return major_status;
313     }
314 
315     if (conf_req_flag) {
316 	size_t k5psize = 0;
317 	size_t k5pbase = 0;
318 	size_t k5bsize = 0;
319 	size_t size = 0;
320 
321 	for (i = 0; i < iov_count; i++) {
322 	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
323 	    case GSS_IOV_BUFFER_TYPE_DATA:
324 		size += iov[i].buffer.length;
325 		break;
326 	    default:
327 		break;
328 	    }
329 	}
330 
331 	size += sizeof(gss_cfx_wrap_token_desc);
332 
333 	*minor_status = krb5_crypto_length(context, ctx->crypto,
334 					   KRB5_CRYPTO_TYPE_HEADER,
335 					   &k5hsize);
336 	if (*minor_status)
337 	    return GSS_S_FAILURE;
338 
339 	*minor_status = krb5_crypto_length(context, ctx->crypto,
340 					   KRB5_CRYPTO_TYPE_TRAILER,
341 					   &k5tsize);
342 	if (*minor_status)
343 	    return GSS_S_FAILURE;
344 
345 	*minor_status = krb5_crypto_length(context, ctx->crypto,
346 					   KRB5_CRYPTO_TYPE_PADDING,
347 					   &k5pbase);
348 	if (*minor_status)
349 	    return GSS_S_FAILURE;
350 
351 	if (k5pbase > 1) {
352 	    k5psize = k5pbase - (size % k5pbase);
353 	} else {
354 	    k5psize = 0;
355 	}
356 
357 	if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
358 	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
359 						     &k5bsize);
360 	    if (*minor_status)
361 		return GSS_S_FAILURE;
362 	    ec = k5bsize;
363 	} else {
364 	    ec = k5psize;
365 	}
366 
367 	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
368 	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
369     } else {
370 	if (IS_DCE_STYLE(ctx)) {
371 	    *minor_status = EINVAL;
372 	    return GSS_S_FAILURE;
373 	}
374 
375 	k5hsize = 0;
376 	*minor_status = krb5_crypto_length(context, ctx->crypto,
377 					   KRB5_CRYPTO_TYPE_CHECKSUM,
378 					   &k5tsize);
379 	if (*minor_status)
380 	    return GSS_S_FAILURE;
381 
382 	gsshsize = sizeof(gss_cfx_wrap_token_desc);
383 	gsstsize = k5tsize;
384     }
385 
386     /*
387      * Size the gss trailer; with no trailer buffer it is folded into the header and the rotation (RRC) recorded.
388      */
389 
390     if (trailer == NULL) {
391 	rrc = gsstsize;
392 	if (IS_DCE_STYLE(ctx))
393 	    rrc -= ec;
394 	gsshsize += gsstsize;
395 	gsstsize = 0;
396     } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
397 	major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
398 	if (major_status)
399 	    goto failure;
400     } else if (trailer->buffer.length < gsstsize) {
401 	*minor_status = KRB5_BAD_MSIZE;
402 	major_status = GSS_S_FAILURE;
403 	goto failure;
404     } else
405 	trailer->buffer.length = gsstsize;
406 
407     /*
408      * Size (or allocate) the gss header buffer.
409      */
410 
411     if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
412 	major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
413 	if (major_status != GSS_S_COMPLETE)
414 	    goto failure;
415     } else if (header->buffer.length < gsshsize) {
416 	*minor_status = KRB5_BAD_MSIZE;
417 	major_status = GSS_S_FAILURE;
418 	goto failure;
419     } else
420 	header->buffer.length = gsshsize;
421 
422     token = (gss_cfx_wrap_token)header->buffer.value;
423 
424     token->TOK_ID[0] = 0x05;
425     token->TOK_ID[1] = 0x04;
426     token->Flags     = 0;
427     token->Filler    = 0xFF;
428 
    if ((ctx->more_flags & LOCAL) == 0)
	token->Flags |= CFXSentByAcceptor;

429     if (ctx->more_flags & ACCEPTOR_SUBKEY)
430 	token->Flags |= CFXAcceptorSubkey;
431 
432     if (ctx->more_flags & LOCAL)
433 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
434     else
435 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
436 
437     if (conf_req_flag) {
438 	/*
439 	 * In Wrap tokens with confidentiality, the EC field is
440 	 * used to encode the size (in bytes) of the random filler.
441 	 */
442 	token->Flags |= CFXSealed;
443 	token->EC[0] = (ec >> 8) & 0xFF;
444 	token->EC[1] = (ec >> 0) & 0xFF;
445 
446     } else {
447 	/*
448 	 * In Wrap tokens without confidentiality, the EC field is
449 	 * used to encode the size (in bytes) of the trailing
450 	 * checksum.
451 	 *
452 	 * This is not used in the checksum calculation itself,
453 	 * because the checksum length could potentially vary
454 	 * depending on the data length.
455 	 */
456 	token->EC[0] = 0;
457 	token->EC[1] = 0;
458     }
459 
460     /*
461      * In Wrap tokens that provide for confidentiality, the RRC
462      * field in the header contains the hex value 00 00 before
463      * encryption.
464      *
465      * In Wrap tokens that do not provide for confidentiality,
466      * both the EC and RRC fields in the appended checksum
467      * contain the hex value 00 00 for the purpose of calculating
468      * the checksum.
469      */
470     token->RRC[0] = 0;
471     token->RRC[1] = 0;
472 
473     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
474     krb5_auth_con_getlocalseqnumber(context,
475 				    ctx->auth_context,
476 				    &seq_number);
477     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
478     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
479     krb5_auth_con_setlocalseqnumber(context,
480 				    ctx->auth_context,
481 				    ++seq_number);
482     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
483 
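    /*
     * Up to three krb5_crypto_iov slots are needed in addition to the
     * caller's buffers: the krb5 header, the EC padding + E"header"
     * element, and the krb5 trailer (or checksum).
     */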
484     data = calloc(iov_count + 3, sizeof(data[0]));
485     if (data == NULL) {
486 	*minor_status = ENOMEM;
487 	major_status = GSS_S_FAILURE;
488 	goto failure;
489     }
490 
491     if (conf_req_flag) {
492 	/*
493 	  plain packet:
494 
495 	  {"header" | encrypt(plaintext-data | ec-padding | E"header")}
496 
497 	  Expanded, this is with RRC = 0:
498 
499 	  {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }
500 
501 	  With no trailer buffer (DCE-RPC mode): RRC = length(ec-padding | E"header" | krb5-trailer); in DCE mode the ec-padding is not counted in RRC.
502 
503 	  {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data  }
504 	 */
505 
506 	i = 0;
507 	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
508 	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
509 	data[i].data.length = k5hsize;
510 
511 	for (i = 1; i < iov_count + 1; i++) {
512 	    switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
513 	    case GSS_IOV_BUFFER_TYPE_DATA:
514 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
515 		break;
516 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
517 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
518 		break;
519 	    default:
520 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
521 		break;
522 	    }
523 	    data[i].data.length = iov[i - 1].buffer.length;
524 	    data[i].data.data = iov[i - 1].buffer.value;
525 	}
526 
527 	/*
528 	 * Any necessary padding is added here to ensure that the
529 	 * encrypted token header is always at the end of the
530 	 * ciphertext.
531 	 */
532 
533 	/* The encrypted copy of the CFX header goes in the trailer (or right
534 	   after the gss header when there is no trailer buffer / DCE mode).
535 	   Copy the token header into E"header". */
536 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
537 	if (trailer)
538 	    data[i].data.data = trailer->buffer.value;
539 	else
540 	    data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);
541 
542 	data[i].data.length = ec + sizeof(*token);
543 	memset(data[i].data.data, 0xFF, ec);
544 	memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
545 	i++;
546 
547 	/* Kerberos trailer comes after the gss trailer */
548 	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
549 	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
550 	data[i].data.length = k5tsize;
551 	i++;
552 
553 	ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
554 	if (ret != 0) {
555 	    *minor_status = ret;
556 	    major_status = GSS_S_FAILURE;
557 	    goto failure;
558 	}
559 
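	/*
	 * With no trailer buffer the encrypted trailing parts were written
	 * right after the gss header instead of at the end, i.e. the token
	 * is already rotated; record the rotation count in the
	 * (unencrypted) RRC field.
	 */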
560 	if (rrc) {
561 	    token->RRC[0] = (rrc >> 8) & 0xFF;
562 	    token->RRC[1] = (rrc >> 0) & 0xFF;
563 	}
564 
565     } else {
566 	/*
567 	  plain packet:
568 
569 	  {data | "header" | gss-trailer (krb5 checksum)}
570 
571 	  don't do RRC != 0
572 
573 	 */
574 
575 	for (i = 0; i < iov_count; i++) {
576 	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
577 	    case GSS_IOV_BUFFER_TYPE_DATA:
578 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
579 		break;
580 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
581 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
582 		break;
583 	    default:
584 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
585 		break;
586 	    }
587 	    data[i].data.length = iov[i].buffer.length;
588 	    data[i].data.data = iov[i].buffer.value;
589 	}
590 
591 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
592 	data[i].data.data = header->buffer.value;
593 	data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
594 	i++;
595 
596 	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
597 	if (trailer) {
598 		data[i].data.data = trailer->buffer.value;
599 	} else {
600 		data[i].data.data = (uint8_t *)header->buffer.value +
601 				     sizeof(gss_cfx_wrap_token_desc);
602 	}
603 	data[i].data.length = k5tsize;
604 	i++;
605 
606 	ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
607 	if (ret) {
608 	    *minor_status = ret;
609 	    major_status = GSS_S_FAILURE;
610 	    goto failure;
611 	}
612 
613 	if (rrc) {
614 	    token->RRC[0] = (rrc >> 8) & 0xFF;
615 	    token->RRC[1] = (rrc >> 0) & 0xFF;
616 	}
617 
618 	token->EC[0] =  (k5tsize >> 8) & 0xFF;
619 	token->EC[1] =  (k5tsize >> 0) & 0xFF;
620     }
621 
622     if (conf_state != NULL)
623 	*conf_state = conf_req_flag;
624 
625     free(data);
626 
627     *minor_status = 0;
628     return GSS_S_COMPLETE;
629 
630  failure:
631     if (data)
632 	free(data);
633 
634     gss_release_iov_buffer(&junk, iov, iov_count);
635 
636     return major_status;
637 }
638 
639 /* This is the slow path */
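/*
 * Undo the RRC rotation when the rotated region spans several IOV buffers:
 * linearize the DATA/PADDING/TRAILER buffers into a scratch copy, then
 * write the bytes back into the buffers in unrotated order.
 */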
640 static OM_uint32
641 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
642 {
643     uint8_t *p, *q;
644     size_t len = 0, skip;
645     int i;
646 
647     for (i = 0; i < iov_count; i++)
648 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
649 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
650 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
651 	    len += iov[i].buffer.length;
652 
653     p = malloc(len);
654     if (p == NULL) {
655 	*minor_status = ENOMEM;
656 	return GSS_S_FAILURE;
657     }
658     q = p;
659 
660     /* copy up */
661 
662     for (i = 0; i < iov_count; i++) {
663 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
664 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
665 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
666 	{
667 	    memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
668 	    q += iov[i].buffer.length;
669 	}
670     }
671     assert((q - p) == len);
672 
673     /* unrotate first part */
674     q = p + rrc;
675     skip = rrc;
676     for (i = 0; i < iov_count; i++) {
677 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
678 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
679 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
680 	{
681 	    if (iov[i].buffer.length <= skip) {
682 		skip -= iov[i].buffer.length;
683 	    } else {
684 		memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
685 		q += iov[i].buffer.length - skip;
686 		skip = 0;
687 	    }
688 	}
689     }
690     /* copy trailer */
691     q = p;
692     skip = rrc;
693     for (i = 0; i < iov_count; i++) {
694 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
695 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
696 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
697 	{
698 	    memcpy(q, iov[i].buffer.value, min(iov[i].buffer.length, skip));
699 	    if (iov[i].buffer.length > skip)
700 		break;
701 	    skip -= iov[i].buffer.length;
702 	    q += iov[i].buffer.length;
703 	}
704     }
    free(p);
705     return GSS_S_COMPLETE;
706 }
707 
708 
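/*
 * Unwrap an IOV message: parse and sanity-check the token header, undo any
 * RRC rotation, then either decrypt in place and compare the decrypted copy
 * of the header (sealed tokens) or verify the checksum over the data and
 * the header with EC/RRC zeroed (unsealed tokens).
 */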
709 OM_uint32
710 _gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
711 		       gsskrb5_ctx ctx,
712 		       krb5_context context,
713 		       int *conf_state,
714 		       gss_qop_t *qop_state,
715 		       gss_iov_buffer_desc *iov,
716 		       int iov_count)
717 {
718     OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
719     gss_iov_buffer_desc *header, *trailer, *padding;
720     gss_cfx_wrap_token token, ttoken;
721     u_char token_flags;
722     krb5_error_code ret;
723     unsigned usage;
724     uint16_t ec, rrc;
725     krb5_crypto_iov *data = NULL;
726     int i, j;
727 
728     *minor_status = 0;
729 
730     header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
731     if (header == NULL) {
732 	*minor_status = EINVAL;
733 	return GSS_S_FAILURE;
734     }
735 
736     if (header->buffer.length < sizeof(*token)) /* the exact length is checked below */
737 	return GSS_S_DEFECTIVE_TOKEN;
738 
739     padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
740     if (padding != NULL && padding->buffer.length != 0) {
741 	*minor_status = EINVAL;
742 	return GSS_S_FAILURE;
743     }
744 
745     trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
746 
747     major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
748     if (major_status != GSS_S_COMPLETE) {
749 	    return major_status;
750     }
751 
752     token = (gss_cfx_wrap_token)header->buffer.value;
753 
754     if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
755 	return GSS_S_DEFECTIVE_TOKEN;
756 
757     /* Ignore unknown flags */
758     token_flags = token->Flags &
759 	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
760 
761     if (token_flags & CFXSentByAcceptor) {
762 	if ((ctx->more_flags & LOCAL) == 0)
763 	    return GSS_S_DEFECTIVE_TOKEN;
764     }
765 
766     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
767 	if ((token_flags & CFXAcceptorSubkey) == 0)
768 	    return GSS_S_DEFECTIVE_TOKEN;
769     } else {
770 	if (token_flags & CFXAcceptorSubkey)
771 	    return GSS_S_DEFECTIVE_TOKEN;
772     }
773 
774     if (token->Filler != 0xFF)
775 	return GSS_S_DEFECTIVE_TOKEN;
776 
777     if (conf_state != NULL)
778 	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
779 
780     ec  = (token->EC[0]  << 8) | token->EC[1];
781     rrc = (token->RRC[0] << 8) | token->RRC[1];
782 
783     /*
784      * Check sequence number
785      */
786     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
787     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
788     if (seq_number_hi) {
789 	/* no support for 64-bit sequence numbers */
790 	*minor_status = ERANGE;
791 	return GSS_S_UNSEQ_TOKEN;
792     }
793 
794     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
795     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
796     if (ret != 0) {
797 	*minor_status = 0;
798 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
799 	return ret;
800     }
801     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
802 
803     /*
804      * Decrypt and/or verify checksum
805      */
806 
807     if (ctx->more_flags & LOCAL) {
808 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
809     } else {
810 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
811     }
812 
813     data = calloc(iov_count + 3, sizeof(data[0]));
814     if (data == NULL) {
815 	*minor_status = ENOMEM;
816 	major_status = GSS_S_FAILURE;
817 	goto failure;
818     }
819 
820     if (token_flags & CFXSealed) {
821 	size_t k5tsize, k5hsize;
822 
823 	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
824 	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);
825 
826 	/* Rotate by RRC; bogus to do this in-place XXX */
827 	/* Check RRC */
828 
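	/*
	 * Sanity-check the buffer lengths and the RRC against what the wrap
	 * side would have produced; with no trailer buffer all of the
	 * trailing material must live in the header buffer.
	 */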
829 	if (trailer == NULL) {
830 	    size_t gsstsize = k5tsize + sizeof(*token);
831 	    size_t gsshsize = k5hsize + sizeof(*token);
832 
833 	    if (rrc != gsstsize) {
834 		major_status = GSS_S_DEFECTIVE_TOKEN;
835 		goto failure;
836 	    }
837 
838 	    if (IS_DCE_STYLE(ctx))
839 		gsstsize += ec;
840 
841 	    gsshsize += gsstsize;
842 
843 	    if (header->buffer.length != gsshsize) {
844 		major_status = GSS_S_DEFECTIVE_TOKEN;
845 		goto failure;
846 	    }
847 	} else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
848 	    major_status = GSS_S_DEFECTIVE_TOKEN;
849 	    goto failure;
850 	} else if (header->buffer.length != sizeof(*token) + k5hsize) {
851 	    major_status = GSS_S_DEFECTIVE_TOKEN;
852 	    goto failure;
853 	} else if (rrc != 0) {
854 	    /* go through the slow path */
855 	    major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
856 	    if (major_status)
857 		goto failure;
858 	}
859 
860 	i = 0;
861 	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
862 	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
863 	data[i].data.length = k5hsize;
864 	i++;
865 
866 	for (j = 0; j < iov_count; i++, j++) {
867 	    switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
868 	    case GSS_IOV_BUFFER_TYPE_DATA:
869 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
870 		break;
871 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
872 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
873 		break;
874 	    default:
875 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
876 		break;
877 	    }
878 	    data[i].data.length = iov[j].buffer.length;
879 	    data[i].data.data = iov[j].buffer.value;
880 	}
881 
882 	/* The encrypted copy of the CFX header is in the trailer (or inside
883 	   the header buffer when there is no trailer buffer / DCE mode).
884 	   Point the decryption iovec at it. */
885 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
886 	if (trailer) {
887 	    data[i].data.data = trailer->buffer.value;
888 	} else {
889 	    data[i].data.data = ((uint8_t *)header->buffer.value) +
890 		header->buffer.length - k5hsize - k5tsize - ec - sizeof(*token);
891 	}
892 
893 	data[i].data.length = ec + sizeof(*token);
894 	ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
895 	i++;
896 
897 	/* Kerberos trailer comes after the gss trailer */
898 	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
899 	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
900 	data[i].data.length = k5tsize;
901 	i++;
902 
903 	ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
904 	if (ret != 0) {
905 	    *minor_status = ret;
906 	    major_status = GSS_S_FAILURE;
907 	    goto failure;
908 	}
909 
910 	ttoken->RRC[0] = token->RRC[0];
911 	ttoken->RRC[1] = token->RRC[1];
912 
913 	/* Check the integrity of the header */
914 	if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
915 	    major_status = GSS_S_BAD_MIC;
916 	    goto failure;
917 	}
918     } else {
919 	size_t gsstsize = ec;
920 	size_t gsshsize = sizeof(*token);
921 
922 	if (trailer == NULL) {
923 	    /* Check RRC */
924 	    if (rrc != gsstsize) {
925 	       *minor_status = EINVAL;
926 	       major_status = GSS_S_FAILURE;
927 	       goto failure;
928 	    }
929 
930 	    gsshsize += gsstsize;
931 	    gsstsize = 0;
932 	} else if (trailer->buffer.length != gsstsize) {
933 	    major_status = GSS_S_DEFECTIVE_TOKEN;
934 	    goto failure;
935 	} else if (rrc != 0) {
936 	    /* Check RRC */
937 	    *minor_status = EINVAL;
938 	    major_status = GSS_S_FAILURE;
939 	    goto failure;
940 	}
941 
942 	if (header->buffer.length != gsshsize) {
943 	    major_status = GSS_S_DEFECTIVE_TOKEN;
944 	    goto failure;
945 	}
946 
947 	for (i = 0; i < iov_count; i++) {
948 	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
949 	    case GSS_IOV_BUFFER_TYPE_DATA:
950 		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
951 		break;
952 	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
953 		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
954 		break;
955 	    default:
956 		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
957 		break;
958 	    }
959 	    data[i].data.length = iov[i].buffer.length;
960 	    data[i].data.data = iov[i].buffer.value;
961 	}
962 
963 	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
964 	data[i].data.data = header->buffer.value;
965 	data[i].data.length = sizeof(*token);
966 	i++;
967 
968 	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
969 	if (trailer) {
970 		data[i].data.data = trailer->buffer.value;
971 	} else {
972 		data[i].data.data = (uint8_t *)header->buffer.value +
973 				     sizeof(*token);
974 	}
975 	data[i].data.length = ec;
976 	i++;
977 
978 	token = (gss_cfx_wrap_token)header->buffer.value;
979 	token->EC[0]  = 0;
980 	token->EC[1]  = 0;
981 	token->RRC[0] = 0;
982 	token->RRC[1] = 0;
983 
984 	ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
985 	if (ret) {
986 	    *minor_status = ret;
987 	    major_status = GSS_S_FAILURE;
988 	    goto failure;
989 	}
990     }
991 
992     if (qop_state != NULL) {
993 	*qop_state = GSS_C_QOP_DEFAULT;
994     }
995 
996     free(data);
997 
998     *minor_status = 0;
999     return GSS_S_COMPLETE;
1000 
1001  failure:
1002     if (data)
1003 	free(data);
1004 
1005     gss_release_iov_buffer(&junk, iov, iov_count);
1006 
1007     return major_status;
1008 }
1009 
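/*
 * Compute how large the header, padding and trailer buffers must be for a
 * subsequent _gssapi_wrap_cfx_iov() call with the same parameters; the
 * sizing mirrors the wrap path above, without touching the payload.
 */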
1010 OM_uint32
1011 _gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
1012 			    gsskrb5_ctx ctx,
1013 			    krb5_context context,
1014 			    int conf_req_flag,
1015 			    gss_qop_t qop_req,
1016 			    int *conf_state,
1017 			    gss_iov_buffer_desc *iov,
1018 			    int iov_count)
1019 {
1020     OM_uint32 major_status;
1021     size_t size;
1022     int i;
1023     gss_iov_buffer_desc *header = NULL;
1024     gss_iov_buffer_desc *padding = NULL;
1025     gss_iov_buffer_desc *trailer = NULL;
1026     size_t gsshsize = 0;
1027     size_t gsstsize = 0;
1028     size_t k5hsize = 0;
1029     size_t k5tsize = 0;
1030 
1031     GSSAPI_KRB5_INIT (&context);
1032     *minor_status = 0;
1033 
1034     for (size = 0, i = 0; i < iov_count; i++) {
1035 	switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
1036 	case GSS_IOV_BUFFER_TYPE_EMPTY:
1037 	    break;
1038 	case GSS_IOV_BUFFER_TYPE_DATA:
1039 	    size += iov[i].buffer.length;
1040 	    break;
1041 	case GSS_IOV_BUFFER_TYPE_HEADER:
1042 	    if (header != NULL) {
1043 		*minor_status = 0;
1044 		return GSS_S_FAILURE;
1045 	    }
1046 	    header = &iov[i];
1047 	    break;
1048 	case GSS_IOV_BUFFER_TYPE_TRAILER:
1049 	    if (trailer != NULL) {
1050 		*minor_status = 0;
1051 		return GSS_S_FAILURE;
1052 	    }
1053 	    trailer = &iov[i];
1054 	    break;
1055 	case GSS_IOV_BUFFER_TYPE_PADDING:
1056 	    if (padding != NULL) {
1057 		*minor_status = 0;
1058 		return GSS_S_FAILURE;
1059 	    }
1060 	    padding = &iov[i];
1061 	    break;
1062 	case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
1063 	    break;
1064 	default:
1065 	    *minor_status = EINVAL;
1066 	    return GSS_S_FAILURE;
1067 	}
1068     }
1069 
1070     major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
1071     if (major_status != GSS_S_COMPLETE) {
1072 	    return major_status;
1073     }
1074 
1075     if (conf_req_flag) {
1076 	size_t k5psize = 0;
1077 	size_t k5pbase = 0;
1078 	size_t k5bsize = 0;
1079 	size_t ec = 0;
1080 
1081 	size += sizeof(gss_cfx_wrap_token_desc);
1082 
1083 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1084 					   KRB5_CRYPTO_TYPE_HEADER,
1085 					   &k5hsize);
1086 	if (*minor_status)
1087 	    return GSS_S_FAILURE;
1088 
1089 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1090 					   KRB5_CRYPTO_TYPE_TRAILER,
1091 					   &k5tsize);
1092 	if (*minor_status)
1093 	    return GSS_S_FAILURE;
1094 
1095 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1096 					   KRB5_CRYPTO_TYPE_PADDING,
1097 					   &k5pbase);
1098 	if (*minor_status)
1099 	    return GSS_S_FAILURE;
1100 
1101 	if (k5pbase > 1) {
1102 	    k5psize = k5pbase - (size % k5pbase);
1103 	} else {
1104 	    k5psize = 0;
1105 	}
1106 
1107 	if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
1108 	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
1109 						     &k5bsize);
1110 	    if (*minor_status)
1111 		return GSS_S_FAILURE;
1112 
1113 	    ec = k5bsize;
1114 	} else {
1115 	    ec = k5psize;
1116 	}
1117 
1118 	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
1119 	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
1120     } else {
1121 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1122 					   KRB5_CRYPTO_TYPE_CHECKSUM,
1123 					   &k5tsize);
1124 	if (*minor_status)
1125 	    return GSS_S_FAILURE;
1126 
1127 	gsshsize = sizeof(gss_cfx_wrap_token_desc);
1128 	gsstsize = k5tsize;
1129     }
1130 
1131     if (trailer != NULL) {
1132 	trailer->buffer.length = gsstsize;
1133     } else {
1134 	gsshsize += gsstsize;
1135     }
1136 
1137     header->buffer.length = gsshsize;
1138 
1139     if (padding) {
1140 	/* padding is done via EC and is contained in the header or trailer */
1141 	padding->buffer.length = 0;
1142     }
1143 
1144     if (conf_state) {
1145 	*conf_state = conf_req_flag;
1146     }
1147 
1148     return GSS_S_COMPLETE;
1149 }
1150 
1151 
1152 
1153 
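/*
 * Non-IOV wrap: build the complete token in a single newly allocated output
 * buffer.  The layout matches the IOV path; since everything is contiguous,
 * the RRC rotation is performed afterwards with rrc_rotate().
 */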
1154 OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
1155 			   const gsskrb5_ctx ctx,
1156 			   krb5_context context,
1157 			   int conf_req_flag,
1158 			   const gss_buffer_t input_message_buffer,
1159 			   int *conf_state,
1160 			   gss_buffer_t output_message_buffer)
1161 {
1162     gss_cfx_wrap_token token;
1163     krb5_error_code ret;
1164     unsigned usage;
1165     krb5_data cipher;
1166     size_t wrapped_len, cksumsize;
1167     uint16_t padlength, rrc = 0;
1168     int32_t seq_number;
1169     u_char *p;
1170 
1171     ret = _gsskrb5cfx_wrap_length_cfx(context,
1172 				      ctx->crypto, conf_req_flag,
1173 				      IS_DCE_STYLE(ctx),
1174 				      input_message_buffer->length,
1175 				      &wrapped_len, &cksumsize, &padlength);
1176     if (ret != 0) {
1177 	*minor_status = ret;
1178 	return GSS_S_FAILURE;
1179     }
1180 
1181     /* Always rotate encrypted token (if any) and checksum to header */
1182     rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
1183 
1184     output_message_buffer->length = wrapped_len;
1185     output_message_buffer->value = malloc(output_message_buffer->length);
1186     if (output_message_buffer->value == NULL) {
1187 	*minor_status = ENOMEM;
1188 	return GSS_S_FAILURE;
1189     }
1190 
1191     p = output_message_buffer->value;
1192     token = (gss_cfx_wrap_token)p;
1193     token->TOK_ID[0] = 0x05;
1194     token->TOK_ID[1] = 0x04;
1195     token->Flags     = 0;
1196     token->Filler    = 0xFF;
1197     if ((ctx->more_flags & LOCAL) == 0)
1198 	token->Flags |= CFXSentByAcceptor;
1199     if (ctx->more_flags & ACCEPTOR_SUBKEY)
1200 	token->Flags |= CFXAcceptorSubkey;
1201     if (conf_req_flag) {
1202 	/*
1203 	 * In Wrap tokens with confidentiality, the EC field is
1204 	 * used to encode the size (in bytes) of the random filler.
1205 	 */
1206 	token->Flags |= CFXSealed;
1207 	token->EC[0] = (padlength >> 8) & 0xFF;
1208 	token->EC[1] = (padlength >> 0) & 0xFF;
1209     } else {
1210 	/*
1211 	 * In Wrap tokens without confidentiality, the EC field is
1212 	 * used to encode the size (in bytes) of the trailing
1213 	 * checksum.
1214 	 *
1215 	 * This is not used in the checksum calculation itself,
1216 	 * because the checksum length could potentially vary
1217 	 * depending on the data length.
1218 	 */
1219 	token->EC[0] = 0;
1220 	token->EC[1] = 0;
1221     }
1222 
1223     /*
1224      * In Wrap tokens that provide for confidentiality, the RRC
1225      * field in the header contains the hex value 00 00 before
1226      * encryption.
1227      *
1228      * In Wrap tokens that do not provide for confidentiality,
1229      * both the EC and RRC fields in the appended checksum
1230      * contain the hex value 00 00 for the purpose of calculating
1231      * the checksum.
1232      */
1233     token->RRC[0] = 0;
1234     token->RRC[1] = 0;
1235 
1236     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1237     krb5_auth_con_getlocalseqnumber(context,
1238 				    ctx->auth_context,
1239 				    &seq_number);
1240     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
1241     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1242     krb5_auth_con_setlocalseqnumber(context,
1243 				    ctx->auth_context,
1244 				    ++seq_number);
1245     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1246 
1247     /*
1248      * If confidentiality is requested, the token header is
1249      * appended to the plaintext before encryption; the resulting
1250      * token is {"header" | encrypt(plaintext | pad | "header")}.
1251      *
1252      * If no confidentiality is requested, the checksum is
1253      * calculated over the plaintext concatenated with the
1254      * token header.
1255      */
1256     if (ctx->more_flags & LOCAL) {
1257 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1258     } else {
1259 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1260     }
1261 
1262     if (conf_req_flag) {
1263 	/*
1264 	 * Any necessary padding is added here to ensure that the
1265 	 * encrypted token header is always at the end of the
1266 	 * ciphertext.
1267 	 *
1268 	 * The specification does not require that the padding
1269 	 * bytes are initialized.
1270 	 */
1271 	p += sizeof(*token);
1272 	memcpy(p, input_message_buffer->value, input_message_buffer->length);
1273 	memset(p + input_message_buffer->length, 0xFF, padlength);
1274 	memcpy(p + input_message_buffer->length + padlength,
1275 	       token, sizeof(*token));
1276 
1277 	ret = krb5_encrypt(context, ctx->crypto,
1278 			   usage, p,
1279 			   input_message_buffer->length + padlength +
1280 				sizeof(*token),
1281 			   &cipher);
1282 	if (ret != 0) {
1283 	    *minor_status = ret;
1284 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1285 	    return GSS_S_FAILURE;
1286 	}
1287 	assert(sizeof(*token) + cipher.length == wrapped_len);
1288 	token->RRC[0] = (rrc >> 8) & 0xFF;
1289 	token->RRC[1] = (rrc >> 0) & 0xFF;
1290 
1291 	/*
1292 	 * This is really ugly, but needed for DCE-RPC interoperability
1293 	 * with Windows, which rotates by EC+RRC instead of just RRC.
1294 	 */
1295 	if (IS_DCE_STYLE(ctx)) {
1296 		ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
1297 	} else {
1298 		ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
1299 	}
1300 	if (ret != 0) {
1301 	    *minor_status = ret;
1302 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1303 	    return GSS_S_FAILURE;
1304 	}
1305 	memcpy(p, cipher.data, cipher.length);
1306 	krb5_data_free(&cipher);
1307     } else {
1308 	char *buf;
1309 	Checksum cksum;
1310 
1311 	buf = malloc(input_message_buffer->length + sizeof(*token));
1312 	if (buf == NULL) {
1313 	    *minor_status = ENOMEM;
1314 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1315 	    return GSS_S_FAILURE;
1316 	}
1317 	memcpy(buf, input_message_buffer->value, input_message_buffer->length);
1318 	memcpy(buf + input_message_buffer->length, token, sizeof(*token));
1319 
1320 	ret = krb5_create_checksum(context, ctx->crypto,
1321 				   usage, 0, buf,
1322 				   input_message_buffer->length +
1323 					sizeof(*token),
1324 				   &cksum);
1325 	if (ret != 0) {
1326 	    *minor_status = ret;
1327 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1328 	    free(buf);
1329 	    return GSS_S_FAILURE;
1330 	}
1331 
1332 	free(buf);
1333 
1334 	assert(cksum.checksum.length == cksumsize);
1335 	token->EC[0] =  (cksum.checksum.length >> 8) & 0xFF;
1336 	token->EC[1] =  (cksum.checksum.length >> 0) & 0xFF;
1337 	token->RRC[0] = (rrc >> 8) & 0xFF;
1338 	token->RRC[1] = (rrc >> 0) & 0xFF;
1339 
1340 	p += sizeof(*token);
1341 	memcpy(p, input_message_buffer->value, input_message_buffer->length);
1342 	memcpy(p + input_message_buffer->length,
1343 	       cksum.checksum.data, cksum.checksum.length);
1344 
1345 	ret = rrc_rotate(p,
1346 	    input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
1347 	if (ret != 0) {
1348 	    *minor_status = ret;
1349 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1350 	    free_Checksum(&cksum);
1351 	    return GSS_S_FAILURE;
1352 	}
1353 	free_Checksum(&cksum);
1354     }
1355 
1356     if (conf_state != NULL) {
1357 	*conf_state = conf_req_flag;
1358     }
1359 
1360     *minor_status = 0;
1361     return GSS_S_COMPLETE;
1362 }
1363 
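/*
 * Non-IOV unwrap: the inverse of _gssapi_wrap_cfx().  The RRC rotation is
 * undone first, then the payload is decrypted (sealed tokens) or the
 * trailing checksum is verified over plaintext | token header with the EC
 * and RRC fields zeroed (unsealed tokens).
 */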
1364 OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
1365 			     const gsskrb5_ctx ctx,
1366 			     krb5_context context,
1367 			     const gss_buffer_t input_message_buffer,
1368 			     gss_buffer_t output_message_buffer,
1369 			     int *conf_state,
1370 			     gss_qop_t *qop_state)
1371 {
1372     gss_cfx_wrap_token token;
1373     u_char token_flags;
1374     krb5_error_code ret;
1375     unsigned usage;
1376     krb5_data data;
1377     uint16_t ec, rrc;
1378     OM_uint32 seq_number_lo, seq_number_hi;
1379     size_t len;
1380     u_char *p;
1381 
1382     *minor_status = 0;
1383 
1384     if (input_message_buffer->length < sizeof(*token)) {
1385 	return GSS_S_DEFECTIVE_TOKEN;
1386     }
1387 
1388     p = input_message_buffer->value;
1389 
1390     token = (gss_cfx_wrap_token)p;
1391 
1392     if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
1393 	return GSS_S_DEFECTIVE_TOKEN;
1394     }
1395 
1396     /* Ignore unknown flags */
1397     token_flags = token->Flags &
1398 	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
1399 
1400     if (token_flags & CFXSentByAcceptor) {
1401 	if ((ctx->more_flags & LOCAL) == 0)
1402 	    return GSS_S_DEFECTIVE_TOKEN;
1403     }
1404 
1405     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1406 	if ((token_flags & CFXAcceptorSubkey) == 0)
1407 	    return GSS_S_DEFECTIVE_TOKEN;
1408     } else {
1409 	if (token_flags & CFXAcceptorSubkey)
1410 	    return GSS_S_DEFECTIVE_TOKEN;
1411     }
1412 
1413     if (token->Filler != 0xFF) {
1414 	return GSS_S_DEFECTIVE_TOKEN;
1415     }
1416 
1417     if (conf_state != NULL) {
1418 	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
1419     }
1420 
1421     ec  = (token->EC[0]  << 8) | token->EC[1];
1422     rrc = (token->RRC[0] << 8) | token->RRC[1];
1423 
1424     /*
1425      * Check sequence number
1426      */
1427     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1428     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1429     if (seq_number_hi) {
1430 	/* no support for 64-bit sequence numbers */
1431 	*minor_status = ERANGE;
1432 	return GSS_S_UNSEQ_TOKEN;
1433     }
1434 
1435     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1436     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1437     if (ret != 0) {
1438 	*minor_status = 0;
1439 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1440 	_gsskrb5_release_buffer(minor_status, output_message_buffer);
1441 	return ret;
1442     }
1443     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1444 
1445     /*
1446      * Decrypt and/or verify checksum
1447      */
1448 
1449     if (ctx->more_flags & LOCAL) {
1450 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1451     } else {
1452 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1453     }
1454 
1455     p += sizeof(*token);
1456     len = input_message_buffer->length;
1457     len -= (p - (u_char *)input_message_buffer->value);
1458 
1459     if (token_flags & CFXSealed) {
1460 	/*
1461 	 * This is really ugly, but needed for DCE-RPC interoperability
1462 	 * with Windows, which rotates by EC+RRC instead of just RRC.
1463 	 */
1464 	if (IS_DCE_STYLE(ctx)) {
1465 		*minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
1466 	} else {
1467 		*minor_status = rrc_rotate(p, len, rrc, TRUE);
1468 	}
1469 	if (*minor_status != 0) {
1470 	    return GSS_S_FAILURE;
1471 	}
1472 
1473 	ret = krb5_decrypt(context, ctx->crypto, usage,
1474 	    p, len, &data);
1475 	if (ret != 0) {
1476 	    *minor_status = ret;
1477 	    return GSS_S_BAD_MIC;
1478 	}
1479 
1480 	/* Check that there is room for the pad and token header */
1481 	if (data.length < ec + sizeof(*token)) {
1482 	    krb5_data_free(&data);
1483 	    return GSS_S_DEFECTIVE_TOKEN;
1484 	}
1485 	p = data.data;
1486 	p += data.length - sizeof(*token);
1487 
1488 	/* RRC is unprotected; don't modify input buffer */
1489 	((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
1490 	((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
1491 
1492 	/* Check the integrity of the header */
1493 	if (ct_memcmp(p, token, sizeof(*token)) != 0) {
1494 	    krb5_data_free(&data);
1495 	    return GSS_S_BAD_MIC;
1496 	}
1497 
1498 	output_message_buffer->value = data.data;
1499 	output_message_buffer->length = data.length - ec - sizeof(*token);
1500     } else {
1501 	Checksum cksum;
1502 
1503 	/* Rotate by RRC; bogus to do this in-place XXX */
1504 	*minor_status = rrc_rotate(p, len, rrc, TRUE);
1505 	if (*minor_status != 0) {
1506 	    return GSS_S_FAILURE;
1507 	}
1508 
1509 	/* Determine checksum type */
1510 	ret = krb5_crypto_get_checksum_type(context,
1511 					    ctx->crypto,
1512 					    &cksum.cksumtype);
1513 	if (ret != 0) {
1514 	    *minor_status = ret;
1515 	    return GSS_S_FAILURE;
1516 	}
1517 
1518 	cksum.checksum.length = ec;
1519 
1520 	/* Check we have at least as much data as the checksum */
1521 	if (len < cksum.checksum.length) {
1522 	    *minor_status = ERANGE;
1523 	    return GSS_S_BAD_MIC;
1524 	}
1525 
1526 	/* Length now is of the plaintext only, no checksum */
1527 	len -= cksum.checksum.length;
1528 	cksum.checksum.data = p + len;
1529 
1530 	output_message_buffer->length = len; /* for later */
1531 	output_message_buffer->value = malloc(len + sizeof(*token));
1532 	if (output_message_buffer->value == NULL) {
1533 	    *minor_status = ENOMEM;
1534 	    return GSS_S_FAILURE;
1535 	}
1536 
1537 	/* Checksum is over (plaintext-data | "header") */
1538 	memcpy(output_message_buffer->value, p, len);
1539 	memcpy((u_char *)output_message_buffer->value + len,
1540 	       token, sizeof(*token));
1541 
1542 	/* EC is not included in checksum calculation */
1543 	token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
1544 				     len);
1545 	token->EC[0]  = 0;
1546 	token->EC[1]  = 0;
1547 	token->RRC[0] = 0;
1548 	token->RRC[1] = 0;
1549 
1550 	ret = krb5_verify_checksum(context, ctx->crypto,
1551 				   usage,
1552 				   output_message_buffer->value,
1553 				   len + sizeof(*token),
1554 				   &cksum);
1555 	if (ret != 0) {
1556 	    *minor_status = ret;
1557 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1558 	    return GSS_S_BAD_MIC;
1559 	}
1560     }
1561 
1562     if (qop_state != NULL) {
1563 	*qop_state = GSS_C_QOP_DEFAULT;
1564     }
1565 
1566     *minor_status = 0;
1567     return GSS_S_COMPLETE;
1568 }
1569 
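/*
 * Produce a MIC token: the checksum is computed over the message followed
 * by the 16-octet MIC token header, and the output token is the header
 * followed by that checksum (RFC 4121 section 4.2.6.1).
 */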
1570 OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
1571 			  const gsskrb5_ctx ctx,
1572 			  krb5_context context,
1573 			  gss_qop_t qop_req,
1574 			  const gss_buffer_t message_buffer,
1575 			  gss_buffer_t message_token)
1576 {
1577     gss_cfx_mic_token token;
1578     krb5_error_code ret;
1579     unsigned usage;
1580     Checksum cksum;
1581     u_char *buf;
1582     size_t len;
1583     int32_t seq_number;
1584 
1585     len = message_buffer->length + sizeof(*token);
1586     buf = malloc(len);
1587     if (buf == NULL) {
1588 	*minor_status = ENOMEM;
1589 	return GSS_S_FAILURE;
1590     }
1591 
1592     memcpy(buf, message_buffer->value, message_buffer->length);
1593 
1594     token = (gss_cfx_mic_token)(buf + message_buffer->length);
1595     token->TOK_ID[0] = 0x04;
1596     token->TOK_ID[1] = 0x04;
1597     token->Flags = 0;
1598     if ((ctx->more_flags & LOCAL) == 0)
1599 	token->Flags |= CFXSentByAcceptor;
1600     if (ctx->more_flags & ACCEPTOR_SUBKEY)
1601 	token->Flags |= CFXAcceptorSubkey;
1602     memset(token->Filler, 0xFF, 5);
1603 
1604     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1605     krb5_auth_con_getlocalseqnumber(context,
1606 				    ctx->auth_context,
1607 				    &seq_number);
1608     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
1609     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1610     krb5_auth_con_setlocalseqnumber(context,
1611 				    ctx->auth_context,
1612 				    ++seq_number);
1613     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1614 
1615     if (ctx->more_flags & LOCAL) {
1616 	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1617     } else {
1618 	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1619     }
1620 
1621     ret = krb5_create_checksum(context, ctx->crypto,
1622 	usage, 0, buf, len, &cksum);
1623     if (ret != 0) {
1624 	*minor_status = ret;
1625 	free(buf);
1626 	return GSS_S_FAILURE;
1627     }
1628 
1629     /* Determine MIC length */
1630     message_token->length = sizeof(*token) + cksum.checksum.length;
1631     message_token->value = malloc(message_token->length);
1632     if (message_token->value == NULL) {
1633 	*minor_status = ENOMEM;
1634 	free_Checksum(&cksum);
1635 	free(buf);
1636 	return GSS_S_FAILURE;
1637     }
1638 
1639     /* Token is { "header" | get_mic("header" | plaintext-data) } */
1640     memcpy(message_token->value, token, sizeof(*token));
1641     memcpy((u_char *)message_token->value + sizeof(*token),
1642 	   cksum.checksum.data, cksum.checksum.length);
1643 
1644     free_Checksum(&cksum);
1645     free(buf);
1646 
1647     *minor_status = 0;
1648     return GSS_S_COMPLETE;
1649 }
1650 
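/*
 * Verify a MIC token: rebuild (message | token header) and verify the
 * trailing checksum, using the signing key usage that matches the peer's
 * role.
 */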
1651 OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
1652 				 const gsskrb5_ctx ctx,
1653 				 krb5_context context,
1654 				 const gss_buffer_t message_buffer,
1655 				 const gss_buffer_t token_buffer,
1656 				 gss_qop_t *qop_state)
1657 {
1658     gss_cfx_mic_token token;
1659     u_char token_flags;
1660     krb5_error_code ret;
1661     unsigned usage;
1662     OM_uint32 seq_number_lo, seq_number_hi;
1663     u_char *buf, *p;
1664     Checksum cksum;
1665 
1666     *minor_status = 0;
1667 
1668     if (token_buffer->length < sizeof(*token)) {
1669 	return GSS_S_DEFECTIVE_TOKEN;
1670     }
1671 
1672     p = token_buffer->value;
1673 
1674     token = (gss_cfx_mic_token)p;
1675 
1676     if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
1677 	return GSS_S_DEFECTIVE_TOKEN;
1678     }
1679 
1680     /* Ignore unknown flags */
1681     token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
1682 
1683     if (token_flags & CFXSentByAcceptor) {
1684 	if ((ctx->more_flags & LOCAL) == 0)
1685 	    return GSS_S_DEFECTIVE_TOKEN;
1686     }
1687     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1688 	if ((token_flags & CFXAcceptorSubkey) == 0)
1689 	    return GSS_S_DEFECTIVE_TOKEN;
1690     } else {
1691 	if (token_flags & CFXAcceptorSubkey)
1692 	    return GSS_S_DEFECTIVE_TOKEN;
1693     }
1694 
1695     if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
1696 	return GSS_S_DEFECTIVE_TOKEN;
1697     }
1698 
1699     /*
1700      * Check sequence number
1701      */
1702     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1703     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1704     if (seq_number_hi) {
1705 	*minor_status = ERANGE;
1706 	return GSS_S_UNSEQ_TOKEN;
1707     }
1708 
1709     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1710     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1711     if (ret != 0) {
1712 	*minor_status = 0;
1713 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1714 	return ret;
1715     }
1716     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1717 
1718     /*
1719      * Verify checksum
1720      */
1721     ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
1722 					&cksum.cksumtype);
1723     if (ret != 0) {
1724 	*minor_status = ret;
1725 	return GSS_S_FAILURE;
1726     }
1727 
1728     cksum.checksum.data = p + sizeof(*token);
1729     cksum.checksum.length = token_buffer->length - sizeof(*token);
1730 
1731     if (ctx->more_flags & LOCAL) {
1732 	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1733     } else {
1734 	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1735     }
1736 
1737     buf = malloc(message_buffer->length + sizeof(*token));
1738     if (buf == NULL) {
1739 	*minor_status = ENOMEM;
1740 	return GSS_S_FAILURE;
1741     }
1742     memcpy(buf, message_buffer->value, message_buffer->length);
1743     memcpy(buf + message_buffer->length, token, sizeof(*token));
1744 
1745     ret = krb5_verify_checksum(context, ctx->crypto,
1746 			       usage,
1747 			       buf,
1748 			       sizeof(*token) + message_buffer->length,
1749 			       &cksum);
1750     if (ret != 0) {
1751 	*minor_status = ret;
1752 	free(buf);
1753 	return GSS_S_BAD_MIC;
1754     }
1755 
1756     free(buf);
1757 
1758     if (qop_state != NULL) {
1759 	*qop_state = GSS_C_QOP_DEFAULT;
1760     }
1761 
1762     return GSS_S_COMPLETE;
1763 }
1764