1 /* $NetBSD: cfx.c,v 1.2 2017/01/28 21:31:46 christos Exp $ */ 2 3 /* 4 * Copyright (c) 2003, PADL Software Pty Ltd. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * 3. Neither the name of PADL Software nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 */ 34 35 #include "gsskrb5_locl.h" 36 37 /* 38 * Implementation of RFC 4121 39 */ 40 41 #define CFXSentByAcceptor (1 << 0) 42 #define CFXSealed (1 << 1) 43 #define CFXAcceptorSubkey (1 << 2) 44 45 krb5_error_code 46 _gsskrb5cfx_wrap_length_cfx(krb5_context context, 47 krb5_crypto crypto, 48 int conf_req_flag, 49 int dce_style, 50 size_t input_length, 51 size_t *output_length, 52 size_t *cksumsize, 53 uint16_t *padlength) 54 { 55 krb5_error_code ret; 56 krb5_cksumtype type; 57 58 /* 16-byte header is always first */ 59 *output_length = sizeof(gss_cfx_wrap_token_desc); 60 *padlength = 0; 61 62 ret = krb5_crypto_get_checksum_type(context, crypto, &type); 63 if (ret) 64 return ret; 65 66 ret = krb5_checksumsize(context, type, cksumsize); 67 if (ret) 68 return ret; 69 70 if (conf_req_flag) { 71 size_t padsize; 72 73 /* Header is concatenated with data before encryption */ 74 input_length += sizeof(gss_cfx_wrap_token_desc); 75 76 if (dce_style) { 77 ret = krb5_crypto_getblocksize(context, crypto, &padsize); 78 } else { 79 ret = krb5_crypto_getpadsize(context, crypto, &padsize); 80 } 81 if (ret) { 82 return ret; 83 } 84 if (padsize > 1) { 85 /* XXX check this */ 86 *padlength = padsize - (input_length % padsize); 87 88 /* We add the pad ourselves (noted here for completeness only) */ 89 input_length += *padlength; 90 } 91 92 *output_length += krb5_get_wrapped_length(context, 93 crypto, input_length); 94 } else { 95 /* Checksum is concatenated with data */ 96 *output_length += input_length + *cksumsize; 97 } 98 99 assert(*output_length > input_length); 100 101 return 0; 102 } 103 104 OM_uint32 105 _gssapi_wrap_size_cfx(OM_uint32 *minor_status, 106 const gsskrb5_ctx ctx, 107 krb5_context context, 108 int conf_req_flag, 109 gss_qop_t qop_req, 110 OM_uint32 req_output_size, 111 OM_uint32 *max_input_size) 112 { 113 krb5_error_code ret; 114 115 *max_input_size = 0; 116 117 /* 16-byte header is always first */ 118 if (req_output_size < 16) 119 return 0; 120 req_output_size -= 
16; 121 122 if (conf_req_flag) { 123 size_t wrapped_size, sz; 124 125 wrapped_size = req_output_size + 1; 126 do { 127 wrapped_size--; 128 sz = krb5_get_wrapped_length(context, 129 ctx->crypto, wrapped_size); 130 } while (wrapped_size && sz > req_output_size); 131 if (wrapped_size == 0) 132 return 0; 133 134 /* inner header */ 135 if (wrapped_size < 16) 136 return 0; 137 138 wrapped_size -= 16; 139 140 *max_input_size = wrapped_size; 141 } else { 142 krb5_cksumtype type; 143 size_t cksumsize; 144 145 ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type); 146 if (ret) 147 return ret; 148 149 ret = krb5_checksumsize(context, type, &cksumsize); 150 if (ret) 151 return ret; 152 153 if (req_output_size < cksumsize) 154 return 0; 155 156 /* Checksum is concatenated with data */ 157 *max_input_size = req_output_size - cksumsize; 158 } 159 160 return 0; 161 } 162 163 /* 164 * Rotate "rrc" bytes to the front or back 165 */ 166 167 static krb5_error_code 168 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate) 169 { 170 u_char *tmp, buf[256]; 171 size_t left; 172 173 if (len == 0) 174 return 0; 175 176 rrc %= len; 177 178 if (rrc == 0) 179 return 0; 180 181 left = len - rrc; 182 183 if (rrc <= sizeof(buf)) { 184 tmp = buf; 185 } else { 186 tmp = malloc(rrc); 187 if (tmp == NULL) 188 return ENOMEM; 189 } 190 191 if (unrotate) { 192 memcpy(tmp, data, rrc); 193 memmove(data, (u_char *)data + rrc, left); 194 memcpy((u_char *)data + left, tmp, rrc); 195 } else { 196 memcpy(tmp, (u_char *)data + left, rrc); 197 memmove((u_char *)data + rrc, data, left); 198 memcpy(data, tmp, rrc); 199 } 200 201 if (rrc > sizeof(buf)) 202 free(tmp); 203 204 return 0; 205 } 206 207 gss_iov_buffer_desc * 208 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type) 209 { 210 int i; 211 212 for (i = 0; i < iov_count; i++) 213 if (type == GSS_IOV_BUFFER_TYPE(iov[i].type)) 214 return &iov[i]; 215 return NULL; 216 } 217 218 OM_uint32 219 
_gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size) 220 { 221 if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) { 222 if (buffer->buffer.length == size) 223 return GSS_S_COMPLETE; 224 free(buffer->buffer.value); 225 } 226 227 buffer->buffer.value = malloc(size); 228 buffer->buffer.length = size; 229 if (buffer->buffer.value == NULL) { 230 *minor_status = ENOMEM; 231 return GSS_S_FAILURE; 232 } 233 buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED; 234 235 return GSS_S_COMPLETE; 236 } 237 238 239 OM_uint32 240 _gk_verify_buffers(OM_uint32 *minor_status, 241 const gsskrb5_ctx ctx, 242 const gss_iov_buffer_desc *header, 243 const gss_iov_buffer_desc *padding, 244 const gss_iov_buffer_desc *trailer) 245 { 246 if (header == NULL) { 247 *minor_status = EINVAL; 248 return GSS_S_FAILURE; 249 } 250 251 if (IS_DCE_STYLE(ctx)) { 252 /* 253 * In DCE style mode we reject having a padding or trailer buffer 254 */ 255 if (padding) { 256 *minor_status = EINVAL; 257 return GSS_S_FAILURE; 258 } 259 if (trailer) { 260 *minor_status = EINVAL; 261 return GSS_S_FAILURE; 262 } 263 } else { 264 /* 265 * In non-DCE style mode we require having a padding buffer 266 */ 267 if (padding == NULL) { 268 *minor_status = EINVAL; 269 return GSS_S_FAILURE; 270 } 271 } 272 273 *minor_status = 0; 274 return GSS_S_COMPLETE; 275 } 276 277 OM_uint32 278 _gssapi_wrap_cfx_iov(OM_uint32 *minor_status, 279 gsskrb5_ctx ctx, 280 krb5_context context, 281 int conf_req_flag, 282 int *conf_state, 283 gss_iov_buffer_desc *iov, 284 int iov_count) 285 { 286 OM_uint32 major_status, junk; 287 gss_iov_buffer_desc *header, *trailer, *padding; 288 size_t gsshsize, k5hsize; 289 size_t gsstsize, k5tsize; 290 size_t rrc = 0, ec = 0; 291 int i; 292 gss_cfx_wrap_token token; 293 krb5_error_code ret; 294 int32_t seq_number; 295 unsigned usage; 296 krb5_crypto_iov *data = NULL; 297 298 header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER); 299 if (header == NULL) { 300 *minor_status 
= EINVAL; 301 return GSS_S_FAILURE; 302 } 303 304 padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING); 305 if (padding != NULL) { 306 padding->buffer.length = 0; 307 } 308 309 trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER); 310 311 major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer); 312 if (major_status != GSS_S_COMPLETE) { 313 return major_status; 314 } 315 316 if (conf_req_flag) { 317 size_t k5psize = 0; 318 size_t k5pbase = 0; 319 size_t k5bsize = 0; 320 size_t size = 0; 321 322 for (i = 0; i < iov_count; i++) { 323 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) { 324 case GSS_IOV_BUFFER_TYPE_DATA: 325 size += iov[i].buffer.length; 326 break; 327 default: 328 break; 329 } 330 } 331 332 size += sizeof(gss_cfx_wrap_token_desc); 333 334 *minor_status = krb5_crypto_length(context, ctx->crypto, 335 KRB5_CRYPTO_TYPE_HEADER, 336 &k5hsize); 337 if (*minor_status) 338 return GSS_S_FAILURE; 339 340 *minor_status = krb5_crypto_length(context, ctx->crypto, 341 KRB5_CRYPTO_TYPE_TRAILER, 342 &k5tsize); 343 if (*minor_status) 344 return GSS_S_FAILURE; 345 346 *minor_status = krb5_crypto_length(context, ctx->crypto, 347 KRB5_CRYPTO_TYPE_PADDING, 348 &k5pbase); 349 if (*minor_status) 350 return GSS_S_FAILURE; 351 352 if (k5pbase > 1) { 353 k5psize = k5pbase - (size % k5pbase); 354 } else { 355 k5psize = 0; 356 } 357 358 if (k5psize == 0 && IS_DCE_STYLE(ctx)) { 359 *minor_status = krb5_crypto_getblocksize(context, ctx->crypto, 360 &k5bsize); 361 if (*minor_status) 362 return GSS_S_FAILURE; 363 ec = k5bsize; 364 } else { 365 ec = k5psize; 366 } 367 368 gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize; 369 gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize; 370 } else { 371 if (IS_DCE_STYLE(ctx)) { 372 *minor_status = EINVAL; 373 return GSS_S_FAILURE; 374 } 375 376 k5hsize = 0; 377 *minor_status = krb5_crypto_length(context, ctx->crypto, 378 KRB5_CRYPTO_TYPE_CHECKSUM, 379 &k5tsize); 380 if (*minor_status) 
381 return GSS_S_FAILURE; 382 383 gsshsize = sizeof(gss_cfx_wrap_token_desc); 384 gsstsize = k5tsize; 385 } 386 387 /* 388 * 389 */ 390 391 if (trailer == NULL) { 392 rrc = gsstsize; 393 if (IS_DCE_STYLE(ctx)) 394 rrc -= ec; 395 gsshsize += gsstsize; 396 } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) { 397 major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize); 398 if (major_status) 399 goto failure; 400 } else if (trailer->buffer.length < gsstsize) { 401 *minor_status = KRB5_BAD_MSIZE; 402 major_status = GSS_S_FAILURE; 403 goto failure; 404 } else 405 trailer->buffer.length = gsstsize; 406 407 /* 408 * 409 */ 410 411 if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) { 412 major_status = _gk_allocate_buffer(minor_status, header, gsshsize); 413 if (major_status != GSS_S_COMPLETE) 414 goto failure; 415 } else if (header->buffer.length < gsshsize) { 416 *minor_status = KRB5_BAD_MSIZE; 417 major_status = GSS_S_FAILURE; 418 goto failure; 419 } else 420 header->buffer.length = gsshsize; 421 422 token = (gss_cfx_wrap_token)header->buffer.value; 423 424 token->TOK_ID[0] = 0x05; 425 token->TOK_ID[1] = 0x04; 426 token->Flags = 0; 427 token->Filler = 0xFF; 428 429 if ((ctx->more_flags & LOCAL) == 0) 430 token->Flags |= CFXSentByAcceptor; 431 432 if (ctx->more_flags & ACCEPTOR_SUBKEY) 433 token->Flags |= CFXAcceptorSubkey; 434 435 if (ctx->more_flags & LOCAL) 436 usage = KRB5_KU_USAGE_INITIATOR_SEAL; 437 else 438 usage = KRB5_KU_USAGE_ACCEPTOR_SEAL; 439 440 if (conf_req_flag) { 441 /* 442 * In Wrap tokens with confidentiality, the EC field is 443 * used to encode the size (in bytes) of the random filler. 444 */ 445 token->Flags |= CFXSealed; 446 token->EC[0] = (ec >> 8) & 0xFF; 447 token->EC[1] = (ec >> 0) & 0xFF; 448 449 } else { 450 /* 451 * In Wrap tokens without confidentiality, the EC field is 452 * used to encode the size (in bytes) of the trailing 453 * checksum. 
454 * 455 * This is not used in the checksum calcuation itself, 456 * because the checksum length could potentially vary 457 * depending on the data length. 458 */ 459 token->EC[0] = 0; 460 token->EC[1] = 0; 461 } 462 463 /* 464 * In Wrap tokens that provide for confidentiality, the RRC 465 * field in the header contains the hex value 00 00 before 466 * encryption. 467 * 468 * In Wrap tokens that do not provide for confidentiality, 469 * both the EC and RRC fields in the appended checksum 470 * contain the hex value 00 00 for the purpose of calculating 471 * the checksum. 472 */ 473 token->RRC[0] = 0; 474 token->RRC[1] = 0; 475 476 HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex); 477 krb5_auth_con_getlocalseqnumber(context, 478 ctx->auth_context, 479 &seq_number); 480 _gsskrb5_encode_be_om_uint32(0, &token->SND_SEQ[0]); 481 _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]); 482 krb5_auth_con_setlocalseqnumber(context, 483 ctx->auth_context, 484 ++seq_number); 485 HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex); 486 487 data = calloc(iov_count + 3, sizeof(data[0])); 488 if (data == NULL) { 489 *minor_status = ENOMEM; 490 major_status = GSS_S_FAILURE; 491 goto failure; 492 } 493 494 if (conf_req_flag) { 495 /* 496 plain packet: 497 498 {"header" | encrypt(plaintext-data | ec-padding | E"header")} 499 500 Expanded, this is with with RRC = 0: 501 502 {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer } 503 504 In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer) 505 506 {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data } 507 */ 508 509 i = 0; 510 data[i].flags = KRB5_CRYPTO_TYPE_HEADER; 511 data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize; 512 data[i].data.length = k5hsize; 513 514 for (i = 1; i < iov_count + 1; i++) { 515 switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) { 516 case GSS_IOV_BUFFER_TYPE_DATA: 517 data[i].flags = 
KRB5_CRYPTO_TYPE_DATA; 518 break; 519 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY: 520 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY; 521 break; 522 default: 523 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY; 524 break; 525 } 526 data[i].data.length = iov[i - 1].buffer.length; 527 data[i].data.data = iov[i - 1].buffer.value; 528 } 529 530 /* 531 * Any necessary padding is added here to ensure that the 532 * encrypted token header is always at the end of the 533 * ciphertext. 534 */ 535 536 /* encrypted CFX header in trailer (or after the header if in 537 DCE mode). Copy in header into E"header" 538 */ 539 data[i].flags = KRB5_CRYPTO_TYPE_DATA; 540 if (trailer) 541 data[i].data.data = trailer->buffer.value; 542 else 543 data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token); 544 545 data[i].data.length = ec + sizeof(*token); 546 memset(data[i].data.data, 0xFF, ec); 547 memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token)); 548 i++; 549 550 /* Kerberos trailer comes after the gss trailer */ 551 data[i].flags = KRB5_CRYPTO_TYPE_TRAILER; 552 data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token); 553 data[i].data.length = k5tsize; 554 i++; 555 556 ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL); 557 if (ret != 0) { 558 *minor_status = ret; 559 major_status = GSS_S_FAILURE; 560 goto failure; 561 } 562 563 if (rrc) { 564 token->RRC[0] = (rrc >> 8) & 0xFF; 565 token->RRC[1] = (rrc >> 0) & 0xFF; 566 } 567 568 } else { 569 /* 570 plain packet: 571 572 {data | "header" | gss-trailer (krb5 checksum) 573 574 don't do RRC != 0 575 576 */ 577 578 for (i = 0; i < iov_count; i++) { 579 switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) { 580 case GSS_IOV_BUFFER_TYPE_DATA: 581 data[i].flags = KRB5_CRYPTO_TYPE_DATA; 582 break; 583 case GSS_IOV_BUFFER_TYPE_SIGN_ONLY: 584 data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY; 585 break; 586 default: 587 data[i].flags = KRB5_CRYPTO_TYPE_EMPTY; 588 break; 589 } 590 data[i].data.length = 
iov[i].buffer.length; 591 data[i].data.data = iov[i].buffer.value; 592 } 593 594 data[i].flags = KRB5_CRYPTO_TYPE_DATA; 595 data[i].data.data = header->buffer.value; 596 data[i].data.length = sizeof(gss_cfx_wrap_token_desc); 597 i++; 598 599 data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM; 600 if (trailer) { 601 data[i].data.data = trailer->buffer.value; 602 } else { 603 data[i].data.data = (uint8_t *)header->buffer.value + 604 sizeof(gss_cfx_wrap_token_desc); 605 } 606 data[i].data.length = k5tsize; 607 i++; 608 609 ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL); 610 if (ret) { 611 *minor_status = ret; 612 major_status = GSS_S_FAILURE; 613 goto failure; 614 } 615 616 if (rrc) { 617 token->RRC[0] = (rrc >> 8) & 0xFF; 618 token->RRC[1] = (rrc >> 0) & 0xFF; 619 } 620 621 token->EC[0] = (k5tsize >> 8) & 0xFF; 622 token->EC[1] = (k5tsize >> 0) & 0xFF; 623 } 624 625 if (conf_state != NULL) 626 *conf_state = conf_req_flag; 627 628 free(data); 629 630 *minor_status = 0; 631 return GSS_S_COMPLETE; 632 633 failure: 634 if (data) 635 free(data); 636 637 gss_release_iov_buffer(&junk, iov, iov_count); 638 639 return major_status; 640 } 641 642 /* This is slowpath */ 643 static OM_uint32 644 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count) 645 { 646 uint8_t *p, *q; 647 size_t len = 0, skip; 648 int i; 649 650 for (i = 0; i < iov_count; i++) 651 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA || 652 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING || 653 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER) 654 len += iov[i].buffer.length; 655 656 p = malloc(len); 657 if (p == NULL) { 658 *minor_status = ENOMEM; 659 return GSS_S_FAILURE; 660 } 661 q = p; 662 663 /* copy up */ 664 665 for (i = 0; i < iov_count; i++) { 666 if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA || 667 GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING || 668 
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
	{
	    memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
	    q += iov[i].buffer.length;
	}
    }
    assert((size_t)(q - p) == len);

    /* unrotate first part */
    q = p + rrc;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
	{
	    /* skip the first rrc output bytes; they receive the rotated tail below */
	    if (iov[i].buffer.length <= skip) {
		skip -= iov[i].buffer.length;
	    } else {
		/* copy back to original buffer */
		memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
		q += iov[i].buffer.length - skip;
		skip = 0;
	    }
	}
    }
    /* copy trailer */
    q = p;
    skip = rrc;
    for (i = 0; i < iov_count; i++) {
	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
	{
	    /* first rrc bytes of the flattened copy land at the front of the IOVs */
	    memcpy(iov[i].buffer.value, q, min(iov[i].buffer.length, skip));
	    if (iov[i].buffer.length > skip)
		break;
	    skip -= iov[i].buffer.length;
	    q += iov[i].buffer.length;
	}
    }
    free(p);
    return GSS_S_COMPLETE;
}


/*
 * IOV variant of RFC 4121 Unwrap: validates the CFX token header,
 * checks the sequence number, then decrypts (CFXSealed) or verifies the
 * checksum of the DATA/SIGN_ONLY buffers in place.
 */
OM_uint32
_gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
		       gsskrb5_ctx ctx,
		       krb5_context context,
		       int *conf_state,
		       gss_qop_t *qop_state,
		       gss_iov_buffer_desc *iov,
		       int iov_count)
{
    OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    gss_cfx_wrap_token token, ttoken;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    uint16_t ec, rrc;
    krb5_crypto_iov *data = NULL;
    int i, j;

    *minor_status = 0;

    header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    if (header->buffer.length < sizeof(*token)) /* we check exact below */
	return GSS_S_DEFECTIVE_TOKEN;

    padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL && padding->buffer.length != 0) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
	return major_status;
    }

    token = (gss_cfx_wrap_token)header->buffer.value;

    /* Wrap token ID is 05 04 */
    if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
	return GSS_S_DEFECTIVE_TOKEN;

    /* Ignore unknown flags */
    token_flags = token->Flags &
	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    if (token_flags & CFXSentByAcceptor) {
	if ((ctx->more_flags & LOCAL) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    /* acceptor-subkey flag must match the context's negotiated state */
    if (ctx->more_flags & ACCEPTOR_SUBKEY) {
	if ((token_flags & CFXAcceptorSubkey) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    } else {
	if (token_flags & CFXAcceptorSubkey)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    if (token->Filler != 0xFF)
	return GSS_S_DEFECTIVE_TOKEN;

    if (conf_state != NULL)
	*conf_state = (token_flags & CFXSealed) ? 1 : 0;

    /* EC and RRC are 16-bit big-endian fields */
    ec = (token->EC[0] << 8) | token->EC[1];
    rrc = (token->RRC[0] << 8) | token->RRC[1];

    /*
     * Check sequence number
     */
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	/* no support for 64-bit sequence numbers */
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
	return ret;
    }
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * Decrypt and/or verify checksum
     */

    /* Peer used the opposite key usage from ours */
    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    }

    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
	*minor_status = ENOMEM;
	major_status = GSS_S_FAILURE;
	goto failure;
    }

    if (token_flags & CFXSealed) {
	size_t k5tsize, k5hsize;

	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);

	/* Rotate by RRC; bogus to do this in-place XXX */
	/* Check RRC */

	if (trailer == NULL) {
	    size_t gsstsize = k5tsize + sizeof(*token);
	    size_t gsshsize = k5hsize + sizeof(*token);

	    if (rrc != gsstsize) {
		major_status = GSS_S_DEFECTIVE_TOKEN;
		goto failure;
	    }

	    if (IS_DCE_STYLE(ctx))
		gsstsize += ec;

	    gsshsize += gsstsize;

	    if (header->buffer.length != gsshsize) {
		major_status = GSS_S_DEFECTIVE_TOKEN;
		goto failure;
	    }
	} else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (header->buffer.length != sizeof(*token) + k5hsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (rrc != 0) {
	    /* go though slowpath */
	    major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
	    if (major_status)
		goto failure;
	}

	i = 0;
	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
	data[i].data.length = k5hsize;
	i++;

	for (j = 0; j < iov_count; i++, j++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[j].buffer.length;
	    data[i].data.data = iov[j].buffer.value;
	}

	/* encrypted CFX header in trailer (or after the header if in
	   DCE mode). Copy in header into E"header"
	*/
	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	if (trailer) {
	    data[i].data.data = trailer->buffer.value;
	} else {
	    data[i].data.data = ((uint8_t *)header->buffer.value) +
		header->buffer.length - k5hsize - k5tsize - ec- sizeof(*token);
	}

	data[i].data.length = ec + sizeof(*token);
	/* ttoken aliases the decrypted copy of the token header */
	ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
	i++;

	/* Kerberos trailer comes after the gss trailer */
	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
	data[i].data.length = k5tsize;
	i++;

	ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
	if (ret != 0) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	/* the inner header was encrypted with RRC = 0; patch before compare */
	ttoken->RRC[0] = token->RRC[0];
	ttoken->RRC[1] = token->RRC[1];

	/* Check the integrity of the header */
	if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
	    major_status = GSS_S_BAD_MIC;
	    goto failure;
	}
    } else {
	size_t gsstsize = ec;
	size_t gsshsize = sizeof(*token);

	if (trailer == NULL) {
	    /* Check RRC */
	    if (rrc != gsstsize) {
		*minor_status = EINVAL;
		major_status = GSS_S_FAILURE;
		goto failure;
	    }

	    gsshsize += gsstsize;
	} else if (trailer->buffer.length != gsstsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (rrc != 0) {
	    /* Check RRC */
	    *minor_status = EINVAL;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	if (header->buffer.length != gsshsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	}

	for (i = 0; i < iov_count; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[i].buffer.length;
	    data[i].data.data = iov[i].buffer.value;
	}

	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	data[i].data.data = header->buffer.value;
	data[i].data.length = sizeof(*token);
	i++;

	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
	if (trailer) {
	    data[i].data.data = trailer->buffer.value;
	} else {
	    data[i].data.data = (uint8_t *)header->buffer.value +
		sizeof(*token);
	}
	data[i].data.length = ec;
	i++;

	/* the checksum was computed with EC and RRC zeroed */
	token = (gss_cfx_wrap_token)header->buffer.value;
	token->EC[0] = 0;
	token->EC[1] = 0;
	token->RRC[0] = 0;
	token->RRC[1] = 0;

	ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
	if (ret) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}
    }

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

failure:
    if (data)
	free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}

/*
 * Compute the required HEADER/PADDING/TRAILER buffer lengths for a
 * _gssapi_wrap_cfx_iov() call over the given IOV set, without wrapping.
 */
OM_uint32
_gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
			    gsskrb5_ctx ctx,
			    krb5_context context,
			    int conf_req_flag,
			    gss_qop_t qop_req,
			    int *conf_state,
			    gss_iov_buffer_desc *iov,
			    int iov_count)
{
    OM_uint32 major_status;
    size_t size;
    int i;
    gss_iov_buffer_desc *header = NULL;
    gss_iov_buffer_desc *padding = NULL;
    gss_iov_buffer_desc *trailer = NULL;
    size_t gsshsize = 0;
    size_t gsstsize = 0;
    size_t k5hsize = 0;
    size_t k5tsize = 0;

    GSSAPI_KRB5_INIT (&context);
    *minor_status = 0;

    /* sum the DATA lengths and find the (unique) special buffers */
    for (size = 0, i = 0; i < iov_count; i++) {
	switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	case GSS_IOV_BUFFER_TYPE_EMPTY:
	    break;
	case GSS_IOV_BUFFER_TYPE_DATA:
	    size += iov[i].buffer.length;
	    break;
	case GSS_IOV_BUFFER_TYPE_HEADER:
	    if (header != NULL) {
		*minor_status = 0;
		return GSS_S_FAILURE;
	    }
	    header = &iov[i];
	    break;
	case GSS_IOV_BUFFER_TYPE_TRAILER:
	    if (trailer != NULL) {
		*minor_status = 0;
		return GSS_S_FAILURE;
	    }
	    trailer = &iov[i];
	    break;
	case GSS_IOV_BUFFER_TYPE_PADDING:
	    if (padding != NULL) {
		*minor_status = 0;
		return GSS_S_FAILURE;
	    }
	    padding = &iov[i];
	    break;
	case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
	    break;
	default:
	    *minor_status = EINVAL;
	    return GSS_S_FAILURE;
	}
    }

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
	return major_status;
    }

    if (conf_req_flag) {
	size_t k5psize = 0;
	size_t k5pbase = 0;
	size_t k5bsize = 0;
	size_t ec = 0;

	size += sizeof(gss_cfx_wrap_token_desc);

	*minor_status =
krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_HEADER,
					   &k5hsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_TRAILER,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_PADDING,
					   &k5pbase);
	if (*minor_status)
	    return GSS_S_FAILURE;

	if (k5pbase > 1) {
	    k5psize = k5pbase - (size % k5pbase);
	} else {
	    k5psize = 0;
	}

	/* DCE style pads a full cipher block when no padding is needed */
	if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
						     &k5bsize);
	    if (*minor_status)
		return GSS_S_FAILURE;

	    ec = k5bsize;
	} else {
	    ec = k5psize;
	}

	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_CHECKSUM,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	gsshsize = sizeof(gss_cfx_wrap_token_desc);
	gsstsize = k5tsize;
    }

    /* with no trailer buffer the trailer is carried in the header (RRC) */
    if (trailer != NULL) {
	trailer->buffer.length = gsstsize;
    } else {
	gsshsize += gsstsize;
    }

    header->buffer.length = gsshsize;

    if (padding) {
	/* padding is done via EC and is contained in the header or trailer */
	padding->buffer.length = 0;
    }

    if (conf_state) {
	*conf_state = conf_req_flag;
    }

    return GSS_S_COMPLETE;
}


/*
 * Non-IOV RFC 4121 Wrap: produce a complete output token for
 * input_message_buffer, encrypting when conf_req_flag is set.
 */
OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
			   const gsskrb5_ctx ctx,
			   krb5_context context,
			   int conf_req_flag,
			   const gss_buffer_t input_message_buffer,
			   int *conf_state,
			   gss_buffer_t output_message_buffer)
{
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    unsigned usage;
    krb5_data
    cipher;
    size_t wrapped_len, cksumsize;
    uint16_t padlength, rrc = 0;
    int32_t seq_number;
    u_char *p;

    /* Compute total token size, checksum size and pad length up front */
    ret = _gsskrb5cfx_wrap_length_cfx(context,
				      ctx->crypto, conf_req_flag,
				      IS_DCE_STYLE(ctx),
				      input_message_buffer->length,
				      &wrapped_len, &cksumsize, &padlength);
    if (ret != 0) {
	*minor_status = ret;
	return GSS_S_FAILURE;
    }

    /* Always rotate encrypted token (if any) and checksum to header */
    rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;

    output_message_buffer->length = wrapped_len;
    output_message_buffer->value = malloc(output_message_buffer->length);
    if (output_message_buffer->value == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }

    /* Fill in the 16-byte Wrap token header (TOK_ID 05 04) */
    p = output_message_buffer->value;
    token = (gss_cfx_wrap_token)p;
    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags = 0;
    token->Filler = 0xFF;
    if ((ctx->more_flags & LOCAL) == 0)
	token->Flags |= CFXSentByAcceptor;
    if (ctx->more_flags & ACCEPTOR_SUBKEY)
	token->Flags |= CFXAcceptorSubkey;
    if (conf_req_flag) {
	/*
	 * In Wrap tokens with confidentiality, the EC field is
	 * used to encode the size (in bytes) of the random filler.
	 */
	token->Flags |= CFXSealed;
	token->EC[0] = (padlength >> 8) & 0xFF;
	token->EC[1] = (padlength >> 0) & 0xFF;
    } else {
	/*
	 * In Wrap tokens without confidentiality, the EC field is
	 * used to encode the size (in bytes) of the trailing
	 * checksum.
	 *
	 * This is not used in the checksum calculation itself,
	 * because the checksum length could potentially vary
	 * depending on the data length.
	 */
	token->EC[0] = 0;
	token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    /* Stamp and advance the send sequence number under the context lock */
    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    krb5_auth_con_getlocalseqnumber(context,
				    ctx->auth_context,
				    &seq_number);
    _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
    _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
    krb5_auth_con_setlocalseqnumber(context,
				    ctx->auth_context,
				    ++seq_number);
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * If confidentiality is requested, the token header is
     * appended to the plaintext before encryption; the resulting
     * token is {"header" | encrypt(plaintext | pad | "header")}.
     *
     * If no confidentiality is requested, the checksum is
     * calculated over the plaintext concatenated with the
     * token header.
     */
    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    }

    if (conf_req_flag) {
	/*
	 * Any necessary padding is added here to ensure that the
	 * encrypted token header is always at the end of the
	 * ciphertext.
	 *
	 * The specification does not require that the padding
	 * bytes are initialized.
	 */
	p += sizeof(*token);
	memcpy(p, input_message_buffer->value, input_message_buffer->length);
	memset(p + input_message_buffer->length, 0xFF, padlength);
	memcpy(p + input_message_buffer->length + padlength,
	       token, sizeof(*token));

	ret = krb5_encrypt(context, ctx->crypto,
			   usage, p,
			   input_message_buffer->length + padlength +
				sizeof(*token),
			   &cipher);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	/* ciphertext plus leading header must fill the predicted size */
	assert(sizeof(*token) + cipher.length == wrapped_len);
	token->RRC[0] = (rrc >> 8) & 0xFF;
	token->RRC[1] = (rrc >> 0) & 0xFF;

	/*
	 * this is really ugly, but needed against windows
	 * for DCERPC, as windows rotates by EC+RRC.
	 */
	if (IS_DCE_STYLE(ctx)) {
	    ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
	} else {
	    ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
	}
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	memcpy(p, cipher.data, cipher.length);
	krb5_data_free(&cipher);
    } else {
	char *buf;
	Checksum cksum;

	/* Checksum input is { plaintext | "header" } */
	buf = malloc(input_message_buffer->length + sizeof(*token));
	if (buf == NULL) {
	    *minor_status = ENOMEM;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_FAILURE;
	}
	memcpy(buf, input_message_buffer->value, input_message_buffer->length);
	memcpy(buf + input_message_buffer->length, token, sizeof(*token));

	ret = krb5_create_checksum(context, ctx->crypto,
				   usage, 0, buf,
				   input_message_buffer->length +
					sizeof(*token),
				   &cksum);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    free(buf);
	    return GSS_S_FAILURE;
	}

	free(buf);

	/* The checksum size must match what _gsskrb5cfx_wrap_length_cfx used */
	assert(cksum.checksum.length == cksumsize);
	token->EC[0] = (cksum.checksum.length >> 8) & 0xFF;
	token->EC[1] = (cksum.checksum.length >> 0) & 0xFF;
	token->RRC[0] = (rrc >> 8) & 0xFF;
	token->RRC[1] = (rrc >> 0) & 0xFF;

	/* Token body is { plaintext | checksum }, then rotated by RRC */
	p += sizeof(*token);
	memcpy(p, input_message_buffer->value, input_message_buffer->length);
	memcpy(p + input_message_buffer->length,
	       cksum.checksum.data, cksum.checksum.length);

	ret = rrc_rotate(p,
	    input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    free_Checksum(&cksum);
	    return GSS_S_FAILURE;
	}
	free_Checksum(&cksum);
    }

    if (conf_state != NULL) {
	*conf_state = conf_req_flag;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

/*
 * Unwrap an RFC 4121 Wrap token: validate the header fields and
 * sequence number, then either decrypt (sealed tokens) or verify the
 * trailing checksum (unsealed tokens).
 *
 * On success output_message_buffer holds the plaintext (caller frees),
 * *conf_state reports whether the token was sealed, and *qop_state is
 * GSS_C_QOP_DEFAULT.
 */
OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
			     const gsskrb5_ctx ctx,
			     krb5_context context,
			     const gss_buffer_t input_message_buffer,
			     gss_buffer_t output_message_buffer,
			     int *conf_state,
			     gss_qop_t *qop_state)
{
    gss_cfx_wrap_token token;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    krb5_data data;
    uint16_t ec, rrc;
    OM_uint32 seq_number_lo, seq_number_hi;
    size_t len;
    u_char *p;

    *minor_status = 0;

    /* Token must at least contain the fixed 16-byte header */
    if (input_message_buffer->length < sizeof(*token)) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    p = input_message_buffer->value;

    token = (gss_cfx_wrap_token)p;

    if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Ignore unknown flags */
    token_flags = token->Flags &
	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    /* A token flagged sent-by-acceptor must arrive at the initiator */
    if (token_flags & CFXSentByAcceptor) {
	if ((ctx->more_flags & LOCAL) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    /* The subkey flag must match the context's negotiated state */
    if
       (ctx->more_flags & ACCEPTOR_SUBKEY) {
	if ((token_flags & CFXAcceptorSubkey) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    } else {
	if (token_flags & CFXAcceptorSubkey)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    if (token->Filler != 0xFF) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    if (conf_state != NULL) {
	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
    }

    /* Extra Count and Right Rotation Count, both big-endian on the wire */
    ec  = (token->EC[0]  << 8) | token->EC[1];
    rrc = (token->RRC[0] << 8) | token->RRC[1];

    /*
     * Check sequence number
     */
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	/* no support for 64-bit sequence numbers */
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
	_gsskrb5_release_buffer(minor_status, output_message_buffer);
	return ret;
    }
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * Decrypt and/or verify checksum
     */

    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    }

    /* len = bytes remaining after the token header */
    p += sizeof(*token);
    len = input_message_buffer->length;
    len -= (p - (u_char *)input_message_buffer->value);

    if (token_flags & CFXSealed) {
	/*
	 * this is really ugly, but needed against windows
	 * for DCERPC, as windows rotates by EC+RRC.
	 */
	if (IS_DCE_STYLE(ctx)) {
	    *minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
	} else {
	    *minor_status = rrc_rotate(p, len, rrc, TRUE);
	}
	if (*minor_status != 0) {
	    return GSS_S_FAILURE;
	}

	ret = krb5_decrypt(context, ctx->crypto, usage,
			   p, len, &data);
	if (ret != 0) {
	    *minor_status = ret;
	    return GSS_S_BAD_MIC;
	}

	/* Check that there is room for the pad and token header */
	if (data.length < ec + sizeof(*token)) {
	    krb5_data_free(&data);
	    return GSS_S_DEFECTIVE_TOKEN;
	}
	/* Point at the header copy at the end of the decrypted data */
	p = data.data;
	p += data.length - sizeof(*token);

	/* RRC is unprotected; don't modify input buffer */
	((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
	((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];

	/* Check the integrity of the header */
	if (ct_memcmp(p, token, sizeof(*token)) != 0) {
	    krb5_data_free(&data);
	    return GSS_S_BAD_MIC;
	}

	/* Plaintext = decrypted data minus EC filler and header copy */
	output_message_buffer->value = data.data;
	output_message_buffer->length = data.length - ec - sizeof(*token);
    } else {
	Checksum cksum;

	/* Rotate by RRC; bogus to do this in-place XXX */
	*minor_status = rrc_rotate(p, len, rrc, TRUE);
	if (*minor_status != 0) {
	    return GSS_S_FAILURE;
	}

	/* Determine checksum type */
	ret = krb5_crypto_get_checksum_type(context,
					    ctx->crypto,
					    &cksum.cksumtype);
	if (ret != 0) {
	    *minor_status = ret;
	    return GSS_S_FAILURE;
	}

	/* For unsealed tokens EC encodes the checksum length */
	cksum.checksum.length = ec;

	/* Check we have at least as much data as the checksum */
	if (len < cksum.checksum.length) {
	    *minor_status = ERANGE;
	    return GSS_S_BAD_MIC;
	}

	/* Length now is of the plaintext only, no checksum */
	len -= cksum.checksum.length;
	cksum.checksum.data = p + len;

	output_message_buffer->length = len; /* for later */
	output_message_buffer->value = malloc(len + sizeof(*token));
	if (output_message_buffer->value == NULL) {
	    *minor_status = ENOMEM;
	    return GSS_S_FAILURE;
	}

	/* Checksum is over (plaintext-data | "header") */
	memcpy(output_message_buffer->value, p, len);
	memcpy((u_char *)output_message_buffer->value + len,
	       token, sizeof(*token));

	/* EC is not included in checksum calculation */
	token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
				     len);
	token->EC[0] = 0;
	token->EC[1] = 0;
	token->RRC[0] = 0;
	token->RRC[1] = 0;

	ret = krb5_verify_checksum(context, ctx->crypto,
				   usage,
				   output_message_buffer->value,
				   len + sizeof(*token),
				   &cksum);
	if (ret != 0) {
	    *minor_status = ret;
	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
	    return GSS_S_BAD_MIC;
	}
    }

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

/*
 * Produce an RFC 4121 MIC token over message_buffer:
 * { "header" | get_mic("header" | plaintext-data) }.
 *
 * On success message_token holds the MIC token (caller frees).
 * qop_req is accepted for the GSS-API signature but not otherwise
 * consulted here.
 */
OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
			  const gsskrb5_ctx ctx,
			  krb5_context context,
			  gss_qop_t qop_req,
			  const gss_buffer_t message_buffer,
			  gss_buffer_t message_token)
{
    gss_cfx_mic_token token;
    krb5_error_code ret;
    unsigned usage;
    Checksum cksum;
    u_char *buf;
    size_t len;
    int32_t seq_number;

    /* Checksum input is { plaintext-data | "header" } */
    len = message_buffer->length + sizeof(*token);
    buf = malloc(len);
    if (buf == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }

    memcpy(buf, message_buffer->value, message_buffer->length);

    /* Build the MIC token header (TOK_ID 04 04) after the plaintext */
    token = (gss_cfx_mic_token)(buf + message_buffer->length);
    token->TOK_ID[0] = 0x04;
    token->TOK_ID[1] = 0x04;
    token->Flags = 0;
    if ((ctx->more_flags & LOCAL) == 0)
	token->Flags |= CFXSentByAcceptor;
    if (ctx->more_flags & ACCEPTOR_SUBKEY)
	token->Flags |= CFXAcceptorSubkey;
    memset(token->Filler, 0xFF, 5);

    /* Stamp and advance the send sequence number under the context lock */
    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    krb5_auth_con_getlocalseqnumber(context,
				    ctx->auth_context,
				    &seq_number);
    _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
    _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
    krb5_auth_con_setlocalseqnumber(context,
				    ctx->auth_context,
				    ++seq_number);
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
    } else {
	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
    }

    ret = krb5_create_checksum(context, ctx->crypto,
			       usage, 0, buf, len, &cksum);
    if (ret != 0) {
	*minor_status = ret;
	free(buf);
	return GSS_S_FAILURE;
    }

    /* Determine MIC length */
    message_token->length = sizeof(*token) + cksum.checksum.length;
    message_token->value = malloc(message_token->length);
    if (message_token->value == NULL) {
	*minor_status = ENOMEM;
	free_Checksum(&cksum);
	free(buf);
	return GSS_S_FAILURE;
    }

    /* Token is { "header" | get_mic("header" | plaintext-data) } */
    memcpy(message_token->value, token, sizeof(*token));
    memcpy((u_char *)message_token->value + sizeof(*token),
	   cksum.checksum.data, cksum.checksum.length);

    free_Checksum(&cksum);
    free(buf);

    *minor_status = 0;
    return GSS_S_COMPLETE;
}

/*
 * Verify an RFC 4121 MIC token against message_buffer.
 *
 * Returns GSS_S_COMPLETE (with *qop_state = GSS_C_QOP_DEFAULT) on
 * success, otherwise GSS_S_DEFECTIVE_TOKEN, GSS_S_UNSEQ_TOKEN,
 * GSS_S_BAD_MIC or GSS_S_FAILURE as appropriate.
 */
OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
				 const gsskrb5_ctx ctx,
				 krb5_context context,
				 const gss_buffer_t message_buffer,
				 const gss_buffer_t token_buffer,
				 gss_qop_t *qop_state)
{
    gss_cfx_mic_token token;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    OM_uint32 seq_number_lo, seq_number_hi;
    u_char *buf, *p;
    Checksum cksum;

    *minor_status = 0;

    /* Token must at least contain the fixed 16-byte header */
    if (token_buffer->length < sizeof(*token)) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    p = token_buffer->value;

    token
	  = (gss_cfx_mic_token)p;

    if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Ignore unknown flags */
    token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);

    /* A token flagged sent-by-acceptor must arrive at the initiator */
    if (token_flags & CFXSentByAcceptor) {
	if ((ctx->more_flags & LOCAL) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    }
    /* The subkey flag must match the context's negotiated state */
    if (ctx->more_flags & ACCEPTOR_SUBKEY) {
	if ((token_flags & CFXAcceptorSubkey) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    } else {
	if (token_flags & CFXAcceptorSubkey)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    /* Filler must be five 0xFF bytes; compared via ct_memcmp */
    if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
	return GSS_S_DEFECTIVE_TOKEN;
    }

    /*
     * Check sequence number
     */
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	/* no support for 64-bit sequence numbers */
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
	return ret;
    }
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * Verify checksum
     */
    ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
					&cksum.cksumtype);
    if (ret != 0) {
	*minor_status = ret;
	return GSS_S_FAILURE;
    }

    /* The checksum bytes follow the 16-byte MIC token header */
    cksum.checksum.data = p + sizeof(*token);
    cksum.checksum.length = token_buffer->length - sizeof(*token);

    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
    } else {
	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
    }

    /* Checksum input is { plaintext-data | "header" } */
    buf = malloc(message_buffer->length + sizeof(*token));
    if (buf == NULL) {
	*minor_status = ENOMEM;
	return GSS_S_FAILURE;
    }
    memcpy(buf, message_buffer->value, message_buffer->length);
    memcpy(buf + message_buffer->length, token, sizeof(*token));

    ret = krb5_verify_checksum(context, ctx->crypto,
			       usage,
			       buf,
			       sizeof(*token) + message_buffer->length,
			       &cksum);
    if (ret != 0) {
	*minor_status = ret;
	free(buf);
	return GSS_S_BAD_MIC;
    }

    free(buf);

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    return GSS_S_COMPLETE;
}