/*-
 * Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/usr.sbin/nscd/query.c,v 1.5 2008/10/12 00:44:27 delphij Exp $
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/event.h>
#include <assert.h>
#include <errno.h>
#include <nsswitch.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "debug.h"
#include "query.h"
#include "log.h"
#include "mp_ws_query.h"
#include "mp_rs_query.h"
#include "singletons.h"

static const char negative_data[1] = { 0 };

extern void get_time_func(struct timeval *);

static void clear_config_entry(struct configuration_entry *);
static void clear_config_entry_part(struct configuration_entry *,
	const char *, size_t);

static int on_query_startup(struct query_state *);
static void on_query_destroy(struct query_state *);

static int on_read_request_read1(struct query_state *);
static int on_read_request_read2(struct query_state *);
static int on_read_request_process(struct query_state *);
static int on_read_response_write1(struct query_state *);
static int on_read_response_write2(struct query_state *);

static int on_rw_mapper(struct query_state *);

static int on_transform_request_read1(struct query_state *);
static int on_transform_request_read2(struct query_state *);
static int on_transform_request_process(struct query_state *);
static int on_transform_response_write1(struct query_state *);

static int on_write_request_read1(struct query_state *);
static int on_write_request_read2(struct query_state *);
static int on_negative_write_request_process(struct query_state *);
static int on_write_request_process(struct query_state *);
static int on_write_response_write1(struct query_state *);
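
/*
 * General pattern for the handlers below: each on_* function is installed as
 * qstate->process_func and is expected to be called by the event loop once
 * the client socket is ready in the direction given by qstate->kevent_filter
 * with at least qstate->kevent_watermark bytes available.  Request-reading
 * handlers therefore run in two steps: the first invocation only announces
 * how many bytes the next step needs, the second actually transfers the data
 * and installs the next handler.
 */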

/*
 * Clears the specified configuration entry (clears the cache for positive
 * and negative entries) and also for all multipart entries.
 */
static void
clear_config_entry(struct configuration_entry *config_entry)
{
	size_t i;

	TRACE_IN(clear_config_entry);
	configuration_lock_entry(config_entry, CELT_POSITIVE);
	if (config_entry->positive_cache_entry != NULL)
		transform_cache_entry(
			config_entry->positive_cache_entry,
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_POSITIVE);

	configuration_lock_entry(config_entry, CELT_NEGATIVE);
	if (config_entry->negative_cache_entry != NULL)
		transform_cache_entry(
			config_entry->negative_cache_entry,
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_NEGATIVE);

	configuration_lock_entry(config_entry, CELT_MULTIPART);
	for (i = 0; i < config_entry->mp_cache_entries_size; ++i)
		transform_cache_entry(
			config_entry->mp_cache_entries[i],
			CTT_CLEAR);
	configuration_unlock_entry(config_entry, CELT_MULTIPART);

	TRACE_OUT(clear_config_entry);
}

/*
 * Clears the specified configuration entry by deleting only the elements
 * that are owned by the user with the specified eid_str.
 */
static void
clear_config_entry_part(struct configuration_entry *config_entry,
	const char *eid_str, size_t eid_str_length)
{
	cache_entry *start, *finish, *mp_entry;
	TRACE_IN(clear_config_entry_part);
	configuration_lock_entry(config_entry, CELT_POSITIVE);
	if (config_entry->positive_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->positive_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_POSITIVE);

	configuration_lock_entry(config_entry, CELT_NEGATIVE);
	if (config_entry->negative_cache_entry != NULL)
		transform_cache_entry_part(
			config_entry->negative_cache_entry,
			CTT_CLEAR, eid_str, eid_str_length, KPPT_LEFT);
	configuration_unlock_entry(config_entry, CELT_NEGATIVE);

	configuration_lock_entry(config_entry, CELT_MULTIPART);
	if (configuration_entry_find_mp_cache_entries(config_entry,
		eid_str, &start, &finish) == 0) {
		for (mp_entry = start; mp_entry != finish; ++mp_entry)
			transform_cache_entry(*mp_entry, CTT_CLEAR);
	}
	configuration_unlock_entry(config_entry, CELT_MULTIPART);

	TRACE_OUT(clear_config_entry_part);
}

/*
 * This function is assigned to the query_state structure on its creation.
 * Its main purpose is to receive credentials from the client.
 */
static int
on_query_startup(struct query_state *qstate)
{
	struct msghdr cred_hdr;
	struct iovec iov;
	struct cmsgcred *cred;
	int elem_type;

	struct {
		struct cmsghdr hdr;
		char cred[CMSG_SPACE(sizeof(struct cmsgcred))];
	} cmsg;

	TRACE_IN(on_query_startup);
	assert(qstate != NULL);

	memset(&cred_hdr, 0, sizeof(struct msghdr));
	cred_hdr.msg_iov = &iov;
	cred_hdr.msg_iovlen = 1;
	cred_hdr.msg_control = (caddr_t)&cmsg;
	cred_hdr.msg_controllen = CMSG_LEN(sizeof(struct cmsgcred));

	memset(&iov, 0, sizeof(struct iovec));
	iov.iov_base = &elem_type;
	iov.iov_len = sizeof(int);

	if (recvmsg(qstate->sockfd, &cred_hdr, 0) == -1) {
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	if (cmsg.hdr.cmsg_len < CMSG_LEN(sizeof(struct cmsgcred))
		|| cmsg.hdr.cmsg_level != SOL_SOCKET
		|| cmsg.hdr.cmsg_type != SCM_CREDS) {
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	cred = (struct cmsgcred *)CMSG_DATA(&cmsg);
	qstate->uid = cred->cmcred_uid;
	qstate->gid = cred->cmcred_gid;

#if defined(NS_NSCD_EID_CHECKING) || defined(NS_STRICT_NSCD_EID_CHECKING)
	/*
	 * This check is probably a bit redundant - the per-user cache is
	 * always separated by the euid/egid pair.
	 */
	if (check_query_eids(qstate) != 0) {
#ifdef NS_STRICT_NSCD_EID_CHECKING
		TRACE_OUT(on_query_startup);
		return (-1);
#else
		if ((elem_type != CET_READ_REQUEST) &&
			(elem_type != CET_MP_READ_SESSION_REQUEST) &&
			(elem_type != CET_WRITE_REQUEST) &&
			(elem_type != CET_MP_WRITE_SESSION_REQUEST)) {
			TRACE_OUT(on_query_startup);
			return (-1);
		}
#endif
	}
#endif

	switch (elem_type) {
	case CET_WRITE_REQUEST:
		qstate->process_func = on_write_request_read1;
		break;
	case CET_READ_REQUEST:
		qstate->process_func = on_read_request_read1;
		break;
	case CET_TRANSFORM_REQUEST:
		qstate->process_func = on_transform_request_read1;
		break;
	case CET_MP_WRITE_SESSION_REQUEST:
		qstate->process_func = on_mp_write_session_request_read1;
		break;
	case CET_MP_READ_SESSION_REQUEST:
		qstate->process_func = on_mp_read_session_request_read1;
		break;
	default:
		TRACE_OUT(on_query_startup);
		return (-1);
	}

	qstate->kevent_watermark = 0;
	TRACE_OUT(on_query_startup);
	return (0);
}

/*
 * on_rw_mapper is used to process multiple read/write requests during
 * one connection session.  It is never called at the start of a session
 * (on query_state creation), since it does not handle multipart requests
 * and does not receive credentials.
 */
static int
on_rw_mapper(struct query_state *qstate)
{
	ssize_t result;
	int elem_type;

	TRACE_IN(on_rw_mapper);
	if (qstate->kevent_watermark == 0) {
		qstate->kevent_watermark = sizeof(int);
	} else {
		result = qstate->read_func(qstate, &elem_type, sizeof(int));
		if (result != sizeof(int)) {
			TRACE_OUT(on_rw_mapper);
			return (-1);
		}

		switch (elem_type) {
		case CET_WRITE_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_write_request_read1;
			break;
		case CET_READ_REQUEST:
			qstate->kevent_watermark = sizeof(size_t);
			qstate->process_func = on_read_request_read1;
			break;
		default:
			TRACE_OUT(on_rw_mapper);
			return (-1);
			break;
		}
	}
	TRACE_OUT(on_rw_mapper);
	return (0);
}

/*
 * The default query_destroy function
 */
static void
on_query_destroy(struct query_state *qstate)
{

	TRACE_IN(on_query_destroy);
	finalize_comm_element(&qstate->response);
	finalize_comm_element(&qstate->request);
	TRACE_OUT(on_query_destroy);
}

/*
 * The functions below are used to process write requests.
 * - on_write_request_read1 and on_write_request_read2 read the request itself
 * - on_write_request_process processes it (if the client requests to
 *   cache a negative result, on_negative_write_request_process is used)
 * - on_write_response_write1 sends the response
 */
static int
on_write_request_read1(struct query_state *qstate)
{
	struct cache_write_request *write_request;
	ssize_t result;

	TRACE_IN(on_write_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) * 3;
	else {
		init_comm_element(&qstate->request, CET_WRITE_REQUEST);
		write_request = get_cache_write_request(&qstate->request);

		result = qstate->read_func(qstate, &write_request->entry_length,
			sizeof(size_t));
		result += qstate->read_func(qstate,
			&write_request->cache_key_size, sizeof(size_t));
		result += qstate->read_func(qstate,
			&write_request->data_size, sizeof(size_t));

		if (result != sizeof(size_t) * 3) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		if (BUFSIZE_INVALID(write_request->entry_length) ||
			BUFSIZE_INVALID(write_request->cache_key_size) ||
			(BUFSIZE_INVALID(write_request->data_size) &&
			(write_request->data_size != 0))) {
			TRACE_OUT(on_write_request_read1);
			return (-1);
		}

		write_request->entry = (char *)calloc(1,
			write_request->entry_length + 1);
		assert(write_request->entry != NULL);

		write_request->cache_key = (char *)calloc(1,
			write_request->cache_key_size +
			qstate->eid_str_length);
		assert(write_request->cache_key != NULL);
		memcpy(write_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);

		if (write_request->data_size != 0) {
			write_request->data = (char *)calloc(1,
				write_request->data_size);
			assert(write_request->data != NULL);
		}

		qstate->kevent_watermark = write_request->entry_length +
			write_request->cache_key_size +
			write_request->data_size;
		qstate->process_func = on_write_request_read2;
	}

	TRACE_OUT(on_write_request_read1);
	return (0);
}
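
/*
 * On the wire a write request is three size_t fields (entry_length,
 * cache_key_size, data_size) followed by the entry name, the cache key and,
 * if data_size is non-zero, the data itself.  The key is stored with the
 * client's eid_str prepended, which keeps the cached data of different
 * users apart.
 */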

static int
on_write_request_read2(struct query_state *qstate)
{
	struct cache_write_request *write_request;
	ssize_t result;

	TRACE_IN(on_write_request_read2);
	write_request = get_cache_write_request(&qstate->request);

	result = qstate->read_func(qstate, write_request->entry,
		write_request->entry_length);
	result += qstate->read_func(qstate, write_request->cache_key +
		qstate->eid_str_length, write_request->cache_key_size);
	if (write_request->data_size != 0)
		result += qstate->read_func(qstate, write_request->data,
			write_request->data_size);

	if (result != qstate->kevent_watermark) {
		TRACE_OUT(on_write_request_read2);
		return (-1);
	}
	write_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	if (write_request->data_size != 0)
		qstate->process_func = on_write_request_process;
	else
		qstate->process_func = on_negative_write_request_process;
	TRACE_OUT(on_write_request_read2);
	return (0);
}

static int
on_write_request_process(struct query_state *qstate)
{
	struct cache_write_request *write_request;
	struct cache_write_response *write_response;
	cache_entry c_entry;

	TRACE_IN(on_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("write_request", "can't find configuration"
			" entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			write_request->data,
			write_request->data_size);
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
			(qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));

	} else
		write_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_write_request_process);
	return (0);
}
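
/*
 * A write request whose data_size is zero is a request to cache a negative
 * result: on_negative_write_request_process stores the one-byte negative_data
 * marker in the entry's negative cache under the client's key instead of an
 * actual payload.
 */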

static int
on_negative_write_request_process(struct query_state *qstate)
{
	struct cache_write_request *write_request;
	struct cache_write_response *write_response;
	cache_entry c_entry;

	TRACE_IN(on_negative_write_request_process);
	init_comm_element(&qstate->response, CET_WRITE_RESPONSE);
	write_response = get_cache_write_response(&qstate->response);
	write_request = get_cache_write_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, write_request->entry);

	if (qstate->config_entry == NULL) {
		write_response->error_code = ENOENT;

		LOG_ERR_2("negative_write_request",
			"can't find configuration"
			" entry '%s'. aborting request", write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		write_response->error_code = EACCES;

		LOG_ERR_2("negative_write_request",
			"configuration entry '%s' is disabled",
			write_request->entry);
		goto fin;
	}

	if (qstate->config_entry->perform_actual_lookups != 0) {
		write_response->error_code = EOPNOTSUPP;

		LOG_ERR_2("negative_write_request",
			"entry '%s' performs lookups by itself: "
			"can't write to it", write_request->entry);
		goto fin;
	} else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
			write_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if (c_entry != NULL) {
		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = c_entry;
		write_response->error_code = cache_write(c_entry,
			write_request->cache_key,
			write_request->cache_key_size,
			negative_data,
			sizeof(negative_data));
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
			(qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		write_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_write_response_write1;

	TRACE_OUT(on_negative_write_request_process);
	return (0);
}

static int
on_write_response_write1(struct query_state *qstate)
{
	struct cache_write_response *write_response;
	ssize_t result;

	TRACE_IN(on_write_response_write1);
	write_response = get_cache_write_response(&qstate->response);
	result = qstate->write_func(qstate, &write_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_write_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;

	TRACE_OUT(on_write_response_write1);
	return (0);
}

/*
 * The functions below are used to process read requests.
 * - on_read_request_read1 and on_read_request_read2 read the request itself
 * - on_read_request_process processes it
 * - on_read_response_write1 and on_read_response_write2 send the response
 */
static int
on_read_request_read1(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	ssize_t result;

	TRACE_IN(on_read_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) * 2;
	else {
		init_comm_element(&qstate->request, CET_READ_REQUEST);
		read_request = get_cache_read_request(&qstate->request);

		result = qstate->read_func(qstate,
			&read_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
			&read_request->cache_key_size, sizeof(size_t));

		if (result != sizeof(size_t) * 2) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		if (BUFSIZE_INVALID(read_request->entry_length) ||
			BUFSIZE_INVALID(read_request->cache_key_size)) {
			TRACE_OUT(on_read_request_read1);
			return (-1);
		}

		read_request->entry = (char *)calloc(1,
			read_request->entry_length + 1);
		assert(read_request->entry != NULL);

		read_request->cache_key = (char *)calloc(1,
			read_request->cache_key_size +
			qstate->eid_str_length);
		assert(read_request->cache_key != NULL);
		memcpy(read_request->cache_key, qstate->eid_str,
			qstate->eid_str_length);

		qstate->kevent_watermark = read_request->entry_length +
			read_request->cache_key_size;
		qstate->process_func = on_read_request_read2;
	}

	TRACE_OUT(on_read_request_read1);
	return (0);
}

static int
on_read_request_read2(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	ssize_t result;

	TRACE_IN(on_read_request_read2);
	read_request = get_cache_read_request(&qstate->request);

	result = qstate->read_func(qstate, read_request->entry,
		read_request->entry_length);
	result += qstate->read_func(qstate,
		read_request->cache_key + qstate->eid_str_length,
		read_request->cache_key_size);

	if (result != qstate->kevent_watermark) {
		TRACE_OUT(on_read_request_read2);
		return (-1);
	}
	read_request->cache_key_size += qstate->eid_str_length;

	qstate->kevent_watermark = 0;
	qstate->process_func = on_read_request_process;

	TRACE_OUT(on_read_request_read2);
	return (0);
}
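
/*
 * on_read_request_process below probes the positive cache twice: the first
 * cache_read() call passes a NULL buffer, and a -2 result is taken to mean
 * that read_response->data_size now holds the required size, so a buffer is
 * allocated and cache_read() is called again.  A -1 result is treated as a
 * miss, which is then answered from the negative cache or, for entries with
 * perform_actual_lookups set, by calling the configured lookup agent and
 * caching whatever it returns.
 */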

static int
on_read_request_process(struct query_state *qstate)
{
	struct cache_read_request *read_request;
	struct cache_read_response *read_response;
	cache_entry c_entry, neg_c_entry;

	struct agent *lookup_agent;
	struct common_agent *c_agent;
	int res;

	TRACE_IN(on_read_request_process);
	init_comm_element(&qstate->response, CET_READ_RESPONSE);
	read_response = get_cache_read_response(&qstate->response);
	read_request = get_cache_read_request(&qstate->request);

	qstate->config_entry = configuration_find_entry(
		s_configuration, read_request->entry);
	if (qstate->config_entry == NULL) {
		read_response->error_code = ENOENT;

		LOG_ERR_2("read_request",
			"can't find configuration "
			"entry '%s'. aborting request", read_request->entry);
		goto fin;
	}

	if (qstate->config_entry->enabled == 0) {
		read_response->error_code = EACCES;

		LOG_ERR_2("read_request",
			"configuration entry '%s' is disabled",
			read_request->entry);
		goto fin;
	}

	/*
	 * If we perform lookups by ourselves, then we don't need to separate
	 * cache entries by euid and egid.
	 */
	if (qstate->config_entry->perform_actual_lookups != 0)
		memset(read_request->cache_key, 0, qstate->eid_str_length);
	else {
#ifdef NS_NSCD_EID_CHECKING
		if (check_query_eids(qstate) != 0) {
			/* if the lookup is not self-performing, we check the client's euid/egid */
			read_response->error_code = EPERM;
			goto fin;
		}
#endif
	}

	configuration_lock_rdlock(s_configuration);
	c_entry = find_cache_entry(s_cache,
		qstate->config_entry->positive_cache_params.entry_name);
	neg_c_entry = find_cache_entry(s_cache,
		qstate->config_entry->negative_cache_params.entry_name);
	configuration_unlock(s_configuration);
	if ((c_entry != NULL) && (neg_c_entry != NULL)) {
		configuration_lock_entry(qstate->config_entry, CELT_POSITIVE);
		qstate->config_entry->positive_cache_entry = c_entry;
		read_response->error_code = cache_read(c_entry,
			read_request->cache_key,
			read_request->cache_key_size, NULL,
			&read_response->data_size);

		if (read_response->error_code == -2) {
			read_response->data = (char *)malloc(
				read_response->data_size);
			assert(read_response->data != NULL);
			read_response->error_code = cache_read(c_entry,
				read_request->cache_key,
				read_request->cache_key_size,
				read_response->data,
				&read_response->data_size);
		}
		configuration_unlock_entry(qstate->config_entry, CELT_POSITIVE);

		configuration_lock_entry(qstate->config_entry, CELT_NEGATIVE);
		qstate->config_entry->negative_cache_entry = neg_c_entry;
		if (read_response->error_code == -1) {
			read_response->error_code = cache_read(neg_c_entry,
				read_request->cache_key,
				read_request->cache_key_size, NULL,
				&read_response->data_size);

			if (read_response->error_code == -2) {
				read_response->error_code = 0;
				read_response->data = NULL;
				read_response->data_size = 0;
			}
		}
		configuration_unlock_entry(qstate->config_entry, CELT_NEGATIVE);

		if ((read_response->error_code == -1) &&
			(qstate->config_entry->perform_actual_lookups != 0)) {
			free(read_response->data);
			read_response->data = NULL;
			read_response->data_size = 0;

			lookup_agent = find_agent(s_agent_table,
				read_request->entry, COMMON_AGENT);

			if ((lookup_agent != NULL) &&
				(lookup_agent->type == COMMON_AGENT)) {
				c_agent = (struct common_agent *)lookup_agent;
				res = c_agent->lookup_func(
					read_request->cache_key +
					qstate->eid_str_length,
					read_request->cache_key_size -
					qstate->eid_str_length,
					&read_response->data,
					&read_response->data_size);

				if (res == NS_SUCCESS) {
					read_response->error_code = 0;
					configuration_lock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
					cache_write(c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						read_response->data,
						read_response->data_size);
					configuration_unlock_entry(
						qstate->config_entry,
						CELT_POSITIVE);
				} else if ((res == NS_NOTFOUND) ||
					(res == NS_RETURN)) {
					configuration_lock_entry(
						qstate->config_entry,
						CELT_NEGATIVE);
					cache_write(neg_c_entry,
						read_request->cache_key,
						read_request->cache_key_size,
						negative_data,
						sizeof(negative_data));
					configuration_unlock_entry(
						qstate->config_entry,
						CELT_NEGATIVE);

					read_response->error_code = 0;
					read_response->data = NULL;
					read_response->data_size = 0;
				}
			}
		}

		if ((qstate->config_entry->common_query_timeout.tv_sec != 0) ||
			(qstate->config_entry->common_query_timeout.tv_usec != 0))
			memcpy(&qstate->timeout,
				&qstate->config_entry->common_query_timeout,
				sizeof(struct timeval));
	} else
		read_response->error_code = -1;

fin:
	qstate->kevent_filter = EVFILT_WRITE;
	if (read_response->error_code == 0)
		qstate->kevent_watermark = sizeof(int) + sizeof(size_t);
	else
		qstate->kevent_watermark = sizeof(int);
	qstate->process_func = on_read_response_write1;

	TRACE_OUT(on_read_request_process);
	return (0);
}

static int
on_read_response_write1(struct query_state *qstate)
{
	struct cache_read_response *read_response;
	ssize_t result;

	TRACE_IN(on_read_response_write1);
	read_response = get_cache_read_response(&qstate->response);

	result = qstate->write_func(qstate, &read_response->error_code,
		sizeof(int));

	if (read_response->error_code == 0) {
		result += qstate->write_func(qstate, &read_response->data_size,
			sizeof(size_t));
		if (result != qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		qstate->kevent_watermark = read_response->data_size;
		qstate->process_func = on_read_response_write2;
	} else {
		if (result != qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write1);
			return (-1);
		}

		qstate->kevent_watermark = 0;
		qstate->process_func = NULL;
	}

	TRACE_OUT(on_read_response_write1);
	return (0);
}

static int
on_read_response_write2(struct query_state *qstate)
{
	struct cache_read_response *read_response;
	ssize_t result;

	TRACE_IN(on_read_response_write2);
	read_response = get_cache_read_response(&qstate->response);
	if (read_response->data_size > 0) {
		result = qstate->write_func(qstate, read_response->data,
			read_response->data_size);
		if (result != qstate->kevent_watermark) {
			TRACE_OUT(on_read_response_write2);
			return (-1);
		}
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = sizeof(int);
	qstate->kevent_filter = EVFILT_READ;
	qstate->process_func = on_rw_mapper;
	TRACE_OUT(on_read_response_write2);
	return (0);
}
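
/*
 * Transform requests do not return data; they flush cached entries.
 * TT_USER clears only the elements that carry the requesting user's eid_str
 * prefix, while TT_ALL clears whole cache entries and is honoured only for
 * clients with euid 0.
 */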

/*
 * The functions below are used to process transform requests.
 * - on_transform_request_read1 and on_transform_request_read2 read the
 *   request itself
 * - on_transform_request_process processes it
 * - on_transform_response_write1 sends the response
 */
static int
on_transform_request_read1(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	ssize_t result;

	TRACE_IN(on_transform_request_read1);
	if (qstate->kevent_watermark == 0)
		qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
	else {
		init_comm_element(&qstate->request, CET_TRANSFORM_REQUEST);
		transform_request =
			get_cache_transform_request(&qstate->request);

		result = qstate->read_func(qstate,
			&transform_request->entry_length, sizeof(size_t));
		result += qstate->read_func(qstate,
			&transform_request->transformation_type, sizeof(int));

		if (result != sizeof(size_t) + sizeof(int)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if ((transform_request->transformation_type != TT_USER) &&
			(transform_request->transformation_type != TT_ALL)) {
			TRACE_OUT(on_transform_request_read1);
			return (-1);
		}

		if (transform_request->entry_length != 0) {
			if (BUFSIZE_INVALID(transform_request->entry_length)) {
				TRACE_OUT(on_transform_request_read1);
				return (-1);
			}

			transform_request->entry = (char *)calloc(1,
				transform_request->entry_length + 1);
			assert(transform_request->entry != NULL);

			qstate->process_func = on_transform_request_read2;
		} else
			qstate->process_func = on_transform_request_process;

		qstate->kevent_watermark = transform_request->entry_length;
	}

	TRACE_OUT(on_transform_request_read1);
	return (0);
}

static int
on_transform_request_read2(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	ssize_t result;

	TRACE_IN(on_transform_request_read2);
	transform_request = get_cache_transform_request(&qstate->request);

	result = qstate->read_func(qstate, transform_request->entry,
		transform_request->entry_length);

	if (result != qstate->kevent_watermark) {
		TRACE_OUT(on_transform_request_read2);
		return (-1);
	}

	qstate->kevent_watermark = 0;
	qstate->process_func = on_transform_request_process;

	TRACE_OUT(on_transform_request_read2);
	return (0);
}

static int
on_transform_request_process(struct query_state *qstate)
{
	struct cache_transform_request *transform_request;
	struct cache_transform_response *transform_response;
	struct configuration_entry *config_entry;
	size_t i, size;

	TRACE_IN(on_transform_request_process);
	init_comm_element(&qstate->response, CET_TRANSFORM_RESPONSE);
	transform_response = get_cache_transform_response(&qstate->response);
	transform_request = get_cache_transform_request(&qstate->request);

	switch (transform_request->transformation_type) {
	case TT_USER:
		if (transform_request->entry == NULL) {
			size = configuration_get_entries_size(s_configuration);
			for (i = 0; i < size; ++i) {
				config_entry = configuration_get_entry(
					s_configuration, i);

				if (config_entry->perform_actual_lookups == 0)
					clear_config_entry_part(config_entry,
						qstate->eid_str,
						qstate->eid_str_length);
			}
		} else {
			qstate->config_entry = configuration_find_entry(
				s_configuration, transform_request->entry);

			if (qstate->config_entry == NULL) {
				LOG_ERR_2("transform_request",
					"can't find configuration"
					" entry '%s'. aborting request",
					transform_request->entry);
				transform_response->error_code = -1;
				goto fin;
			}

			if (qstate->config_entry->perform_actual_lookups != 0) {
				LOG_ERR_2("transform_request",
					"can't transform the cache entry %s"
					", because it is used for actual lookups",
					transform_request->entry);
				transform_response->error_code = -1;
				goto fin;
			}

			clear_config_entry_part(qstate->config_entry,
				qstate->eid_str, qstate->eid_str_length);
		}
		break;
	case TT_ALL:
		if (qstate->euid != 0)
			transform_response->error_code = -1;
		else {
			if (transform_request->entry == NULL) {
				size = configuration_get_entries_size(
					s_configuration);
				for (i = 0; i < size; ++i) {
					clear_config_entry(
						configuration_get_entry(
						s_configuration, i));
				}
			} else {
				qstate->config_entry = configuration_find_entry(
					s_configuration,
					transform_request->entry);

				if (qstate->config_entry == NULL) {
					LOG_ERR_2("transform_request",
						"can't find configuration"
						" entry '%s'. aborting request",
						transform_request->entry);
					transform_response->error_code = -1;
					goto fin;
				}

				clear_config_entry(qstate->config_entry);
			}
		}
		break;
	default:
		transform_response->error_code = -1;
	}

fin:
	qstate->kevent_watermark = 0;
	qstate->process_func = on_transform_response_write1;
	TRACE_OUT(on_transform_request_process);
	return (0);
}

static int
on_transform_response_write1(struct query_state *qstate)
{
	struct cache_transform_response *transform_response;
	ssize_t result;

	TRACE_IN(on_transform_response_write1);
	transform_response = get_cache_transform_response(&qstate->response);
	result = qstate->write_func(qstate, &transform_response->error_code,
		sizeof(int));
	if (result != sizeof(int)) {
		TRACE_OUT(on_transform_response_write1);
		return (-1);
	}

	finalize_comm_element(&qstate->request);
	finalize_comm_element(&qstate->response);

	qstate->kevent_watermark = 0;
	qstate->process_func = NULL;
	TRACE_OUT(on_transform_response_write1);
	return (0);
}

/*
 * Checks if the client's euid and egid do not differ from its uid and gid.
 * Returns 0 on success.
 */
int
check_query_eids(struct query_state *qstate)
{

	return ((qstate->uid != qstate->euid) || (qstate->gid != qstate->egid) ?
		-1 : 0);
}
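
/*
 * The query_io_buffer_* functions implement the "alternate" I/O mode: data
 * that cannot be transferred in one socket operation is staged in
 * qstate->io_buffer, and the code that fills that buffer points read_func and
 * write_func at these helpers.  Once io_buffer_p reaches the end of the
 * buffer, the default socket-based functions are restored.
 */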

/*
 * Uses the qstate fields to process an "alternate" read - when the buffer is
 * too large to be received during one socket read operation
 */
ssize_t
query_io_buffer_read(struct query_state *qstate, void *buf, size_t nbytes)
{
	ssize_t result;

	TRACE_IN(query_io_buffer_read);
	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
		return (-1);

	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
		qstate->io_buffer_p)
		result = nbytes;
	else
		result = qstate->io_buffer + qstate->io_buffer_size -
			qstate->io_buffer_p;

	memcpy(buf, qstate->io_buffer_p, result);
	qstate->io_buffer_p += result;

	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
		free(qstate->io_buffer);
		qstate->io_buffer = NULL;

		qstate->write_func = query_socket_write;
		qstate->read_func = query_socket_read;
	}

	TRACE_OUT(query_io_buffer_read);
	return (result);
}

/*
 * Uses the qstate fields to process an "alternate" write - when the buffer is
 * too large to be sent during one socket write operation
 */
ssize_t
query_io_buffer_write(struct query_state *qstate, const void *buf,
	size_t nbytes)
{
	ssize_t result;

	TRACE_IN(query_io_buffer_write);
	if ((qstate->io_buffer_size == 0) || (qstate->io_buffer == NULL))
		return (-1);

	if (nbytes < qstate->io_buffer + qstate->io_buffer_size -
		qstate->io_buffer_p)
		result = nbytes;
	else
		result = qstate->io_buffer + qstate->io_buffer_size -
			qstate->io_buffer_p;

	memcpy(qstate->io_buffer_p, buf, result);
	qstate->io_buffer_p += result;

	if (qstate->io_buffer_p == qstate->io_buffer + qstate->io_buffer_size) {
		qstate->use_alternate_io = 1;
		qstate->io_buffer_p = qstate->io_buffer;

		qstate->write_func = query_socket_write;
		qstate->read_func = query_socket_read;
	}

	TRACE_OUT(query_io_buffer_write);
	return (result);
}

/*
 * The default "read" function, which reads data directly from the socket
 */
ssize_t
query_socket_read(struct query_state *qstate, void *buf, size_t nbytes)
{
	ssize_t result;

	TRACE_IN(query_socket_read);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_read);
		return (-1);
	}

	result = read(qstate->sockfd, buf, nbytes);
	if ((result == -1) || (result < nbytes))
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_read);
	return (result);
}

/*
 * The default "write" function, which writes data directly to the socket
 */
ssize_t
query_socket_write(struct query_state *qstate, const void *buf, size_t nbytes)
{
	ssize_t result;

	TRACE_IN(query_socket_write);
	if (qstate->socket_failed != 0) {
		TRACE_OUT(query_socket_write);
		return (-1);
	}

	result = write(qstate->sockfd, buf, nbytes);
	if ((result == -1) || (result < nbytes))
		qstate->socket_failed = 1;

	TRACE_OUT(query_socket_write);
	return (result);
}
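
/*
 * eid_str, built below as "<euid>_<egid>_", is the per-user prefix that the
 * read and write handlers above prepend to every cache key; this is what
 * keeps the per-user caches separate.
 */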

/*
 * Initializes the query_state structure by filling it with the default values.
 */
struct query_state *
init_query_state(int sockfd, size_t kevent_watermark, uid_t euid, gid_t egid)
{
	struct query_state *retval;

	TRACE_IN(init_query_state);
	retval = (struct query_state *)calloc(1, sizeof(struct query_state));
	assert(retval != NULL);

	retval->sockfd = sockfd;
	retval->kevent_filter = EVFILT_READ;
	retval->kevent_watermark = kevent_watermark;

	retval->euid = euid;
	retval->egid = egid;
	retval->uid = retval->gid = -1;

	if (asprintf(&retval->eid_str, "%d_%d_", retval->euid,
		retval->egid) == -1) {
		free(retval);
		return (NULL);
	}
	retval->eid_str_length = strlen(retval->eid_str);

	init_comm_element(&retval->request, CET_UNDEFINED);
	init_comm_element(&retval->response, CET_UNDEFINED);
	retval->process_func = on_query_startup;
	retval->destroy_func = on_query_destroy;

	retval->write_func = query_socket_write;
	retval->read_func = query_socket_read;

	get_time_func(&retval->creation_time);
	memcpy(&retval->timeout, &s_configuration->query_timeout,
		sizeof(struct timeval));

	TRACE_OUT(init_query_state);
	return (retval);
}

void
destroy_query_state(struct query_state *qstate)
{

	TRACE_IN(destroy_query_state);
	if (qstate->eid_str != NULL)
		free(qstate->eid_str);

	if (qstate->io_buffer != NULL)
		free(qstate->io_buffer);

	qstate->destroy_func(qstate);
	free(qstate);
	TRACE_OUT(destroy_query_state);
}