1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 1989, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25 /*
26 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
27 */
28 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
29 /* All Rights Reserved */
30 /*
31 * Portions of this source code were derived from Berkeley
32 * 4.3 BSD under license from the Regents of the University of
33 * California.
34 */
35
36 /*
37 * svc.c, Server-side remote procedure call interface.
38 *
39 * There are two sets of procedures here. The xprt routines are
40 * for handling transport handles. The svc routines handle the
41 * list of service routines.
42 *
43 */
44
45
46 #include "mt.h"
47 #include "rpc_mt.h"
48 #include <assert.h>
49 #include <errno.h>
50 #include <sys/types.h>
51 #include <stropts.h>
52 #include <sys/conf.h>
53 #include <rpc/rpc.h>
54 #ifdef PORTMAP
55 #include <rpc/pmap_clnt.h>
56 #endif
57 #include <sys/poll.h>
58 #include <netconfig.h>
59 #include <syslog.h>
60 #include <stdlib.h>
61 #include <unistd.h>
62 #include <string.h>
63 #include <limits.h>
64
65 extern bool_t __svc_get_door_cred();
66 extern bool_t __rpc_get_local_cred();
67
68 SVCXPRT **svc_xports;
69 static int nsvc_xports; /* total number of svc_xports allocated */
70
71 XDR **svc_xdrs; /* common XDR receive area */
72 int nsvc_xdrs; /* total number of svc_xdrs allocated */
73
74 int __rpc_use_pollfd_done; /* to remove the limit on the number of connections */
75
76 #define NULL_SVC ((struct svc_callout *)0)
77 #define RQCRED_SIZE 400 /* this size is excessive */
78
79 /*
80 * The services list
81 * Each entry represents a set of procedures (an rpc program).
82 * The dispatch routine takes request structs and runs the
83 * appropriate procedure.
84 */
85 static struct svc_callout {
86 struct svc_callout *sc_next;
87 rpcprog_t sc_prog;
88 rpcvers_t sc_vers;
89 char *sc_netid;
90 void (*sc_dispatch)();
91 } *svc_head;
92 extern rwlock_t svc_lock;
93
94 static struct svc_callout *svc_find();
95 int _svc_prog_dispatch();
96 void svc_getreq_common();
97 char *strdup();
98
99 extern mutex_t svc_door_mutex;
100 extern cond_t svc_door_waitcv;
101 extern int svc_ndoorfds;
102 extern SVCXPRT_LIST *_svc_xprtlist;
103 extern mutex_t xprtlist_lock;
104 extern void __svc_rm_from_xlist();
105
106 extern fd_set _new_svc_fdset;
107
108 /*
109 * If the allocated array of reactors is too small, it is grown by this
110 * margin. This reduces the number of allocations.
111 */
112 #define USER_FD_INCREMENT 5
113
114 static void add_pollfd(int fd, short events);
115 static void remove_pollfd(int fd);
116 static void __svc_remove_input_of_fd(int fd);
117
118
119 /*
120 * Data used to handle reactor:
121 * - one file descriptor we listen to,
122 * - one callback we call if the fd pops,
123 * - and a cookie passed as a parameter to the callback.
124 *
125 * The structure is an array indexed by file descriptor. Each entry points
126 * to the first element of a doubly-linked list of callbacks.
127 * Only one callback may be associated with a given (fd, event) pair.
128 */
129
130 struct _svc_user_fd_head;
131
132 typedef struct {
133 struct _svc_user_fd_node *next;
134 struct _svc_user_fd_node *previous;
135 } _svc_user_link;
136
137 typedef struct _svc_user_fd_node {
138 /* The lnk field must be the first field. */
139 _svc_user_link lnk;
140 svc_input_id_t id;
141 int fd;
142 unsigned int events;
143 svc_callback_t callback;
144 void* cookie;
145 } _svc_user_fd_node;
146
147 typedef struct _svc_user_fd_head {
148 /* The lnk field must be the first field. */
149 _svc_user_link lnk;
150 unsigned int mask; /* logical OR of all sub-masks */
151 } _svc_user_fd_head;
152
153
154 /* Define some macros to manage the linked list. */
155 #define LIST_ISEMPTY(l) ((_svc_user_fd_node *) &(l.lnk) == l.lnk.next)
156 #define LIST_CLR(l) \
157 (l.lnk.previous = l.lnk.next = (_svc_user_fd_node *) &(l.lnk))
158
159 /* Array of defined reactors, indexed by file descriptor */
160 static _svc_user_fd_head *svc_userfds = NULL;
161
162 /* current size of the svc_userfds array */
163 static int svc_nuserfds = 0;
164
165 /* Mutex to ensure MT safe operations for user fds callbacks. */
166 static mutex_t svc_userfds_lock = DEFAULTMUTEX;
167
168
169 /*
170 * This structure is used to get constant-time algorithms. There is an array
171 * of this structure as large as svc_nuserfds. When the user registers a
172 * new callback, the address of the created structure is stored in a cell of
173 * this array. The address of this cell is the returned unique identifier.
174 *
175 * On removal, the user supplies the id; the free flag then tells whether
176 * the cell is in use. If it is free, we return an error. Otherwise,
177 * we can free the structure pointed to by fd_node.
178 *
179 * On insertion, we use the linked list created by (first_free,
180 * next_free). In this way, with a constant-time computation, we can give a
181 * correct index to the user.
182 */
183
184 typedef struct _svc_management_user_fd {
185 bool_t free;
186 union {
187 svc_input_id_t next_free;
188 _svc_user_fd_node *fd_node;
189 } data;
190 } _svc_management_user_fd;
191
192 /* index to the first free elem */
193 static svc_input_id_t first_free = (svc_input_id_t)-1;
194 /* the size of this array is the same as svc_nuserfds */
195 static _svc_management_user_fd* user_fd_mgt_array = NULL;
196
197 /* current size of user_fd_mgt_array */
198 static int svc_nmgtuserfds = 0;
199
200
201 /* Define some macros to access data associated to registration ids. */
202 #define node_from_id(id) (user_fd_mgt_array[(int)id].data.fd_node)
203 #define is_free_id(id) (user_fd_mgt_array[(int)id].free)
204
205 #ifndef POLLSTANDARD
206 #define POLLSTANDARD \
207 (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND| \
208 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL)
209 #endif
210
211 /*
212 * To free an id, we mark the cell as free and insert the id into the
213 * list of free cells.
214 */
215
216 static void
217 _svc_free_id(const svc_input_id_t id)
218 {
219 assert(((int)id >= 0) && ((int)id < svc_nmgtuserfds));
220 user_fd_mgt_array[(int)id].free = TRUE;
221 user_fd_mgt_array[(int)id].data.next_free = first_free;
222 first_free = id;
223 }
224
225 /*
226 * To get a free cell, we just have to take it from the free linked list and
227 * set the flag to "not free". This function also allocates new memory if
228 * necessary.
229 */
230 static svc_input_id_t
231 _svc_attribute_new_id(_svc_user_fd_node *node)
232 {
233 int selected_index = (int)first_free;
234 assert(node != NULL);
235
236 if (selected_index == -1) {
237 /* Allocate new entries */
238 int L_inOldSize = svc_nmgtuserfds;
239 int i;
240
241 svc_nmgtuserfds += USER_FD_INCREMENT;
242
243 user_fd_mgt_array = (_svc_management_user_fd *)
244 realloc(user_fd_mgt_array, svc_nmgtuserfds
245 * sizeof (_svc_management_user_fd));
246
247 if (user_fd_mgt_array == NULL) {
248 syslog(LOG_ERR, "_svc_attribute_new_id: out of memory");
249 errno = ENOMEM;
250 return ((svc_input_id_t)-1);
251 }
252
253 for (i = svc_nmgtuserfds - 1; i >= L_inOldSize; i--)
254 _svc_free_id((svc_input_id_t)i);
255 selected_index = (int)first_free;
256 }
257
258 node->id = (svc_input_id_t)selected_index;
259 first_free = user_fd_mgt_array[selected_index].data.next_free;
260
261 user_fd_mgt_array[selected_index].data.fd_node = node;
262 user_fd_mgt_array[selected_index].free = FALSE;
263
264 return ((svc_input_id_t)selected_index);
265 }
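
/*
 * Illustrative sketch (not part of the original file, wrapped in #if 0):
 * the id lifecycle through the free list above.  Allocation pops the head
 * of the list maintained by _svc_free_id(); node_from_id()/is_free_id()
 * then give O(1) access from a registration id.  example_id_lifecycle()
 * is a hypothetical helper.
 */
#if 0
static void
example_id_lifecycle(_svc_user_fd_node *node)
{
	svc_input_id_t id = _svc_attribute_new_id(node);

	if ((int)id != -1) {
		assert(!is_free_id(id) && node_from_id(id) == node);
		_svc_free_id(id);	/* the cell returns to the free list */
		assert(is_free_id(id));
	}
}
#endif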
266
267 /*
268 * Process a pollfd. Scan all the associated callbacks whose mask has
269 * at least one bit in common with the received events.
270 *
271 * If the POLLNVAL event is received, we check whether one of the callbacks
272 * processes it; if not, we remove the file descriptor from the poll set.
273 * If there is one, we let the user do the work.
274 */
275 void
276 __svc_getreq_user(struct pollfd *pfd)
277 {
278 int fd = pfd->fd;
279 short revents = pfd->revents;
280 bool_t invalHandled = FALSE;
281 _svc_user_fd_node *node;
282
283 (void) mutex_lock(&svc_userfds_lock);
284
285 if ((fd < 0) || (fd >= svc_nuserfds)) {
286 (void) mutex_unlock(&svc_userfds_lock);
287 return;
288 }
289
290 node = svc_userfds[fd].lnk.next;
291
292 /* check if at least one mask fits */
293 if (0 == (revents & svc_userfds[fd].mask)) {
294 (void) mutex_unlock(&svc_userfds_lock);
295 return;
296 }
297
298 while ((svc_userfds[fd].mask != 0) &&
299 ((_svc_user_link *)node != &(svc_userfds[fd].lnk))) {
300 /*
301 * If one of the received events matches the ones the node
302 * listens to.
303 */
304 _svc_user_fd_node *next = node->lnk.next;
305
306 if (node->callback != NULL) {
307 if (node->events & revents) {
308 if (revents & POLLNVAL) {
309 invalHandled = TRUE;
310 }
311
312 /*
313 * The lock must be released before calling the
314 * user function, as this function can call
315 * svc_remove_input() for example.
316 */
317 (void) mutex_unlock(&svc_userfds_lock);
318 node->callback(node->id, node->fd,
319 node->events & revents, node->cookie);
320 /*
321 * Do not use the node structure anymore, as it
322 * could have been deallocated by the previous
323 * callback.
324 */
325 (void) mutex_lock(&svc_userfds_lock);
326 }
327 }
328 node = next;
329 }
330
331 if ((revents & POLLNVAL) && !invalHandled)
332 __svc_remove_input_of_fd(fd);
333 (void) mutex_unlock(&svc_userfds_lock);
334 }
335
336
337 /*
338 * Check if a file descriptor is associated with a user reactor.
339 * To do this, just check that the array entry indexed by fd has a non-empty
340 * linked list (i.e. its first element is not NULL).
341 */
342 bool_t
343 __is_a_userfd(int fd)
344 {
345 /* Checks argument */
346 if ((fd < 0) || (fd >= svc_nuserfds))
347 return (FALSE);
348 return ((svc_userfds[fd].mask == 0x0000)? FALSE:TRUE);
349 }
350
351 /* Free everything concerning user fds. */
352 /* Used in svc_run.c, hence not static. */
353
354 void
355 __destroy_userfd(void)
356 {
357 int one_fd;
358 /* Clean user fd */
359 if (svc_userfds != NULL) {
360 for (one_fd = 0; one_fd < svc_nuserfds; one_fd++) {
361 _svc_user_fd_node *node;
362
363 node = svc_userfds[one_fd].lnk.next;
364 while ((_svc_user_link *) node
365 != (_svc_user_link *) &(svc_userfds[one_fd])) {
366 _svc_free_id(node->id);
367 node = node->lnk.next;
368 free(node->lnk.previous);
369 }
370 }
371
372 free(user_fd_mgt_array);
373 user_fd_mgt_array = NULL;
374 first_free = (svc_input_id_t)-1;
375
376 free(svc_userfds);
377 svc_userfds = NULL;
378 svc_nuserfds = 0;
379 }
380 }
381
382 /*
383 * Remove all the callbacks associated with an fd; useful when the fd is
384 * closed, for instance.
385 */
386 static void
387 __svc_remove_input_of_fd(int fd)
388 {
389 _svc_user_fd_node *one_node;
390
391 if ((fd < 0) || (fd >= svc_nuserfds))
392 return;
393
394 one_node = svc_userfds[fd].lnk.next;
395 while ((_svc_user_link *) one_node
396 != (_svc_user_link *) &(svc_userfds[fd].lnk)) {
397 _svc_free_id(one_node->id);
398 one_node = one_node->lnk.next;
399 free(one_node->lnk.previous);
400 }
401
402 LIST_CLR(svc_userfds[fd]);
403 svc_userfds[fd].mask = 0;
404 }
405
406 /*
407 * Allow the user to add an fd to the poll list. If it does not succeed,
408 * return -1. Otherwise, return the registration id.
409 */
410
411 svc_input_id_t
412 svc_add_input(int user_fd, unsigned int events,
413 svc_callback_t user_callback, void *cookie)
414 {
415 _svc_user_fd_node *new_node;
416
417 if (user_fd < 0) {
418 errno = EINVAL;
419 return ((svc_input_id_t)-1);
420 }
421
422 if ((events == 0x0000) ||
423 (events & ~(POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|
424 POLLWRBAND|POLLERR|POLLHUP|POLLNVAL))) {
425 errno = EINVAL;
426 return ((svc_input_id_t)-1);
427 }
428
429 (void) mutex_lock(&svc_userfds_lock);
430
431 if ((user_fd < svc_nuserfds) &&
432 (svc_userfds[user_fd].mask & events) != 0) {
433 /* Callback already registered */
434 errno = EEXIST;
435 (void) mutex_unlock(&svc_userfds_lock);
436 return ((svc_input_id_t)-1);
437 }
438
439 /* Handle memory allocation. */
440 if (user_fd >= svc_nuserfds) {
441 int oldSize = svc_nuserfds;
442 int i;
443
444 svc_nuserfds = (user_fd + 1) + USER_FD_INCREMENT;
445
446 svc_userfds = (_svc_user_fd_head *)
447 realloc(svc_userfds,
448 svc_nuserfds * sizeof (_svc_user_fd_head));
449
450 if (svc_userfds == NULL) {
451 syslog(LOG_ERR, "svc_add_input: out of memory");
452 errno = ENOMEM;
453 (void) mutex_unlock(&svc_userfds_lock);
454 return ((svc_input_id_t)-1);
455 }
456
457 for (i = oldSize; i < svc_nuserfds; i++) {
458 LIST_CLR(svc_userfds[i]);
459 svc_userfds[i].mask = 0;
460 }
461 }
462
463 new_node = malloc(sizeof (_svc_user_fd_node));
464 if (new_node == NULL) {
465 syslog(LOG_ERR, "svc_add_input: out of memory");
466 errno = ENOMEM;
467 (void) mutex_unlock(&svc_userfds_lock);
468 return ((svc_input_id_t)-1);
469 }
470
471 /* create a new node */
472 new_node->fd = user_fd;
473 new_node->events = events;
474 new_node->callback = user_callback;
475 new_node->cookie = cookie;
476
477 (void) _svc_attribute_new_id(new_node);
478
479 /* Add the new element at the beginning of the list. */
480 if (LIST_ISEMPTY(svc_userfds[user_fd])) {
481 svc_userfds[user_fd].lnk.previous = new_node;
482 }
483 new_node->lnk.next = svc_userfds[user_fd].lnk.next;
484 new_node->lnk.previous = (_svc_user_fd_node *)&(svc_userfds[user_fd]);
485
486 svc_userfds[user_fd].lnk.next = new_node;
487
488 /* refresh global mask for this file descriptor */
489 svc_userfds[user_fd].mask |= events;
490
491 /* refresh mask for the poll */
492 add_pollfd(user_fd, (svc_userfds[user_fd].mask));
493
494 (void) mutex_unlock(&svc_userfds_lock);
495 return (new_node->id);
496 }
497
498
499 int
500 svc_remove_input(svc_input_id_t id)
501 {
502 _svc_user_fd_node* node;
503 _svc_user_fd_node* next;
504 _svc_user_fd_node* previous;
505 int fd; /* caching optim */
506
507 (void) mutex_lock(&svc_userfds_lock);
508
509 /* Immediately update data for id management */
510 if (user_fd_mgt_array == NULL || id >= svc_nmgtuserfds ||
511 is_free_id(id)) {
512 errno = EINVAL;
513 (void) mutex_unlock(&svc_userfds_lock);
514 return (-1);
515 }
516
517 node = node_from_id(id);
518 assert(node != NULL);
519
520 _svc_free_id(id);
521 next = node->lnk.next;
522 previous = node->lnk.previous;
523 fd = node->fd; /* caching optim */
524
525 /* Remove this node from the list. */
526 previous->lnk.next = next;
527 next->lnk.previous = previous;
528
529 /* Remove the node flags from the global mask */
530 svc_userfds[fd].mask ^= node->events;
531
532 free(node);
533 if (svc_userfds[fd].mask == 0) {
534 LIST_CLR(svc_userfds[fd]);
535 assert(LIST_ISEMPTY(svc_userfds[fd]));
536 remove_pollfd(fd);
537 }
538 /* TODO: a cleanup pass here could shrink memory usage */
539
540 (void) mutex_unlock(&svc_userfds_lock);
541 return (0);
542 }
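
/*
 * Illustrative sketch (not part of the original file, wrapped in #if 0):
 * registering a callback on a user-owned descriptor and removing it again.
 * The descriptor my_fd and the functions my_ready()/example_watch() are
 * hypothetical.
 */
#if 0
static void
my_ready(svc_input_id_t id, int fd, unsigned int events, void *cookie)
{
	/* drain fd here; calling svc_remove_input(id) is allowed, since */
	/* svc_userfds_lock is released before this callback is invoked. */
}

static svc_input_id_t
example_watch(int my_fd)
{
	svc_input_id_t id;

	id = svc_add_input(my_fd, POLLIN|POLLPRI, my_ready, NULL);
	if ((int)id == -1)
		syslog(LOG_ERR, "svc_add_input failed: %m");
	/* later, when no longer interested: (void) svc_remove_input(id); */
	return (id);
}
#endif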
543
544
545 /*
546 * Provides default service-side functions for authentication flavors
547 * that do not use all the fields in struct svc_auth_ops.
548 */
549
550 /*ARGSUSED*/
551 static int
552 authany_wrap(AUTH *auth, XDR *xdrs, xdrproc_t xfunc, caddr_t xwhere)
553 {
554 return (*xfunc)(xdrs, xwhere);
555 }
556
557 struct svc_auth_ops svc_auth_any_ops = {
558 authany_wrap,
559 authany_wrap,
560 };
561
562 /*
563 * Return pointer to server authentication structure.
564 */
565 SVCAUTH *
566 __svc_get_svcauth(SVCXPRT *xprt)
567 {
568 /* LINTED pointer alignment */
569 return (&SVC_XP_AUTH(xprt));
570 }
571
572 /*
573 * A callback routine to cleanup after a procedure is executed.
574 */
575 void (*__proc_cleanup_cb)() = NULL;
576
577 void *
578 __svc_set_proc_cleanup_cb(void *cb)
579 {
580 void *tmp = (void *)__proc_cleanup_cb;
581
582 __proc_cleanup_cb = (void (*)())cb;
583 return (tmp);
584 }
585
586 /* *************** SVCXPRT related stuff **************** */
587
588
589 static int pollfd_shrinking = 1;
590
591
592 /*
593 * Add fd to svc_pollfd
594 */
595 static void
596 add_pollfd(int fd, short events)
597 {
598 if (fd < FD_SETSIZE) {
599 FD_SET(fd, &svc_fdset);
600 #if !defined(_LP64)
601 FD_SET(fd, &_new_svc_fdset);
602 #endif
603 svc_nfds++;
604 svc_nfds_set++;
605 if (fd >= svc_max_fd)
606 svc_max_fd = fd + 1;
607 }
608 if (fd >= svc_max_pollfd)
609 svc_max_pollfd = fd + 1;
610 if (svc_max_pollfd > svc_pollfd_allocd) {
611 int i = svc_pollfd_allocd;
612 pollfd_t *tmp;
613 do {
614 svc_pollfd_allocd += POLLFD_EXTEND;
615 } while (svc_max_pollfd > svc_pollfd_allocd);
616 tmp = realloc(svc_pollfd,
617 sizeof (pollfd_t) * svc_pollfd_allocd);
618 if (tmp != NULL) {
619 svc_pollfd = tmp;
620 for (; i < svc_pollfd_allocd; i++)
621 POLLFD_CLR(i, tmp);
622 } else {
623 /*
624 * give an error message; undo fdset setting
625 * above; reset the pollfd_shrinking flag.
626 * because of this poll will not be done
627 * on these fds.
628 */
629 if (fd < FD_SETSIZE) {
630 FD_CLR(fd, &svc_fdset);
631 #if !defined(_LP64)
632 FD_CLR(fd, &_new_svc_fdset);
633 #endif
634 svc_nfds--;
635 svc_nfds_set--;
636 if (fd == (svc_max_fd - 1))
637 svc_max_fd--;
638 }
639 if (fd == (svc_max_pollfd - 1))
640 svc_max_pollfd--;
641 pollfd_shrinking = 0;
642 syslog(LOG_ERR, "add_pollfd: out of memory");
643 _exit(1);
644 }
645 }
646 svc_pollfd[fd].fd = fd;
647 svc_pollfd[fd].events = events;
648 svc_npollfds++;
649 svc_npollfds_set++;
650 }
651
652 /*
653 * The fd is still active; only the bit in the fdset is cleared.
654 * Do not decrement svc_nfds or svc_npollfds.
655 */
656 void
657 clear_pollfd(int fd)
658 {
659 if (fd < FD_SETSIZE && FD_ISSET(fd, &svc_fdset)) {
660 FD_CLR(fd, &svc_fdset);
661 #if !defined(_LP64)
662 FD_CLR(fd, &_new_svc_fdset);
663 #endif
664 svc_nfds_set--;
665 }
666 if (fd < svc_pollfd_allocd && POLLFD_ISSET(fd, svc_pollfd)) {
667 POLLFD_CLR(fd, svc_pollfd);
668 svc_npollfds_set--;
669 }
670 }
671
672 /*
673 * Set the bit in the fdset for an active fd so that poll() is done for it.
674 */
675 void
676 set_pollfd(int fd, short events)
677 {
678 if (fd < FD_SETSIZE) {
679 FD_SET(fd, &svc_fdset);
680 #if !defined(_LP64)
681 FD_SET(fd, &_new_svc_fdset);
682 #endif
683 svc_nfds_set++;
684 }
685 if (fd < svc_pollfd_allocd) {
686 svc_pollfd[fd].fd = fd;
687 svc_pollfd[fd].events = events;
688 svc_npollfds_set++;
689 }
690 }
691
692 /*
693 * remove a svc_pollfd entry; it does not shrink the memory
694 */
695 static void
696 remove_pollfd(int fd)
697 {
698 clear_pollfd(fd);
699 if (fd == (svc_max_fd - 1))
700 svc_max_fd--;
701 svc_nfds--;
702 if (fd == (svc_max_pollfd - 1))
703 svc_max_pollfd--;
704 svc_npollfds--;
705 }
706
707 /*
708 * delete a svc_pollfd entry; it shrinks the memory
709 * use remove_pollfd if you do not want to shrink
710 */
711 static void
712 delete_pollfd(int fd)
713 {
714 remove_pollfd(fd);
715 if (pollfd_shrinking && svc_max_pollfd <
716 (svc_pollfd_allocd - POLLFD_SHRINK)) {
717 do {
718 svc_pollfd_allocd -= POLLFD_SHRINK;
719 } while (svc_max_pollfd < (svc_pollfd_allocd - POLLFD_SHRINK));
720 svc_pollfd = realloc(svc_pollfd,
721 sizeof (pollfd_t) * svc_pollfd_allocd);
722 if (svc_pollfd == NULL) {
723 syslog(LOG_ERR, "delete_pollfd: out of memory");
724 _exit(1);
725 }
726 }
727 }
728
729
730 /*
731 * Activate a transport handle.
732 */
733 void
734 xprt_register(const SVCXPRT *xprt)
735 {
736 int fd = xprt->xp_fd;
737 #ifdef CALLBACK
738 extern void (*_svc_getreqset_proc)();
739 #endif
740 /* VARIABLES PROTECTED BY svc_fd_lock: svc_xports, svc_fdset */
741
742 (void) rw_wrlock(&svc_fd_lock);
743 if (svc_xports == NULL) {
744 /* allocate some small amount first */
745 svc_xports = calloc(FD_INCREMENT, sizeof (SVCXPRT *));
746 if (svc_xports == NULL) {
747 syslog(LOG_ERR, "xprt_register: out of memory");
748 _exit(1);
749 }
750 nsvc_xports = FD_INCREMENT;
751
752 #ifdef CALLBACK
753 /*
754 * XXX: This code does not keep track of the server state.
755 *
756 * This provides for callback support. When a client
757 * recv's a call from another client on the server fd's,
758 * it calls _svc_getreqset_proc() which would return
759 * after serving all the server requests. Also look under
760 * clnt_dg.c and clnt_vc.c (clnt_call part of it)
761 */
762 _svc_getreqset_proc = svc_getreq_poll;
763 #endif
764 }
765
766 while (fd >= nsvc_xports) {
767 SVCXPRT **tmp_xprts = svc_xports;
768
769 /* time to expand svc_xprts */
770 tmp_xprts = realloc(svc_xports,
771 sizeof (SVCXPRT *) * (nsvc_xports + FD_INCREMENT));
772 if (tmp_xprts == NULL) {
773 syslog(LOG_ERR, "xprt_register : out of memory.");
774 _exit(1);
775 }
776
777 svc_xports = tmp_xprts;
778 (void) memset(&svc_xports[nsvc_xports], 0,
779 sizeof (SVCXPRT *) * FD_INCREMENT);
780 nsvc_xports += FD_INCREMENT;
781 }
782
783 svc_xports[fd] = (SVCXPRT *)xprt;
784
785 add_pollfd(fd, MASKVAL);
786
787 if (svc_polling) {
788 char dummy;
789
790 /*
791 * This happens only in one of the MT modes.
792 * Wake up poller.
793 */
794 (void) write(svc_pipe[1], &dummy, sizeof (dummy));
795 }
796 /*
797 * If already dispatching door based services, start
798 * dispatching TLI based services now.
799 */
800 (void) mutex_lock(&svc_door_mutex);
801 if (svc_ndoorfds > 0)
802 (void) cond_signal(&svc_door_waitcv);
803 (void) mutex_unlock(&svc_door_mutex);
804
805 if (svc_xdrs == NULL) {
806 /* allocate initial chunk */
807 svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
808 if (svc_xdrs != NULL)
809 nsvc_xdrs = FD_INCREMENT;
810 else {
811 syslog(LOG_ERR, "xprt_register : out of memory.");
812 _exit(1);
813 }
814 }
815 (void) rw_unlock(&svc_fd_lock);
816 }
817
818 /*
819 * De-activate a transport handle.
820 */
821 void
822 __xprt_unregister_private(const SVCXPRT *xprt, bool_t lock_not_held)
823 {
824 int fd = xprt->xp_fd;
825
826 if (lock_not_held)
827 (void) rw_wrlock(&svc_fd_lock);
828 if ((fd < nsvc_xports) && (svc_xports[fd] == xprt)) {
829 svc_xports[fd] = NULL;
830 delete_pollfd(fd);
831 }
832 if (lock_not_held)
833 (void) rw_unlock(&svc_fd_lock);
834 __svc_rm_from_xlist(&_svc_xprtlist, xprt, &xprtlist_lock);
835 }
836
837 void
838 xprt_unregister(const SVCXPRT *xprt)
839 {
840 __xprt_unregister_private(xprt, TRUE);
841 }
842
843 /* ********************** CALLOUT list related stuff ************* */
844
845 /*
846 * Add a service program to the callout list.
847 * The dispatch routine will be called when a rpc request for this
848 * program number comes in.
849 */
850 bool_t
851 svc_reg(const SVCXPRT *xprt, const rpcprog_t prog, const rpcvers_t vers,
852 void (*dispatch)(), const struct netconfig *nconf)
853 {
854 struct svc_callout *prev;
855 struct svc_callout *s, **s2;
856 struct netconfig *tnconf;
857 char *netid = NULL;
858 int flag = 0;
859
860 /* VARIABLES PROTECTED BY svc_lock: s, prev, svc_head */
861
862 if (xprt->xp_netid) {
863 netid = strdup(xprt->xp_netid);
864 flag = 1;
865 } else if (nconf && nconf->nc_netid) {
866 netid = strdup(nconf->nc_netid);
867 flag = 1;
868 } else if ((tnconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
869 != NULL) {
870 netid = strdup(tnconf->nc_netid);
871 flag = 1;
872 freenetconfigent(tnconf);
873 } /* must have been created with svc_raw_create */
874 if ((netid == NULL) && (flag == 1))
875 return (FALSE);
876
877 (void) rw_wrlock(&svc_lock);
878 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
879 if (netid)
880 free(netid);
881 if (s->sc_dispatch == dispatch)
882 goto rpcb_it; /* registering another xprt */
883 (void) rw_unlock(&svc_lock);
884 return (FALSE);
885 }
886 s = malloc(sizeof (struct svc_callout));
887 if (s == NULL) {
888 if (netid)
889 free(netid);
890 (void) rw_unlock(&svc_lock);
891 return (FALSE);
892 }
893
894 s->sc_prog = prog;
895 s->sc_vers = vers;
896 s->sc_dispatch = dispatch;
897 s->sc_netid = netid;
898 s->sc_next = NULL;
899
900 /*
901 * The ordering of transports is such that the most frequently used
902 * one appears first. So add the new entry to the end of the list.
903 */
904 for (s2 = &svc_head; *s2 != NULL; s2 = &(*s2)->sc_next)
905 ;
906 *s2 = s;
907
908 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
909 if ((((SVCXPRT *)xprt)->xp_netid = strdup(netid)) == NULL) {
910 syslog(LOG_ERR, "svc_reg : strdup failed.");
911 free(netid);
912 free(s);
913 *s2 = NULL;
914 (void) rw_unlock(&svc_lock);
915 return (FALSE);
916 }
917
918 rpcb_it:
919 (void) rw_unlock(&svc_lock);
920
921 /* now register the information with the local binder service */
922 if (nconf)
923 return (rpcb_set(prog, vers, nconf, &xprt->xp_ltaddr));
924 return (TRUE);
925 /*NOTREACHED*/
926 }
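
/*
 * Illustrative sketch (not part of the original file, wrapped in #if 0):
 * a typical svc_reg() call from server setup code.  The program number,
 * version and dispatch function are hypothetical; when nconf is non-NULL
 * the mapping is also registered with rpcbind via rpcb_set() above.
 */
#if 0
#define	EXAMPLE_PROG	((rpcprog_t)0x20000099)	/* hypothetical */
#define	EXAMPLE_VERS	((rpcvers_t)1)

static void example_dispatch(struct svc_req *, SVCXPRT *);

static bool_t
example_reg(SVCXPRT *xprt, struct netconfig *nconf)
{
	return (svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS,
	    example_dispatch, nconf));
}
#endif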
927
928 /*
929 * Remove a service program from the callout list.
930 */
931 void
932 svc_unreg(const rpcprog_t prog, const rpcvers_t vers)
933 {
934 struct svc_callout *prev;
935 struct svc_callout *s;
936
937 /* unregister the information anyway */
938 (void) rpcb_unset(prog, vers, NULL);
939
940 (void) rw_wrlock(&svc_lock);
941 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
942 if (prev == NULL_SVC) {
943 svc_head = s->sc_next;
944 } else {
945 prev->sc_next = s->sc_next;
946 }
947 s->sc_next = NULL_SVC;
948 if (s->sc_netid)
949 free(s->sc_netid);
950 free(s);
951 }
952 (void) rw_unlock(&svc_lock);
953 }
954
955 #ifdef PORTMAP
956 /*
957 * Add a service program to the callout list.
958 * The dispatch routine will be called when a rpc request for this
959 * program number comes in.
960 * For version 2 portmappers.
961 */
962 bool_t
963 svc_register(SVCXPRT *xprt, rpcprog_t prog, rpcvers_t vers,
964 void (*dispatch)(), int protocol)
965 {
966 struct svc_callout *prev;
967 struct svc_callout *s;
968 struct netconfig *nconf;
969 char *netid = NULL;
970 int flag = 0;
971
972 if (xprt->xp_netid) {
973 netid = strdup(xprt->xp_netid);
974 flag = 1;
975 } else if ((ioctl(xprt->xp_fd, I_FIND, "timod") > 0) && ((nconf =
976 __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type)) != NULL)) {
977 /* fill in missing netid field in SVCXPRT */
978 netid = strdup(nconf->nc_netid);
979 flag = 1;
980 freenetconfigent(nconf);
981 } /* must be svc_raw_create */
982
983 if ((netid == NULL) && (flag == 1))
984 return (FALSE);
985
986 (void) rw_wrlock(&svc_lock);
987 if ((s = svc_find(prog, vers, &prev, netid)) != NULL_SVC) {
988 if (netid)
989 free(netid);
990 if (s->sc_dispatch == dispatch)
991 goto pmap_it; /* registering another xprt */
992 (void) rw_unlock(&svc_lock);
993 return (FALSE);
994 }
995 s = malloc(sizeof (struct svc_callout));
996 if (s == (struct svc_callout *)0) {
997 if (netid)
998 free(netid);
999 (void) rw_unlock(&svc_lock);
1000 return (FALSE);
1001 }
1002 s->sc_prog = prog;
1003 s->sc_vers = vers;
1004 s->sc_dispatch = dispatch;
1005 s->sc_netid = netid;
1006 s->sc_next = svc_head;
1007 svc_head = s;
1008
1009 if ((xprt->xp_netid == NULL) && (flag == 1) && netid)
1010 if ((xprt->xp_netid = strdup(netid)) == NULL) {
1011 syslog(LOG_ERR, "svc_register : strdup failed.");
1012 free(netid);
1013 svc_head = s->sc_next;
1014 free(s);
1015 (void) rw_unlock(&svc_lock);
1016 return (FALSE);
1017 }
1018
1019 pmap_it:
1020 (void) rw_unlock(&svc_lock);
1021 /* now register the information with the local binder service */
1022 if (protocol)
1023 return (pmap_set(prog, vers, protocol, xprt->xp_port));
1024 return (TRUE);
1025 }
1026
1027 /*
1028 * Remove a service program from the callout list.
1029 * For version 2 portmappers.
1030 */
1031 void
1032 svc_unregister(rpcprog_t prog, rpcvers_t vers)
1033 {
1034 struct svc_callout *prev;
1035 struct svc_callout *s;
1036
1037 (void) rw_wrlock(&svc_lock);
1038 while ((s = svc_find(prog, vers, &prev, NULL)) != NULL_SVC) {
1039 if (prev == NULL_SVC) {
1040 svc_head = s->sc_next;
1041 } else {
1042 prev->sc_next = s->sc_next;
1043 }
1044 s->sc_next = NULL_SVC;
1045 if (s->sc_netid)
1046 free(s->sc_netid);
1047 free(s);
1048 /* unregister the information with the local binder service */
1049 (void) pmap_unset(prog, vers);
1050 }
1051 (void) rw_unlock(&svc_lock);
1052 }
1053 #endif /* PORTMAP */
1054
1055 /*
1056 * Search the callout list for a program number, return the callout
1057 * struct.
1058 * Also check the transport. Many routines such as svc_unreg
1059 * don't give any corresponding transport, so don't check the transport if
1060 * netid == NULL.
1061 */
1062 static struct svc_callout *
1063 svc_find(rpcprog_t prog, rpcvers_t vers, struct svc_callout **prev, char *netid)
1064 {
1065 struct svc_callout *s, *p;
1066
1067 /* WRITE LOCK HELD ON ENTRY: svc_lock */
1068
1069 /* assert(RW_WRITE_HELD(&svc_lock)); */
1070 p = NULL_SVC;
1071 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1072 if (((s->sc_prog == prog) && (s->sc_vers == vers)) &&
1073 ((netid == NULL) || (s->sc_netid == NULL) ||
1074 (strcmp(netid, s->sc_netid) == 0)))
1075 break;
1076 p = s;
1077 }
1078 *prev = p;
1079 return (s);
1080 }
1081
1082
1083 /* ******************* REPLY GENERATION ROUTINES ************ */
1084
1085 /*
1086 * Send a reply to an rpc request
1087 */
1088 bool_t
1089 svc_sendreply(const SVCXPRT *xprt, const xdrproc_t xdr_results,
1090 const caddr_t xdr_location)
1091 {
1092 struct rpc_msg rply;
1093
1094 rply.rm_direction = REPLY;
1095 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1096 rply.acpted_rply.ar_verf = xprt->xp_verf;
1097 rply.acpted_rply.ar_stat = SUCCESS;
1098 rply.acpted_rply.ar_results.where = xdr_location;
1099 rply.acpted_rply.ar_results.proc = xdr_results;
1100 return (SVC_REPLY((SVCXPRT *)xprt, &rply));
1101 }
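
/*
 * Illustrative sketch (not part of the original file, wrapped in #if 0):
 * a minimal dispatch routine that answers the NULLPROC ping with
 * svc_sendreply() and uses the error reply routines below for anything
 * else.  example_dispatch() is the hypothetical function registered
 * through svc_reg() above.
 */
#if 0
static void
example_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
{
	switch (rqstp->rq_proc) {
	case NULLPROC:
		(void) svc_sendreply(transp, (xdrproc_t)xdr_void, NULL);
		break;
	default:
		svcerr_noproc(transp);
		break;
	}
}
#endif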
1102
1103 /*
1104 * No procedure error reply
1105 */
1106 void
1107 svcerr_noproc(const SVCXPRT *xprt)
1108 {
1109 struct rpc_msg rply;
1110
1111 rply.rm_direction = REPLY;
1112 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1113 rply.acpted_rply.ar_verf = xprt->xp_verf;
1114 rply.acpted_rply.ar_stat = PROC_UNAVAIL;
1115 SVC_REPLY((SVCXPRT *)xprt, &rply);
1116 }
1117
1118 /*
1119 * Can't decode args error reply
1120 */
1121 void
1122 svcerr_decode(const SVCXPRT *xprt)
1123 {
1124 struct rpc_msg rply;
1125
1126 rply.rm_direction = REPLY;
1127 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1128 rply.acpted_rply.ar_verf = xprt->xp_verf;
1129 rply.acpted_rply.ar_stat = GARBAGE_ARGS;
1130 SVC_REPLY((SVCXPRT *)xprt, &rply);
1131 }
1132
1133 /*
1134 * Some system error
1135 */
1136 void
1137 svcerr_systemerr(const SVCXPRT *xprt)
1138 {
1139 struct rpc_msg rply;
1140
1141 rply.rm_direction = REPLY;
1142 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1143 rply.acpted_rply.ar_verf = xprt->xp_verf;
1144 rply.acpted_rply.ar_stat = SYSTEM_ERR;
1145 SVC_REPLY((SVCXPRT *)xprt, &rply);
1146 }
1147
1148 /*
1149 * Tell RPC package to not complain about version errors to the client. This
1150 * is useful when revving broadcast protocols that sit on a fixed address.
1151 * There is really one (or should be only one) example of this kind of
1152 * protocol: the portmapper (or rpc binder).
1153 */
1154 void
1155 __svc_versquiet_on(const SVCXPRT *xprt)
1156 {
1157 /* LINTED pointer alignment */
1158 svc_flags(xprt) |= SVC_VERSQUIET;
1159 }
1160
1161 void
1162 __svc_versquiet_off(const SVCXPRT *xprt)
1163 {
1164 /* LINTED pointer alignment */
1165 svc_flags(xprt) &= ~SVC_VERSQUIET;
1166 }
1167
1168 void
1169 svc_versquiet(const SVCXPRT *xprt)
1170 {
1171 __svc_versquiet_on(xprt);
1172 }
1173
1174 int
1175 __svc_versquiet_get(const SVCXPRT *xprt)
1176 {
1177 /* LINTED pointer alignment */
1178 return (svc_flags(xprt) & SVC_VERSQUIET);
1179 }
1180
1181 /*
1182 * Authentication error reply
1183 */
1184 void
1185 svcerr_auth(const SVCXPRT *xprt, const enum auth_stat why)
1186 {
1187 struct rpc_msg rply;
1188
1189 rply.rm_direction = REPLY;
1190 rply.rm_reply.rp_stat = MSG_DENIED;
1191 rply.rjcted_rply.rj_stat = AUTH_ERROR;
1192 rply.rjcted_rply.rj_why = why;
1193 SVC_REPLY((SVCXPRT *)xprt, &rply);
1194 }
1195
1196 /*
1197 * Auth too weak error reply
1198 */
1199 void
1200 svcerr_weakauth(const SVCXPRT *xprt)
1201 {
1202 svcerr_auth(xprt, AUTH_TOOWEAK);
1203 }
1204
1205 /*
1206 * Program unavailable error reply
1207 */
1208 void
1209 svcerr_noprog(const SVCXPRT *xprt)
1210 {
1211 struct rpc_msg rply;
1212
1213 rply.rm_direction = REPLY;
1214 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1215 rply.acpted_rply.ar_verf = xprt->xp_verf;
1216 rply.acpted_rply.ar_stat = PROG_UNAVAIL;
1217 SVC_REPLY((SVCXPRT *)xprt, &rply);
1218 }
1219
1220 /*
1221 * Program version mismatch error reply
1222 */
1223 void
1224 svcerr_progvers(const SVCXPRT *xprt, const rpcvers_t low_vers,
1225 const rpcvers_t high_vers)
1226 {
1227 struct rpc_msg rply;
1228
1229 rply.rm_direction = REPLY;
1230 rply.rm_reply.rp_stat = MSG_ACCEPTED;
1231 rply.acpted_rply.ar_verf = xprt->xp_verf;
1232 rply.acpted_rply.ar_stat = PROG_MISMATCH;
1233 rply.acpted_rply.ar_vers.low = low_vers;
1234 rply.acpted_rply.ar_vers.high = high_vers;
1235 SVC_REPLY((SVCXPRT *)xprt, &rply);
1236 }
1237
1238 /* ******************* SERVER INPUT STUFF ******************* */
1239
1240 /*
1241 * Get server side input from some transport.
1242 *
1243 * Statement of authentication parameters management:
1244 * This function owns and manages all authentication parameters, specifically
1245 * the "raw" parameters (msg.rm_call.cb_cred and msg.rm_call.cb_verf) and
1246 * the "cooked" credentials (rqst->rq_clntcred).
1247 * However, this function does not know the structure of the cooked
1248 * credentials, so it makes the following assumptions:
1249 * a) the structure is contiguous (no pointers), and
1250 * b) the cred structure size does not exceed RQCRED_SIZE bytes.
1251 * In all events, all three parameters are freed upon exit from this routine.
1252 * The storage is trivially managed on the call stack in user land, but
1253 * is malloc'd in kernel land.
1254 */
1255
1256 void
1257 svc_getreq(int rdfds)
1258 {
1259 fd_set readfds;
1260
1261 FD_ZERO(&readfds);
1262 readfds.fds_bits[0] = rdfds;
1263 svc_getreqset(&readfds);
1264 }
1265
1266 void
1267 svc_getreqset(fd_set *readfds)
1268 {
1269 int i;
1270
1271 for (i = 0; i < svc_max_fd; i++) {
1272 /* fd has input waiting */
1273 if (FD_ISSET(i, readfds))
1274 svc_getreq_common(i);
1275 }
1276 }
1277
1278 void
1279 svc_getreq_poll(struct pollfd *pfdp, const int pollretval)
1280 {
1281 int i;
1282 int fds_found;
1283
1284 for (i = fds_found = 0; fds_found < pollretval; i++) {
1285 struct pollfd *p = &pfdp[i];
1286
1287 if (p->revents) {
1288 /* fd has input waiting */
1289 fds_found++;
1290 /*
1291 * We assume that this function is only called
1292 * via someone select()ing from svc_fdset or
1293 * poll()ing from svc_pollset[]. Thus it's safe
1294 * to handle the POLLNVAL event by simply turning
1295 * the corresponding bit off in svc_fdset. The
1296 * svc_pollset[] array is derived from svc_fdset
1297 * and so will also be updated eventually.
1298 *
1299 * XXX Should we do an xprt_unregister() instead?
1300 */
1301 /* Handle user callback */
1302 if (__is_a_userfd(p->fd) == TRUE) {
1303 (void) rw_rdlock(&svc_fd_lock);
1304 __svc_getreq_user(p);
1305 (void) rw_unlock(&svc_fd_lock);
1306 } else {
1307 if (p->revents & POLLNVAL) {
1308 (void) rw_wrlock(&svc_fd_lock);
1309 remove_pollfd(p->fd); /* XXX */
1310 (void) rw_unlock(&svc_fd_lock);
1311 } else {
1312 svc_getreq_common(p->fd);
1313 }
1314 }
1315 }
1316 }
1317 }
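
/*
 * Illustrative sketch (not part of the original file, wrapped in #if 0):
 * a hand-rolled single-threaded service loop built on svc_pollfd,
 * svc_max_pollfd and svc_getreq_poll(), similar in spirit to what
 * svc_run() does.  example_run() is a hypothetical name.
 */
#if 0
static void
example_run(void)
{
	int n;

	for (;;) {
		n = poll(svc_pollfd, svc_max_pollfd, -1);
		if (n > 0)
			svc_getreq_poll(svc_pollfd, n);
		else if (n < 0 && errno != EINTR)
			break;		/* unrecoverable poll() failure */
	}
}
#endif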
1318
1319 void
1320 svc_getreq_common(const int fd)
1321 {
1322 SVCXPRT *xprt;
1323 enum xprt_stat stat;
1324 struct rpc_msg *msg;
1325 struct svc_req *r;
1326 char *cred_area;
1327
1328 (void) rw_rdlock(&svc_fd_lock);
1329
1330 /* HANDLE USER CALLBACK */
1331 if (__is_a_userfd(fd) == TRUE) {
1332 struct pollfd virtual_fd;
1333
1334 virtual_fd.events = virtual_fd.revents = (short)0xFFFF;
1335 virtual_fd.fd = fd;
1336 __svc_getreq_user(&virtual_fd);
1337 (void) rw_unlock(&svc_fd_lock);
1338 return;
1339 }
1340
1341 /*
1342 * The transport associated with this fd could have been
1343 * removed from svc_timeout_nonblock_xprt_and_LRU, for instance.
1344 * This can happen if two or more fds get read events and are
1345 * passed to svc_getreq_poll/set, the first fd is serviced by
1346 * the dispatch routine and cleans up any dead transports. If
1347 * one of the dead transports removed is the other fd that
1348 * had a read event then svc_getreq_common() will be called with no
1349 * xprt associated with the fd that had the original read event.
1350 */
1351 if ((fd >= nsvc_xports) || (xprt = svc_xports[fd]) == NULL) {
1352 (void) rw_unlock(&svc_fd_lock);
1353 return;
1354 }
1355 (void) rw_unlock(&svc_fd_lock);
1356 /* LINTED pointer alignment */
1357 msg = SVCEXT(xprt)->msg;
1358 /* LINTED pointer alignment */
1359 r = SVCEXT(xprt)->req;
1360 /* LINTED pointer alignment */
1361 cred_area = SVCEXT(xprt)->cred_area;
1362 msg->rm_call.cb_cred.oa_base = cred_area;
1363 msg->rm_call.cb_verf.oa_base = &(cred_area[MAX_AUTH_BYTES]);
1364 r->rq_clntcred = &(cred_area[2 * MAX_AUTH_BYTES]);
1365
1366 /* receive msgs from xprt (support batch calls) */
1367 do {
1368 bool_t dispatch;
1369
1370 if (dispatch = SVC_RECV(xprt, msg))
1371 (void) _svc_prog_dispatch(xprt, msg, r);
1372 /*
1373 * Check if the xprt has been disconnected in a recursive call
1374 * in the service dispatch routine. If so, then break
1375 */
1376 (void) rw_rdlock(&svc_fd_lock);
1377 if (xprt != svc_xports[fd]) {
1378 (void) rw_unlock(&svc_fd_lock);
1379 break;
1380 }
1381 (void) rw_unlock(&svc_fd_lock);
1382
1383 /*
1384 * Call cleanup procedure if set.
1385 */
1386 if (__proc_cleanup_cb != NULL && dispatch)
1387 (*__proc_cleanup_cb)(xprt);
1388
1389 if ((stat = SVC_STAT(xprt)) == XPRT_DIED) {
1390 SVC_DESTROY(xprt);
1391 break;
1392 }
1393 } while (stat == XPRT_MOREREQS);
1394 }
1395
1396 int
1397 _svc_prog_dispatch(SVCXPRT *xprt, struct rpc_msg *msg, struct svc_req *r)
1398 {
1399 struct svc_callout *s;
1400 enum auth_stat why;
1401 int prog_found;
1402 rpcvers_t low_vers;
1403 rpcvers_t high_vers;
1404 void (*disp_fn)();
1405
1406 r->rq_xprt = xprt;
1407 r->rq_prog = msg->rm_call.cb_prog;
1408 r->rq_vers = msg->rm_call.cb_vers;
1409 r->rq_proc = msg->rm_call.cb_proc;
1410 r->rq_cred = msg->rm_call.cb_cred;
1411 /* LINTED pointer alignment */
1412 SVC_XP_AUTH(r->rq_xprt).svc_ah_ops = svc_auth_any_ops;
1413 /* LINTED pointer alignment */
1414 SVC_XP_AUTH(r->rq_xprt).svc_ah_private = NULL;
1415
1416 /* first authenticate the message */
1417 /* Check for null flavor and bypass these calls if possible */
1418
1419 if (msg->rm_call.cb_cred.oa_flavor == AUTH_NULL) {
1420 r->rq_xprt->xp_verf.oa_flavor = _null_auth.oa_flavor;
1421 r->rq_xprt->xp_verf.oa_length = 0;
1422 } else {
1423 bool_t no_dispatch;
1424
1425 if ((why = __gss_authenticate(r, msg,
1426 &no_dispatch)) != AUTH_OK) {
1427 svcerr_auth(xprt, why);
1428 return (0);
1429 }
1430 if (no_dispatch)
1431 return (0);
1432 }
1433 /* match message with a registered service */
1434 prog_found = FALSE;
1435 low_vers = (rpcvers_t)(0 - 1);
1436 high_vers = 0;
1437 (void) rw_rdlock(&svc_lock);
1438 for (s = svc_head; s != NULL_SVC; s = s->sc_next) {
1439 if (s->sc_prog == r->rq_prog) {
1440 prog_found = TRUE;
1441 if (s->sc_vers == r->rq_vers) {
1442 if ((xprt->xp_netid == NULL) ||
1443 (s->sc_netid == NULL) ||
1444 (strcmp(xprt->xp_netid,
1445 s->sc_netid) == 0)) {
1446 disp_fn = (*s->sc_dispatch);
1447 (void) rw_unlock(&svc_lock);
1448 disp_fn(r, xprt);
1449 return (1);
1450 }
1451 prog_found = FALSE;
1452 }
1453 if (s->sc_vers < low_vers)
1454 low_vers = s->sc_vers;
1455 if (s->sc_vers > high_vers)
1456 high_vers = s->sc_vers;
1457 } /* found correct program */
1458 }
1459 (void) rw_unlock(&svc_lock);
1460
1461 /*
1462 * if we got here, the program or version
1463 * is not served ...
1464 */
1465 if (prog_found) {
1466 /* LINTED pointer alignment */
1467 if (!version_keepquiet(xprt))
1468 svcerr_progvers(xprt, low_vers, high_vers);
1469 } else {
1470 svcerr_noprog(xprt);
1471 }
1472 return (0);
1473 }
1474
1475 /* ******************* SVCXPRT allocation and deallocation ***************** */
1476
1477 /*
1478 * svc_xprt_alloc() - allocate a service transport handle
1479 */
1480 SVCXPRT *
1481 svc_xprt_alloc(void)
1482 {
1483 SVCXPRT *xprt = NULL;
1484 SVCXPRT_EXT *xt = NULL;
1485 SVCXPRT_LIST *xlist = NULL;
1486 struct rpc_msg *msg = NULL;
1487 struct svc_req *req = NULL;
1488 char *cred_area = NULL;
1489
1490 if ((xprt = calloc(1, sizeof (SVCXPRT))) == NULL)
1491 goto err_exit;
1492
1493 if ((xt = calloc(1, sizeof (SVCXPRT_EXT))) == NULL)
1494 goto err_exit;
1495 xprt->xp_p3 = (caddr_t)xt; /* SVCEXT(xprt) = xt */
1496
1497 if ((xlist = calloc(1, sizeof (SVCXPRT_LIST))) == NULL)
1498 goto err_exit;
1499 xt->my_xlist = xlist;
1500 xlist->xprt = xprt;
1501
1502 if ((msg = malloc(sizeof (struct rpc_msg))) == NULL)
1503 goto err_exit;
1504 xt->msg = msg;
1505
1506 if ((req = malloc(sizeof (struct svc_req))) == NULL)
1507 goto err_exit;
1508 xt->req = req;
1509
1510 if ((cred_area = malloc(2*MAX_AUTH_BYTES + RQCRED_SIZE)) == NULL)
1511 goto err_exit;
1512 xt->cred_area = cred_area;
1513
1514 /* LINTED pointer alignment */
1515 (void) mutex_init(&svc_send_mutex(xprt), USYNC_THREAD, (void *)0);
1516 return (xprt);
1517
1518 err_exit:
1519 svc_xprt_free(xprt);
1520 return (NULL);
1521 }
1522
1523
1524 /*
1525 * svc_xprt_free() - free a service handle
1526 */
1527 void
1528 svc_xprt_free(SVCXPRT *xprt)
1529 {
1530 /* LINTED pointer alignment */
1531 SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
1532 SVCXPRT_LIST *my_xlist = xt ? xt->my_xlist: NULL;
1533 struct rpc_msg *msg = xt ? xt->msg : NULL;
1534 struct svc_req *req = xt ? xt->req : NULL;
1535 char *cred_area = xt ? xt->cred_area : NULL;
1536
1537 if (xprt)
1538 free(xprt);
1539 if (xt)
1540 free(xt);
1541 if (my_xlist)
1542 free(my_xlist);
1543 if (msg)
1544 free(msg);
1545 if (req)
1546 free(req);
1547 if (cred_area)
1548 free(cred_area);
1549 }
1550
1551
1552 /*
1553 * svc_xprt_destroy() - free parent and child xprt list
1554 */
1555 void
1556 svc_xprt_destroy(SVCXPRT *xprt)
1557 {
1558 SVCXPRT_LIST *xlist, *xnext = NULL;
1559 int type;
1560
1561 /* LINTED pointer alignment */
1562 if (SVCEXT(xprt)->parent)
1563 /* LINTED pointer alignment */
1564 xprt = SVCEXT(xprt)->parent;
1565 /* LINTED pointer alignment */
1566 type = svc_type(xprt);
1567 /* LINTED pointer alignment */
1568 for (xlist = SVCEXT(xprt)->my_xlist; xlist != NULL; xlist = xnext) {
1569 xnext = xlist->next;
1570 xprt = xlist->xprt;
1571 switch (type) {
1572 case SVC_DGRAM:
1573 svc_dg_xprtfree(xprt);
1574 break;
1575 case SVC_RENDEZVOUS:
1576 svc_vc_xprtfree(xprt);
1577 break;
1578 case SVC_CONNECTION:
1579 svc_fd_xprtfree(xprt);
1580 break;
1581 case SVC_DOOR:
1582 svc_door_xprtfree(xprt);
1583 break;
1584 }
1585 }
1586 }
1587
1588
1589 /*
1590 * svc_copy() - make a copy of parent
1591 */
1592 SVCXPRT *
1593 svc_copy(SVCXPRT *xprt)
1594 {
1595 /* LINTED pointer alignment */
1596 switch (svc_type(xprt)) {
1597 case SVC_DGRAM:
1598 return (svc_dg_xprtcopy(xprt));
1599 case SVC_RENDEZVOUS:
1600 return (svc_vc_xprtcopy(xprt));
1601 case SVC_CONNECTION:
1602 return (svc_fd_xprtcopy(xprt));
1603 }
1604 return (NULL);
1605 }
1606
1607
1608 /*
1609 * _svc_destroy_private() - private SVC_DESTROY interface
1610 */
1611 void
1612 _svc_destroy_private(SVCXPRT *xprt)
1613 {
1614 /* LINTED pointer alignment */
1615 switch (svc_type(xprt)) {
1616 case SVC_DGRAM:
1617 _svc_dg_destroy_private(xprt);
1618 break;
1619 case SVC_RENDEZVOUS:
1620 case SVC_CONNECTION:
1621 _svc_vc_destroy_private(xprt, TRUE);
1622 break;
1623 }
1624 }
1625
1626 /*
1627 * svc_get_local_cred() - fetch local user credentials. This always
1628 * works over doors based transports. For local transports, this
1629 * does not yield correct results unless the __rpc_negotiate_uid()
1630 * call has been invoked to enable this feature.
1631 */
1632 bool_t
1633 svc_get_local_cred(SVCXPRT *xprt, svc_local_cred_t *lcred)
1634 {
1635 /* LINTED pointer alignment */
1636 if (svc_type(xprt) == SVC_DOOR)
1637 return (__svc_get_door_cred(xprt, lcred));
1638 return (__rpc_get_local_cred(xprt, lcred));
1639 }
1640
1641
1642 /* ******************* DUPLICATE ENTRY HANDLING ROUTINES ************** */
1643
1644 /*
1645 * The dup caching routines below provide a cache of received
1646 * transactions. RPC service routines can use this to detect
1647 * retransmissions and re-send a non-failure response. Uses an
1648 * LRU scheme to decide which entries to evict from the cache,
1649 * though only DUP_DONE entries are placed on the LRU list.
1650 * The routines were written towards development of a generic
1651 * SVC_DUP() interface, which can be expanded to encompass the
1652 * svc_dg_enablecache() routines as well. The cache is currently
1653 * private to the automounter.
1654 */
1655
1656
1657 /* dupcache header contains xprt specific information */
1658 struct dupcache {
1659 rwlock_t dc_lock;
1660 time_t dc_time;
1661 int dc_buckets;
1662 int dc_maxsz;
1663 int dc_basis;
1664 struct dupreq *dc_mru;
1665 struct dupreq **dc_hashtbl;
1666 };
1667
1668 /*
1669 * private duplicate cache request routines
1670 */
1671 static int __svc_dupcache_check(struct svc_req *, caddr_t *, uint_t *,
1672 struct dupcache *, uint32_t, uint32_t);
1673 static struct dupreq *__svc_dupcache_victim(struct dupcache *, time_t);
1674 static int __svc_dupcache_enter(struct svc_req *, struct dupreq *,
1675 struct dupcache *, uint32_t, uint32_t, time_t);
1676 static int __svc_dupcache_update(struct svc_req *, caddr_t, uint_t, int,
1677 struct dupcache *, uint32_t, uint32_t);
1678 #ifdef DUP_DEBUG
1679 static void __svc_dupcache_debug(struct dupcache *);
1680 #endif /* DUP_DEBUG */
1681
1682 /* default parameters for the dupcache */
1683 #define DUPCACHE_BUCKETS 257
1684 #define DUPCACHE_TIME 900
1685 #define DUPCACHE_MAXSZ INT_MAX
1686
1687 /*
1688 * __svc_dupcache_init(void *condition, int basis, char *xprt_cache)
1689 * initialize the duprequest cache and assign it to the xprt_cache
1690 * Use default values depending on the cache condition and basis.
1691 * return TRUE on success and FALSE on failure
1692 */
1693 bool_t
1694 __svc_dupcache_init(void *condition, int basis, char **xprt_cache)
1695 {
1696 static mutex_t initdc_lock = DEFAULTMUTEX;
1697 int i;
1698 struct dupcache *dc;
1699
1700 (void) mutex_lock(&initdc_lock);
1701 if (*xprt_cache != NULL) { /* do only once per xprt */
1702 (void) mutex_unlock(&initdc_lock);
1703 syslog(LOG_ERR,
1704 "__svc_dupcache_init: multiply defined dup cache");
1705 return (FALSE);
1706 }
1707
1708 switch (basis) {
1709 case DUPCACHE_FIXEDTIME:
1710 dc = malloc(sizeof (struct dupcache));
1711 if (dc == NULL) {
1712 (void) mutex_unlock(&initdc_lock);
1713 syslog(LOG_ERR,
1714 "__svc_dupcache_init: memory alloc failed");
1715 return (FALSE);
1716 }
1717 (void) rwlock_init(&(dc->dc_lock), USYNC_THREAD, NULL);
1718 if (condition != NULL)
1719 dc->dc_time = *((time_t *)condition);
1720 else
1721 dc->dc_time = DUPCACHE_TIME;
1722 dc->dc_buckets = DUPCACHE_BUCKETS;
1723 dc->dc_maxsz = DUPCACHE_MAXSZ;
1724 dc->dc_basis = basis;
1725 dc->dc_mru = NULL;
1726 dc->dc_hashtbl = malloc(dc->dc_buckets *
1727 sizeof (struct dupreq *));
1728 if (dc->dc_hashtbl == NULL) {
1729 free(dc);
1730 (void) mutex_unlock(&initdc_lock);
1731 syslog(LOG_ERR,
1732 "__svc_dupcache_init: memory alloc failed");
1733 return (FALSE);
1734 }
1735 for (i = 0; i < DUPCACHE_BUCKETS; i++)
1736 dc->dc_hashtbl[i] = NULL;
1737 *xprt_cache = (char *)dc;
1738 break;
1739 default:
1740 (void) mutex_unlock(&initdc_lock);
1741 syslog(LOG_ERR,
1742 "__svc_dupcache_init: undefined dup cache basis");
1743 return (FALSE);
1744 }
1745
1746 (void) mutex_unlock(&initdc_lock);
1747
1748 return (TRUE);
1749 }
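
/*
 * Illustrative sketch (not part of the original file, wrapped in #if 0):
 * typical use of the dup cache from a service dispatch routine.  The
 * cache pointer and example_dup_dispatch() are hypothetical; the real
 * reply marshalling is elided.
 */
#if 0
static char *example_cache;	/* one cache per transport, initially NULL */

static void
example_dup_dispatch(struct svc_req *rqstp, SVCXPRT *transp)
{
	caddr_t resp = NULL;
	uint_t resp_sz = 0;
	time_t window = 900;	/* seconds, same as DUPCACHE_TIME */

	if (example_cache == NULL &&
	    !__svc_dupcache_init(&window, DUPCACHE_FIXEDTIME, &example_cache))
		return;

	switch (__svc_dup(rqstp, &resp, &resp_sz, example_cache)) {
	case DUP_NEW:
		/* execute the request and reply; then record the result */
		/* (a NULL resp is allowed and means "no stored results") */
		(void) __svc_dupdone(rqstp, resp, resp_sz, DUP_DONE,
		    example_cache);
		break;
	case DUP_DONE:
	case DUP_DROP:
		/* retransmission: re-send the cached resp/resp_sz (and */
		/* free resp when finished); do not re-execute the call */
		break;
	case DUP_INPROGRESS:
	case DUP_ERROR:
	default:
		break;
	}
}
#endif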
1750
1751 /*
1752 * __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1753 * char *xprt_cache)
1754 * searches the request cache. Creates an entry and returns DUP_NEW if
1755 * the request is not found in the cache. If it is found, then it
1756 * returns the state of the request (in progress, drop, or done) and
1757 * also allocates, and passes back results to the user (if any) in
1758 * resp_buf, and its length in resp_bufsz. DUP_ERROR is returned on error.
1759 */
1760 int
1761 __svc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1762 char *xprt_cache)
1763 {
1764 uint32_t drxid, drhash;
1765 int rc;
1766 struct dupreq *dr = NULL;
1767 time_t timenow = time(NULL);
1768
1769 /* LINTED pointer alignment */
1770 struct dupcache *dc = (struct dupcache *)xprt_cache;
1771
1772 if (dc == NULL) {
1773 syslog(LOG_ERR, "__svc_dup: undefined cache");
1774 return (DUP_ERROR);
1775 }
1776
1777 /* get the xid of the request */
1778 if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void*)&drxid) == FALSE) {
1779 syslog(LOG_ERR, "__svc_dup: xid error");
1780 return (DUP_ERROR);
1781 }
1782 drhash = drxid % dc->dc_buckets;
1783
1784 if ((rc = __svc_dupcache_check(req, resp_buf, resp_bufsz, dc, drxid,
1785 drhash)) != DUP_NEW)
1786 return (rc);
1787
1788 if ((dr = __svc_dupcache_victim(dc, timenow)) == NULL)
1789 return (DUP_ERROR);
1790
1791 if ((rc = __svc_dupcache_enter(req, dr, dc, drxid, drhash, timenow))
1792 == DUP_ERROR)
1793 return (rc);
1794
1795 return (DUP_NEW);
1796 }
1797
1798
1799
1800 /*
1801 * __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf,
1802 * uint_t *resp_bufsz, struct dupcache *dc, uint32_t drxid,
1803 * uint32_t drhash)
1804 * Checks to see whether an entry already exists in the cache. If it does,
1805 * copy the results back into resp_buf, if appropriate. Return the status
1806 * of the request, or DUP_NEW if the entry is not in the cache.
1807 */
1808 static int
1809 __svc_dupcache_check(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz,
1810 struct dupcache *dc, uint32_t drxid, uint32_t drhash)
1811 {
1812 struct dupreq *dr = NULL;
1813
1814 (void) rw_rdlock(&(dc->dc_lock));
1815 dr = dc->dc_hashtbl[drhash];
1816 while (dr != NULL) {
1817 if (dr->dr_xid == drxid &&
1818 dr->dr_proc == req->rq_proc &&
1819 dr->dr_prog == req->rq_prog &&
1820 dr->dr_vers == req->rq_vers &&
1821 dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
1822 memcmp(dr->dr_addr.buf,
1823 req->rq_xprt->xp_rtaddr.buf,
1824 dr->dr_addr.len) == 0) { /* entry found */
1825 if (dr->dr_hash != drhash) {
1826 /* sanity check */
1827 (void) rw_unlock((&dc->dc_lock));
1828 syslog(LOG_ERR,
1829 "__svc_dupcache_check: hashing error");
1830 return (DUP_ERROR);
1831 }
1832
1833 /*
1834 * return results for requests on the lru list, if
1835 * appropriate; requests must be DUP_DROP or DUP_DONE
1836 * to have a result. A NULL buffer in the cache
1837 * implies no results were sent during dupdone.
1838 * A NULL buffer in the call implies not interested
1839 * in results.
1840 */
1841 if (((dr->dr_status == DUP_DONE) ||
1842 (dr->dr_status == DUP_DROP)) &&
1843 resp_buf != NULL &&
1844 dr->dr_resp.buf != NULL) {
1845 *resp_buf = malloc(dr->dr_resp.len);
1846 if (*resp_buf == NULL) {
1847 syslog(LOG_ERR,
1848 "__svc_dupcache_check: malloc failed");
1849 (void) rw_unlock(&(dc->dc_lock));
1850 return (DUP_ERROR);
1851 }
1852 (void) memset(*resp_buf, 0, dr->dr_resp.len);
1853 (void) memcpy(*resp_buf, dr->dr_resp.buf,
1854 dr->dr_resp.len);
1855 *resp_bufsz = dr->dr_resp.len;
1856 } else {
1857 /* no result */
1858 if (resp_buf)
1859 *resp_buf = NULL;
1860 if (resp_bufsz)
1861 *resp_bufsz = 0;
1862 }
1863 (void) rw_unlock(&(dc->dc_lock));
1864 return (dr->dr_status);
1865 }
1866 dr = dr->dr_chain;
1867 }
1868 (void) rw_unlock(&(dc->dc_lock));
1869 return (DUP_NEW);
1870 }
1871
1872 /*
1873 * __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1874 * Return a victim dupreq entry to the caller, depending on cache policy.
1875 */
1876 static struct dupreq *
1877 __svc_dupcache_victim(struct dupcache *dc, time_t timenow)
1878 {
1879 struct dupreq *dr = NULL;
1880
1881 switch (dc->dc_basis) {
1882 case DUPCACHE_FIXEDTIME:
1883 /*
1884 * The hash policy is to free up a bit of the hash
1885 * table before allocating a new entry as the victim.
1886 * Freeing up the hash table each time should split
1887 * the cost of keeping the hash table clean among threads.
1888 * Note that only DONE or DROPPED entries are on the lru
1889 * list but we do a sanity check anyway.
1890 */
1891 (void) rw_wrlock(&(dc->dc_lock));
1892 while ((dc->dc_mru) && (dr = dc->dc_mru->dr_next) &&
1893 ((timenow - dr->dr_time) > dc->dc_time)) {
1894 /* clean and then free the entry */
1895 if (dr->dr_status != DUP_DONE &&
1896 dr->dr_status != DUP_DROP) {
1897 /*
1898 * The LRU list can't contain an
1899 * entry where the status is other than
1900 * DUP_DONE or DUP_DROP.
1901 */
1902 syslog(LOG_ERR,
1903 "__svc_dupcache_victim: bad victim");
1904 #ifdef DUP_DEBUG
1905 /*
1906 * Need to hold the reader/writers lock to
1907 * print the cache info, since we already
1908 * hold the writers lock, we shall continue
1909 * calling __svc_dupcache_debug()
1910 */
1911 __svc_dupcache_debug(dc);
1912 #endif /* DUP_DEBUG */
1913 (void) rw_unlock(&(dc->dc_lock));
1914 return (NULL);
1915 }
1916 /* free buffers */
1917 if (dr->dr_resp.buf) {
1918 free(dr->dr_resp.buf);
1919 dr->dr_resp.buf = NULL;
1920 }
1921 if (dr->dr_addr.buf) {
1922 free(dr->dr_addr.buf);
1923 dr->dr_addr.buf = NULL;
1924 }
1925
1926 /* unhash the entry */
1927 if (dr->dr_chain)
1928 dr->dr_chain->dr_prevchain = dr->dr_prevchain;
1929 if (dr->dr_prevchain)
1930 dr->dr_prevchain->dr_chain = dr->dr_chain;
1931 if (dc->dc_hashtbl[dr->dr_hash] == dr)
1932 dc->dc_hashtbl[dr->dr_hash] = dr->dr_chain;
1933
1934 /* modify the lru pointers */
1935 if (dc->dc_mru == dr) {
1936 dc->dc_mru = NULL;
1937 } else {
1938 dc->dc_mru->dr_next = dr->dr_next;
1939 dr->dr_next->dr_prev = dc->dc_mru;
1940 }
1941 free(dr);
1942 dr = NULL;
1943 }
1944 (void) rw_unlock(&(dc->dc_lock));
1945
1946 /*
1947 * Allocate and return new clean entry as victim
1948 */
1949 if ((dr = malloc(sizeof (*dr))) == NULL) {
1950 syslog(LOG_ERR,
1951 "__svc_dupcache_victim: malloc failed");
1952 return (NULL);
1953 }
1954 (void) memset(dr, 0, sizeof (*dr));
1955 return (dr);
1956 default:
1957 syslog(LOG_ERR,
1958 "__svc_dupcache_victim: undefined dup cache_basis");
1959 return (NULL);
1960 }
1961 }
1962
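/*
 * Note (added for clarity): the lru list used above is a circular,
 * doubly-linked list threaded through dr_next/dr_prev. dc_mru always
 * points at the most recently used entry, so the least recently used
 * entry, the only expiry candidate, is one step away:
 *
 *	lru entry = dc_mru->dr_next	(oldest; checked against dc_time)
 *	mru entry = dc_mru		(newest; promoted by
 *					 __svc_dupcache_update() below)
 */
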
/*
 * __svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
 *	struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
 * Build a new duprequest entry and insert it into the cache.
 */
static int
__svc_dupcache_enter(struct svc_req *req, struct dupreq *dr,
    struct dupcache *dc, uint32_t drxid, uint32_t drhash, time_t timenow)
{
	dr->dr_xid = drxid;
	dr->dr_prog = req->rq_prog;
	dr->dr_vers = req->rq_vers;
	dr->dr_proc = req->rq_proc;
	dr->dr_addr.maxlen = req->rq_xprt->xp_rtaddr.len;
	dr->dr_addr.len = dr->dr_addr.maxlen;
	if ((dr->dr_addr.buf = malloc(dr->dr_addr.maxlen)) == NULL) {
		syslog(LOG_ERR, "__svc_dupcache_enter: malloc failed");
		free(dr);
		return (DUP_ERROR);
	}
	(void) memset(dr->dr_addr.buf, 0, dr->dr_addr.len);
	(void) memcpy(dr->dr_addr.buf, req->rq_xprt->xp_rtaddr.buf,
	    dr->dr_addr.len);
	dr->dr_resp.buf = NULL;
	dr->dr_resp.maxlen = 0;
	dr->dr_resp.len = 0;
	dr->dr_status = DUP_INPROGRESS;
	dr->dr_time = timenow;
	dr->dr_hash = drhash;	/* needed for efficient victim cleanup */

	/* place entry at head of hash table */
	(void) rw_wrlock(&(dc->dc_lock));
	dr->dr_chain = dc->dc_hashtbl[drhash];
	dr->dr_prevchain = NULL;
	if (dc->dc_hashtbl[drhash] != NULL)
		dc->dc_hashtbl[drhash]->dr_prevchain = dr;
	dc->dc_hashtbl[drhash] = dr;
	(void) rw_unlock(&(dc->dc_lock));
	return (DUP_NEW);
}

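/*
 * Note (added for clarity): after the insertion above, each hash bucket
 * is a doubly-linked chain with the newest entry first:
 *
 *	dc_hashtbl[drhash] -> dr <-> older <-> ... <-> oldest -> NULL
 *
 * where dr_chain points toward older entries and dr_prevchain points
 * back toward the bucket head. The back pointer is what allows
 * __svc_dupcache_victim() to unhash an expired entry in constant time
 * without rescanning the chain.
 */
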
/*
 * __svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
 *	int status, char *xprt_cache)
 * Marks the request done (DUP_DONE or DUP_DROP) and stores the response.
 * Only DUP_DONE and DUP_DROP are valid status values. Sets the lru
 * pointers to make the entry the most recently used. Returns DUP_ERROR
 * or the status.
 */
int
__svc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
    int status, char *xprt_cache)
{
	uint32_t drxid, drhash;
	int rc;

	/* LINTED pointer alignment */
	struct dupcache *dc = (struct dupcache *)xprt_cache;

	if (dc == NULL) {
		syslog(LOG_ERR, "__svc_dupdone: undefined cache");
		return (DUP_ERROR);
	}

	if (status != DUP_DONE && status != DUP_DROP) {
		syslog(LOG_ERR, "__svc_dupdone: invalid dupdone status");
		syslog(LOG_ERR, " must be DUP_DONE or DUP_DROP");
		return (DUP_ERROR);
	}

	/* find the xid of the entry in the cache */
	if (SVC_CONTROL(req->rq_xprt, SVCGET_XID, (void *)&drxid) == FALSE) {
		syslog(LOG_ERR, "__svc_dupdone: xid error");
		return (DUP_ERROR);
	}
	drhash = drxid % dc->dc_buckets;

	/* update the status of the entry and result buffers, if required */
	if ((rc = __svc_dupcache_update(req, resp_buf, resp_bufsz, status,
	    dc, drxid, drhash)) == DUP_ERROR) {
		syslog(LOG_ERR, "__svc_dupdone: cache entry error");
		return (DUP_ERROR);
	}

	return (rc);
}

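/*
 * Illustrative sketch (not code from this file): a service using the
 * cache pairs each check with a dupdone on its reply path, e.g.
 *
 *	if (svc_sendreply(req->rq_xprt, xdr_result, (caddr_t)&result))
 *		(void) __svc_dupdone(req, (caddr_t)&result,
 *		    sizeof (result), DUP_DONE, xprt_cache);
 *	else
 *		(void) __svc_dupdone(req, NULL, 0, DUP_DROP, xprt_cache);
 *
 * Here result is a hypothetical reply value and xprt_cache is the
 * cache handle the transport was initialized with. Passing a NULL
 * resp_buf records the status without caching a reply, matching the
 * "no results were sent during dupdone" case handled by
 * __svc_dupcache_check() above.
 */
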
/*
 * __svc_dupcache_update(struct svc_req *req, caddr_t resp_buf,
 *	uint_t resp_bufsz, int status, struct dupcache *dc, uint32_t drxid,
 *	uint32_t drhash)
 * Check if the entry exists in the dupcache. If it does, update its
 * status and time, and also its response buffer, if appropriate. It is
 * possible, but unlikely, for DUP_DONE requests not to exist in the
 * cache. Returns DUP_ERROR or the status.
 */
static int
__svc_dupcache_update(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
    int status, struct dupcache *dc, uint32_t drxid, uint32_t drhash)
{
	struct dupreq *dr = NULL;
	time_t timenow = time(NULL);

	(void) rw_wrlock(&(dc->dc_lock));
	dr = dc->dc_hashtbl[drhash];
	while (dr != NULL) {
		if (dr->dr_xid == drxid &&
		    dr->dr_proc == req->rq_proc &&
		    dr->dr_prog == req->rq_prog &&
		    dr->dr_vers == req->rq_vers &&
		    dr->dr_addr.len == req->rq_xprt->xp_rtaddr.len &&
		    memcmp(dr->dr_addr.buf,
		    req->rq_xprt->xp_rtaddr.buf,
		    dr->dr_addr.len) == 0) { /* entry found */
			if (dr->dr_hash != drhash) {
				/* sanity check */
				(void) rw_unlock(&(dc->dc_lock));
				syslog(LOG_ERR,
				    "__svc_dupdone: hashing error");
				return (DUP_ERROR);
			}

			/* store the results if the buffer is not NULL */
			if (resp_buf != NULL) {
				if ((dr->dr_resp.buf =
				    malloc(resp_bufsz)) == NULL) {
					(void) rw_unlock(&(dc->dc_lock));
					syslog(LOG_ERR,
					    "__svc_dupdone: malloc failed");
					return (DUP_ERROR);
				}
				(void) memset(dr->dr_resp.buf, 0, resp_bufsz);
				(void) memcpy(dr->dr_resp.buf, resp_buf,
				    (uint_t)resp_bufsz);
				dr->dr_resp.len = resp_bufsz;
			}

			/* update status and done time */
			dr->dr_status = status;
			dr->dr_time = timenow;

			/* move the entry to the mru position */
			if (dc->dc_mru == NULL) {
				dr->dr_next = dr;
				dr->dr_prev = dr;
			} else {
				dr->dr_next = dc->dc_mru->dr_next;
				dc->dc_mru->dr_next->dr_prev = dr;
				dr->dr_prev = dc->dc_mru;
				dc->dc_mru->dr_next = dr;
			}
			dc->dc_mru = dr;

			(void) rw_unlock(&(dc->dc_lock));
			return (status);
		}
		dr = dr->dr_chain;
	}
	(void) rw_unlock(&(dc->dc_lock));
	syslog(LOG_ERR, "__svc_dupdone: entry not in dup cache");
	return (DUP_ERROR);
}

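/*
 * Note (added for clarity): the mru insertion above links the entry in
 * between the current mru and lru positions and then promotes it, so
 * the relative order of all other entries is preserved:
 *
 *	before:	old_mru -> lru -> ... -> old_mru	(dc_mru = old_mru)
 *	after:	dr -> lru -> ... -> old_mru -> dr	(dc_mru = dr)
 */
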
#ifdef DUP_DEBUG
/*
 * __svc_dupcache_debug(struct dupcache *dc)
 * Print out the contents of the hash table and the lru list.
 *
 * This function requires the caller to hold the reader
 * or writer version of the duplicate request cache lock (dc_lock).
 */
static void
__svc_dupcache_debug(struct dupcache *dc)
{
	struct dupreq *dr = NULL;
	int i;
	bool_t bval;

	fprintf(stderr, " HASHTABLE\n");
	for (i = 0; i < dc->dc_buckets; i++) {
		bval = FALSE;
		dr = dc->dc_hashtbl[i];
		while (dr != NULL) {
			if (!bval) {	/* ensures bucket printed only once */
				fprintf(stderr, " bucket : %d\n", i);
				bval = TRUE;
			}
			fprintf(stderr, "\txid: %u status: %d time: %ld",
			    dr->dr_xid, dr->dr_status, dr->dr_time);
			fprintf(stderr, " dr: %p chain: %p prevchain: %p\n",
			    (void *)dr, (void *)dr->dr_chain,
			    (void *)dr->dr_prevchain);
			dr = dr->dr_chain;
		}
	}

	fprintf(stderr, " LRU\n");
	if (dc->dc_mru) {
		dr = dc->dc_mru->dr_next;	/* lru */
		while (dr != dc->dc_mru) {
			fprintf(stderr, "\txid: %u status: %d time: %ld",
			    dr->dr_xid, dr->dr_status, dr->dr_time);
			fprintf(stderr, " dr: %p next: %p prev: %p\n",
			    (void *)dr, (void *)dr->dr_next,
			    (void *)dr->dr_prev);
			dr = dr->dr_next;
		}
		fprintf(stderr, "\txid: %u status: %d time: %ld",
		    dr->dr_xid, dr->dr_status, dr->dr_time);
		fprintf(stderr, " dr: %p next: %p prev: %p\n", (void *)dr,
		    (void *)dr->dr_next, (void *)dr->dr_prev);
	}
}
#endif /* DUP_DEBUG */
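
/*
 * Note (added for clarity): the dump above is compiled in only when this
 * file is built with DUP_DEBUG defined, e.g.
 *
 *	cc -DDUP_DEBUG ... svc.c
 *
 * and, as noted in its block comment, it may only be called with
 * dc_lock held.
 */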