1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
28 /* All Rights Reserved */
29 /*
30 * Portions of this source code were derived from Berkeley
31 * 4.3 BSD under license from the Regents of the University of
32 * California.
33 */
34
35 /*
36 * Server side for Connection Oriented RPC.
37 *
38 * Actually implements two flavors of transporter -
39 * a rendezvouser (a listener and connection establisher)
40 * and a record stream.
41 */
42
43 #include "mt.h"
44 #include "rpc_mt.h"
45 #include <stdio.h>
46 #include <stdlib.h>
47 #include <rpc/rpc.h>
48 #include <sys/types.h>
49 #include <errno.h>
50 #include <sys/stat.h>
51 #include <sys/mkdev.h>
52 #include <sys/poll.h>
53 #include <syslog.h>
54 #include <rpc/nettype.h>
55 #include <tiuser.h>
56 #include <string.h>
57 #include <stropts.h>
58 #include <stdlib.h>
59 #include <unistd.h>
60 #include <sys/timod.h>
61 #include <limits.h>
62
63 #ifndef MIN
64 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
65 #endif
66
67 #define CLEANUP_SIZE 1024
68
69 extern int nsvc_xdrs;
70 extern int __rpc_connmaxrec;
71 extern int __rpc_irtimeout;
72
73 extern SVCXPRT **svc_xports;
74 extern int __td_setnodelay(int);
75 extern bool_t __xdrrec_getbytes_nonblock(XDR *, enum xprt_stat *);
76 extern bool_t __xdrrec_set_conn_nonblock(XDR *, uint32_t);
77 extern int _t_do_ioctl(int, char *, int, int, int *);
78 extern int __rpc_legal_connmaxrec(int);
79 /* Structure used to initialize SVC_XP_AUTH(xprt).svc_ah_ops. */
80 extern struct svc_auth_ops svc_auth_any_ops;
81 extern void __xprt_unregister_private(const SVCXPRT *, bool_t);
82
83 static struct xp_ops *svc_vc_ops(void);
84 static struct xp_ops *svc_vc_rendezvous_ops(void);
85 static void svc_vc_destroy(SVCXPRT *);
86 static bool_t svc_vc_nonblock(SVCXPRT *, SVCXPRT *);
87 static int read_vc(SVCXPRT *, caddr_t, int);
88 static int write_vc(SVCXPRT *, caddr_t, int);
89 static SVCXPRT *makefd_xprt(int, uint_t, uint_t, t_scalar_t, char *);
90 static bool_t fd_is_dead(int);
91 static void update_nonblock_timestamps(SVCXPRT *);
92
93 struct cf_rendezvous { /* kept in xprt->xp_p1 for rendezvouser */
94 uint_t sendsize;
95 uint_t recvsize;
96 struct t_call *t_call;
97 struct t_bind *t_bind;
98 t_scalar_t cf_tsdu;
99 char *cf_cache;
100 int tcp_flag;
101 int tcp_keepalive;
102 int cf_connmaxrec;
103 };
104
105 struct cf_conn { /* kept in xprt->xp_p1 for actual connection */
106 uint_t sendsize;
107 uint_t recvsize;
108 enum xprt_stat strm_stat;
109 uint32_t x_id;
110 t_scalar_t cf_tsdu;
111 XDR xdrs;
112 char *cf_cache;
113 char verf_body[MAX_AUTH_BYTES];
114 bool_t cf_conn_nonblock;
115 time_t cf_conn_nonblock_timestamp;
116 };
117
118 static int t_rcvall(int, char *, int);
119 static int t_rcvnonblock(SVCXPRT *, caddr_t, int);
120 static void svc_timeout_nonblock_xprt_and_LRU(bool_t);
121
122 extern int __xdrrec_setfirst(XDR *);
123 extern int __xdrrec_resetfirst(XDR *);
124 extern int __is_xdrrec_first(XDR *);
125
126 void __svc_nisplus_enable_timestamps(void);
127 void __svc_timeout_nonblock_xprt(void);
128
129 /*
130  * This is intended as a performance improvement over the old string handling
131  * code, by moving read-only data into the text segment.
132 * Format = <routine> : <error>
133 */
134
135 static const char errstring[] = " %s : %s";
136
137 /* Routine names */
138
139 static const char svc_vc_create_str[] = "svc_vc_create";
140 static const char svc_fd_create_str[] = "svc_fd_create";
141 static const char makefd_xprt_str[] = "svc_vc_create: makefd_xprt ";
142 static const char rendezvous_request_str[] = "rendezvous_request";
143 static const char svc_vc_fderr[] =
144 "fd > FD_SETSIZE; Use rpc_control(RPC_SVC_USE_POLLFD,...);";
145 static const char do_accept_str[] = "do_accept";
146
147 /* error messages */
148
149 static const char no_mem_str[] = "out of memory";
150 static const char no_tinfo_str[] = "could not get transport information";
151 static const char no_fcntl_getfl_str[] = "could not get status flags and modes";
152 static const char no_nonblock_str[] = "could not set transport non-blocking";
153
154 /*
155 * Records a timestamp when data comes in on a descriptor. This is
156 * only used if timestamps are enabled with __svc_nisplus_enable_timestamps().
157 */
158 static long *timestamps;
159 static int ntimestamps; /* keep track of how many timestamps */
160 static mutex_t timestamp_lock = DEFAULTMUTEX;
161
162 /*
163 * Used to determine whether the time-out logic should be executed.
164 */
165 static bool_t check_nonblock_timestamps = FALSE;
166
167 void
168 svc_vc_xprtfree(SVCXPRT *xprt)
169 {
170 /* LINTED pointer alignment */
171 SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
172 struct cf_rendezvous *r = xprt ?
173 /* LINTED pointer alignment */
174 (struct cf_rendezvous *)xprt->xp_p1 : NULL;
175
176 if (!xprt)
177 return;
178
179 if (xprt->xp_tp)
180 free(xprt->xp_tp);
181 if (xprt->xp_netid)
182 free(xprt->xp_netid);
183 if (xt && (xt->parent == NULL)) {
184 if (xprt->xp_ltaddr.buf)
185 free(xprt->xp_ltaddr.buf);
186 if (xprt->xp_rtaddr.buf)
187 free(xprt->xp_rtaddr.buf);
188 }
189 if (r) {
190 if (r->t_call)
191 (void) t_free((char *)r->t_call, T_CALL);
192 if (r->t_bind)
193 (void) t_free((char *)r->t_bind, T_BIND);
194 free(r);
195 }
196 svc_xprt_free(xprt);
197 }
198
199 /*
200 * Usage:
201 * xprt = svc_vc_create(fd, sendsize, recvsize);
202 * Since connection streams do buffered io similar to stdio, the caller
203 * can specify how big the send and receive buffers are. If recvsize
204 * or sendsize are 0, defaults will be chosen.
205 * fd should be open and bound.
206 */
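/*
 * A minimal usage sketch (illustrative, not part of the original code):
 * the caller opens and binds its own TLI endpoint and then hands it to
 * svc_vc_create(); the device name below is an assumption.
 *
 *	int fd = t_open("/dev/tcp", O_RDWR, NULL);
 *	if (fd != -1 && t_bind(fd, NULL, NULL) != -1) {
 *		SVCXPRT *xprt = svc_vc_create(fd, 0, 0);
 *		if (xprt != NULL)
 *			svc_run();	(never returns)
 *	}
 */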
207 SVCXPRT *
208 svc_vc_create_private(int fd, uint_t sendsize, uint_t recvsize)
209 {
210 struct cf_rendezvous *r;
211 SVCXPRT *xprt;
212 struct t_info tinfo;
213
214 if (RPC_FD_NOTIN_FDSET(fd)) {
215 errno = EBADF;
216 t_errno = TBADF;
217 (void) syslog(LOG_ERR, errstring, svc_vc_create_str,
218 svc_vc_fderr);
219 return (NULL);
220 }
221 if ((xprt = svc_xprt_alloc()) == NULL) {
222 (void) syslog(LOG_ERR, errstring,
223 svc_vc_create_str, no_mem_str);
224 return (NULL);
225 }
226 /* LINTED pointer alignment */
227 svc_flags(xprt) |= SVC_RENDEZVOUS;
228
229 r = calloc(1, sizeof (*r));
230 if (r == NULL) {
231 (void) syslog(LOG_ERR, errstring,
232 svc_vc_create_str, no_mem_str);
233 svc_vc_xprtfree(xprt);
234 return (NULL);
235 }
236 if (t_getinfo(fd, &tinfo) == -1) {
237 char errorstr[100];
238
239 __tli_sys_strerror(errorstr, sizeof (errorstr),
240 t_errno, errno);
241 (void) syslog(LOG_ERR, "%s : %s : %s",
242 svc_vc_create_str, no_tinfo_str, errorstr);
243 free(r);
244 svc_vc_xprtfree(xprt);
245 return (NULL);
246 }
247 /*
248 * Find the receive and the send size
249 */
250 r->sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
251 r->recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
252 if ((r->sendsize == 0) || (r->recvsize == 0)) {
253 syslog(LOG_ERR,
254 "svc_vc_create: transport does not support "
255 "data transfer");
256 free(r);
257 svc_vc_xprtfree(xprt);
258 return (NULL);
259 }
260
261 /* LINTED pointer alignment */
262 r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
263 if (r->t_call == NULL) {
264 (void) syslog(LOG_ERR, errstring,
265 svc_vc_create_str, no_mem_str);
266 free(r);
267 svc_vc_xprtfree(xprt);
268 return (NULL);
269 }
270
271 /* LINTED pointer alignment */
272 r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
273 if (r->t_bind == NULL) {
274 (void) syslog(LOG_ERR, errstring,
275 svc_vc_create_str, no_mem_str);
276 (void) t_free((char *)r->t_call, T_CALL);
277 free(r);
278 svc_vc_xprtfree(xprt);
279 return (NULL);
280 }
281
282 r->cf_tsdu = tinfo.tsdu;
283 r->tcp_flag = FALSE;
284 r->tcp_keepalive = FALSE;
285 r->cf_connmaxrec = __rpc_connmaxrec;
286 xprt->xp_fd = fd;
287 xprt->xp_p1 = (caddr_t)r;
288 xprt->xp_p2 = NULL;
289 xprt->xp_verf = _null_auth;
290 xprt->xp_ops = svc_vc_rendezvous_ops();
291 /* LINTED pointer alignment */
292 SVC_XP_AUTH(xprt).svc_ah_ops = svc_auth_any_ops;
293 /* LINTED pointer alignment */
294 SVC_XP_AUTH(xprt).svc_ah_private = NULL;
295
296 return (xprt);
297 }
298
299 SVCXPRT *
300 svc_vc_create(const int fd, const uint_t sendsize, const uint_t recvsize)
301 {
302 SVCXPRT *xprt;
303
304 if ((xprt = svc_vc_create_private(fd, sendsize, recvsize)) != NULL)
305 xprt_register(xprt);
306 return (xprt);
307 }
308
309 SVCXPRT *
310 svc_vc_xprtcopy(SVCXPRT *parent)
311 {
312 SVCXPRT *xprt;
313 struct cf_rendezvous *r, *pr;
314 int fd = parent->xp_fd;
315
316 if ((xprt = svc_xprt_alloc()) == NULL)
317 return (NULL);
318
319 /* LINTED pointer alignment */
320 SVCEXT(xprt)->parent = parent;
321 /* LINTED pointer alignment */
322 SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
323
324 xprt->xp_fd = fd;
325 xprt->xp_ops = svc_vc_rendezvous_ops();
326 if (parent->xp_tp) {
327 xprt->xp_tp = (char *)strdup(parent->xp_tp);
328 if (xprt->xp_tp == NULL) {
329 syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
330 svc_vc_xprtfree(xprt);
331 return (NULL);
332 }
333 }
334 if (parent->xp_netid) {
335 xprt->xp_netid = (char *)strdup(parent->xp_netid);
336 if (xprt->xp_netid == NULL) {
337 syslog(LOG_ERR, "svc_vc_xprtcopy: strdup failed");
338 if (xprt->xp_tp)
339 free(xprt->xp_tp);
340 svc_vc_xprtfree(xprt);
341 return (NULL);
342 }
343 }
344
345 /*
346 * can share both local and remote address
347 */
348 xprt->xp_ltaddr = parent->xp_ltaddr;
349 xprt->xp_rtaddr = parent->xp_rtaddr; /* XXX - not used for rendezvous */
350 xprt->xp_type = parent->xp_type;
351 xprt->xp_verf = parent->xp_verf;
352
353 if ((r = calloc(1, sizeof (*r))) == NULL) {
354 svc_vc_xprtfree(xprt);
355 return (NULL);
356 }
357 xprt->xp_p1 = (caddr_t)r;
358 /* LINTED pointer alignment */
359 pr = (struct cf_rendezvous *)parent->xp_p1;
360 r->sendsize = pr->sendsize;
361 r->recvsize = pr->recvsize;
362 r->cf_tsdu = pr->cf_tsdu;
363 r->cf_cache = pr->cf_cache;
364 r->tcp_flag = pr->tcp_flag;
365 r->tcp_keepalive = pr->tcp_keepalive;
366 r->cf_connmaxrec = pr->cf_connmaxrec;
367 /* LINTED pointer alignment */
368 r->t_call = (struct t_call *)t_alloc(fd, T_CALL, T_ADDR | T_OPT);
369 if (r->t_call == NULL) {
370 svc_vc_xprtfree(xprt);
371 return (NULL);
372 }
373 /* LINTED pointer alignment */
374 r->t_bind = (struct t_bind *)t_alloc(fd, T_BIND, T_ADDR);
375 if (r->t_bind == NULL) {
376 svc_vc_xprtfree(xprt);
377 return (NULL);
378 }
379
380 return (xprt);
381 }
382
383 /*
384 * XXX : Used for setting flag to indicate that this is TCP
385 */
386
387 /*ARGSUSED*/
388 int
389 __svc_vc_setflag(SVCXPRT *xprt, int flag)
390 {
391 struct cf_rendezvous *r;
392
393 /* LINTED pointer alignment */
394 r = (struct cf_rendezvous *)xprt->xp_p1;
395 r->tcp_flag = TRUE;
396 return (1);
397 }
398
399 /*
400 * used for the actual connection.
401 */
402 SVCXPRT *
403 svc_fd_create_private(int fd, uint_t sendsize, uint_t recvsize)
404 {
405 struct t_info tinfo;
406 SVCXPRT *dummy;
407 struct netbuf tres = {0};
408
409 if (RPC_FD_NOTIN_FDSET(fd)) {
410 errno = EBADF;
411 t_errno = TBADF;
412 (void) syslog(LOG_ERR, errstring,
413 svc_fd_create_str, svc_vc_fderr);
414 return (NULL);
415 }
416 if (t_getinfo(fd, &tinfo) == -1) {
417 char errorstr[100];
418
419 __tli_sys_strerror(errorstr, sizeof (errorstr),
420 t_errno, errno);
421 (void) syslog(LOG_ERR, "%s : %s : %s",
422 svc_fd_create_str, no_tinfo_str, errorstr);
423 return (NULL);
424 }
425 /*
426 * Find the receive and the send size
427 */
428 sendsize = __rpc_get_t_size((int)sendsize, tinfo.tsdu);
429 recvsize = __rpc_get_t_size((int)recvsize, tinfo.tsdu);
430 if ((sendsize == 0) || (recvsize == 0)) {
431 syslog(LOG_ERR, errstring, svc_fd_create_str,
432 "transport does not support data transfer");
433 return (NULL);
434 }
435 dummy = makefd_xprt(fd, sendsize, recvsize, tinfo.tsdu, NULL);
436 /* NULL signifies no dup cache */
437 /* Assign the local bind address */
438 if (t_getname(fd, &tres, LOCALNAME) == -1)
439 tres.len = 0;
440 dummy->xp_ltaddr = tres;
441 /* Fill in type of service */
442 dummy->xp_type = tinfo.servtype;
443 return (dummy);
444 }
445
446 SVCXPRT *
447 svc_fd_create(const int fd, const uint_t sendsize, const uint_t recvsize)
448 {
449 SVCXPRT *xprt;
450
451 if ((xprt = svc_fd_create_private(fd, sendsize, recvsize)) != NULL)
452 xprt_register(xprt);
453 return (xprt);
454 }
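/*
 * A minimal usage sketch (illustrative, not part of the original code):
 * svc_fd_create() is meant for a descriptor that is already connected,
 * e.g. one returned by t_accept() or inherited from a listener such as
 * inetd.  connfd, PROGNUM, VERSNUM and dispatch() are placeholders.
 *
 *	SVCXPRT *xprt = svc_fd_create(connfd, 0, 0);
 *	if (xprt != NULL &&
 *	    svc_reg(xprt, PROGNUM, VERSNUM, dispatch, NULL))
 *		svc_run();
 *
 * Passing a NULL netconfig pointer to svc_reg() skips the rpcbind
 * registration, which is the usual choice for an inherited connection.
 */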
455
456 void
457 svc_fd_xprtfree(SVCXPRT *xprt)
458 {
459 /* LINTED pointer alignment */
460 SVCXPRT_EXT *xt = xprt ? SVCEXT(xprt) : NULL;
461 /* LINTED pointer alignment */
462 struct cf_conn *cd = xprt ? (struct cf_conn *)xprt->xp_p1 : NULL;
463
464 if (!xprt)
465 return;
466
467 if (xprt->xp_tp)
468 free(xprt->xp_tp);
469 if (xprt->xp_netid)
470 free(xprt->xp_netid);
471 if (xt && (xt->parent == NULL)) {
472 if (xprt->xp_ltaddr.buf)
473 free(xprt->xp_ltaddr.buf);
474 if (xprt->xp_rtaddr.buf)
475 free(xprt->xp_rtaddr.buf);
476 }
477 if (cd) {
478 XDR_DESTROY(&(cd->xdrs));
479 free(cd);
480 }
481 if (xt && (xt->parent == NULL) && xprt->xp_p2) {
482 /* LINTED pointer alignment */
483 free(((struct netbuf *)xprt->xp_p2)->buf);
484 free(xprt->xp_p2);
485 }
486 svc_xprt_free(xprt);
487 }
488
489 static SVCXPRT *
490 makefd_xprt(int fd, uint_t sendsize, uint_t recvsize, t_scalar_t tsdu,
491 char *cache)
492 {
493 SVCXPRT *xprt;
494 struct cf_conn *cd;
495
496 xprt = svc_xprt_alloc();
497 if (xprt == NULL) {
498 (void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
499 return (NULL);
500 }
501 /* LINTED pointer alignment */
502 svc_flags(xprt) |= SVC_CONNECTION;
503
504 cd = malloc(sizeof (struct cf_conn));
505 if (cd == NULL) {
506 (void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
507 svc_fd_xprtfree(xprt);
508 return (NULL);
509 }
510 cd->sendsize = sendsize;
511 cd->recvsize = recvsize;
512 cd->strm_stat = XPRT_IDLE;
513 cd->cf_tsdu = tsdu;
514 cd->cf_cache = cache;
515 cd->cf_conn_nonblock = FALSE;
516 cd->cf_conn_nonblock_timestamp = 0;
517 cd->xdrs.x_ops = NULL;
518 xdrrec_create(&(cd->xdrs), sendsize, 0, (caddr_t)xprt,
519 (int(*)())NULL, (int(*)(void *, char *, int))write_vc);
520 if (cd->xdrs.x_ops == NULL) {
521 (void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
522 free(cd);
523 svc_fd_xprtfree(xprt);
524 return (NULL);
525 }
526
527 (void) rw_wrlock(&svc_fd_lock);
528 if (svc_xdrs == NULL) {
529 svc_xdrs = calloc(FD_INCREMENT, sizeof (XDR *));
530 if (svc_xdrs == NULL) {
531 (void) syslog(LOG_ERR, errstring, makefd_xprt_str,
532 no_mem_str);
533 XDR_DESTROY(&(cd->xdrs));
534 free(cd);
535 svc_fd_xprtfree(xprt);
536 (void) rw_unlock(&svc_fd_lock);
537 return (NULL);
538 }
539 nsvc_xdrs = FD_INCREMENT;
540 }
541
542 while (fd >= nsvc_xdrs) {
543 XDR **tmp_xdrs = svc_xdrs;
544 tmp_xdrs = realloc(svc_xdrs,
545 sizeof (XDR *) * (nsvc_xdrs + FD_INCREMENT));
546 if (tmp_xdrs == NULL) {
547 (void) syslog(LOG_ERR, errstring, makefd_xprt_str,
548 no_mem_str);
549 XDR_DESTROY(&(cd->xdrs));
550 free(cd);
551 svc_fd_xprtfree(xprt);
552 (void) rw_unlock(&svc_fd_lock);
553 return (NULL);
554 }
555
556 svc_xdrs = tmp_xdrs;
557 /* zero the newly added slots following the previously allocated array */
558 (void) memset(&svc_xdrs[nsvc_xdrs], 0,
559 sizeof (XDR *) * FD_INCREMENT);
560 nsvc_xdrs += FD_INCREMENT;
561 }
562
563 if (svc_xdrs[fd] != NULL) {
564 XDR_DESTROY(svc_xdrs[fd]);
565 } else if ((svc_xdrs[fd] = malloc(sizeof (XDR))) == NULL) {
566 (void) syslog(LOG_ERR, errstring, makefd_xprt_str, no_mem_str);
567 XDR_DESTROY(&(cd->xdrs));
568 free(cd);
569 svc_fd_xprtfree(xprt);
570 (void) rw_unlock(&svc_fd_lock);
571 return (NULL);
572 }
573 (void) memset(svc_xdrs[fd], 0, sizeof (XDR));
574 xdrrec_create(svc_xdrs[fd], 0, recvsize, (caddr_t)xprt,
575 (int(*)(void *, char *, int))read_vc, (int(*)())NULL);
576 if (svc_xdrs[fd]->x_ops == NULL) {
577 free(svc_xdrs[fd]);
578 svc_xdrs[fd] = NULL;
579 XDR_DESTROY(&(cd->xdrs));
580 free(cd);
581 svc_fd_xprtfree(xprt);
582 (void) rw_unlock(&svc_fd_lock);
583 return (NULL);
584 }
585 (void) rw_unlock(&svc_fd_lock);
586
587 xprt->xp_p1 = (caddr_t)cd;
588 xprt->xp_p2 = NULL;
589 xprt->xp_verf.oa_base = cd->verf_body;
590 xprt->xp_ops = svc_vc_ops(); /* truly deals with calls */
591 xprt->xp_fd = fd;
592 return (xprt);
593 }
594
595 SVCXPRT *
596 svc_fd_xprtcopy(SVCXPRT *parent)
597 {
598 SVCXPRT *xprt;
599 struct cf_conn *cd, *pcd;
600
601 if ((xprt = svc_xprt_alloc()) == NULL)
602 return (NULL);
603
604 /* LINTED pointer alignment */
605 SVCEXT(xprt)->parent = parent;
606 /* LINTED pointer alignment */
607 SVCEXT(xprt)->flags = SVCEXT(parent)->flags;
608
609 xprt->xp_fd = parent->xp_fd;
610 xprt->xp_ops = svc_vc_ops();
611 if (parent->xp_tp) {
612 xprt->xp_tp = (char *)strdup(parent->xp_tp);
613 if (xprt->xp_tp == NULL) {
614 syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
615 svc_fd_xprtfree(xprt);
616 return (NULL);
617 }
618 }
619 if (parent->xp_netid) {
620 xprt->xp_netid = (char *)strdup(parent->xp_netid);
621 if (xprt->xp_netid == NULL) {
622 syslog(LOG_ERR, "svc_fd_xprtcopy: strdup failed");
623 if (xprt->xp_tp)
624 free(xprt->xp_tp);
625 svc_fd_xprtfree(xprt);
626 return (NULL);
627 }
628 }
629 /*
630 * share local and remote addresses with parent
631 */
632 xprt->xp_ltaddr = parent->xp_ltaddr;
633 xprt->xp_rtaddr = parent->xp_rtaddr;
634 xprt->xp_type = parent->xp_type;
635
636 if ((cd = malloc(sizeof (struct cf_conn))) == NULL) {
637 svc_fd_xprtfree(xprt);
638 return (NULL);
639 }
640 /* LINTED pointer alignment */
641 pcd = (struct cf_conn *)parent->xp_p1;
642 cd->sendsize = pcd->sendsize;
643 cd->recvsize = pcd->recvsize;
644 cd->strm_stat = pcd->strm_stat;
645 cd->x_id = pcd->x_id;
646 cd->cf_tsdu = pcd->cf_tsdu;
647 cd->cf_cache = pcd->cf_cache;
648 cd->cf_conn_nonblock = pcd->cf_conn_nonblock;
649 cd->cf_conn_nonblock_timestamp = pcd->cf_conn_nonblock_timestamp;
650 cd->xdrs.x_ops = NULL;
651 xdrrec_create(&(cd->xdrs), cd->sendsize, 0, (caddr_t)xprt,
652 (int(*)())NULL, (int(*)(void *, char *, int))write_vc);
653 if (cd->xdrs.x_ops == NULL) {
654 free(cd);
655 svc_fd_xprtfree(xprt);
656 return (NULL);
657 }
658 xprt->xp_verf.oa_base = cd->verf_body;
659 xprt->xp_p1 = (char *)cd;
660 xprt->xp_p2 = parent->xp_p2; /* shared */
661
662 return (xprt);
663 }
664
665 static void do_accept(int, char *, char *, struct t_call *, struct cf_rendezvous *);
666
667 /*
668 * This routine is called by svc_getreqset() when a packet is received.
669 * The listener process creates another end point on which the actual
670 * connection is carried. It returns FALSE to indicate that it was
671 * not an RPC packet (falsely though), but as a side effect creates
672 * another endpoint which is also registered, which then always
673 * has a request ready to be served.
674 */
675 /* ARGSUSED1 */
676 static bool_t
677 rendezvous_request(SVCXPRT *xprt, struct rpc_msg *msg)
678 {
679 struct cf_rendezvous *r;
680 char *tpname = NULL;
681 char devbuf[256];
682
683 /* LINTED pointer alignment */
684 r = (struct cf_rendezvous *)xprt->xp_p1;
685
686 again:
687 switch (t_look(xprt->xp_fd)) {
688 case T_DISCONNECT:
689 (void) t_rcvdis(xprt->xp_fd, NULL);
690 return (FALSE);
691
692 case T_LISTEN:
693
694 if (t_listen(xprt->xp_fd, r->t_call) == -1) {
695 if ((t_errno == TSYSERR) && (errno == EINTR))
696 goto again;
697
698 if (t_errno == TLOOK) {
699 if (t_look(xprt->xp_fd) == T_DISCONNECT)
700 (void) t_rcvdis(xprt->xp_fd, NULL);
701 }
702 return (FALSE);
703 }
704 break;
705 default:
706 return (FALSE);
707 }
708 /*
709 * Now create another endpoint, and accept the connection
710 * on it.
711 */
712
713 if (xprt->xp_tp) {
714 tpname = xprt->xp_tp;
715 } else {
716 /*
717 * If xprt->xp_tp is NULL, then try to extract the
718 * transport protocol information from the transport
719 * protocol corresponding to xprt->xp_fd
720 */
721 struct netconfig *nconf;
722 tpname = devbuf;
723 if ((nconf = __rpcfd_to_nconf(xprt->xp_fd, xprt->xp_type))
724 == NULL) {
725 (void) syslog(LOG_ERR, errstring,
726 rendezvous_request_str,
727 "no suitable transport");
728 goto err;
729 }
730 (void) strcpy(tpname, nconf->nc_device);
731 freenetconfigent(nconf);
732 }
733
734 do_accept(xprt->xp_fd, tpname, xprt->xp_netid, r->t_call, r);
735
736 err:
737 return (FALSE); /* there is never an rpc msg to be processed */
738 }
739
740 static void
741 do_accept(int srcfd, char *tpname, char *netid, struct t_call *tcp,
742 struct cf_rendezvous *r)
743 {
744 int destfd;
745 struct t_call t_call;
746 struct t_call *tcp2 = NULL;
747 struct t_info tinfo;
748 SVCXPRT *xprt = NULL;
749 SVCXPRT *xprt_srcfd = NULL;
750 char *option, *option_ret;
751 struct opthdr *opt;
752 struct t_optmgmt optreq, optret;
753 int *p_optval;
754
755 destfd = t_open(tpname, O_RDWR, &tinfo);
756 if (check_nonblock_timestamps) {
757 if (destfd == -1 && t_errno == TSYSERR && errno == EMFILE) {
758 /*
759 * Since there are nonblocking connection xprts and
760 * too many open files, the LRU connection xprt should
761 * get destroyed in case an attacker has been creating
762 * many connections.
763 */
764 (void) mutex_lock(&svc_mutex);
765 svc_timeout_nonblock_xprt_and_LRU(TRUE);
766 (void) mutex_unlock(&svc_mutex);
767 destfd = t_open(tpname, O_RDWR, &tinfo);
768 } else {
769 /*
770 * Destroy/timeout all nonblock connection xprts
771 * that have not had recent activity.
772 * Do not destroy LRU xprt unless there are
773 * too many open files.
774 */
775 (void) mutex_lock(&svc_mutex);
776 svc_timeout_nonblock_xprt_and_LRU(FALSE);
777 (void) mutex_unlock(&svc_mutex);
778 }
779 }
780 if (destfd == -1) {
781 char errorstr[100];
782
783 __tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
784 errno);
785 (void) syslog(LOG_ERR, "%s : %s : %s", do_accept_str,
786 "can't open connection", errorstr);
787 (void) t_snddis(srcfd, tcp);
788 return;
789 }
790 if (RPC_FD_NOTIN_FDSET(destfd)) {
791 (void) syslog(LOG_ERR, errstring, do_accept_str,
792 svc_vc_fderr);
793 (void) t_close(destfd);
794 (void) t_snddis(srcfd, tcp);
795 errno = EBADF;
796 t_errno = TBADF;
797 return;
798 }
799 (void) fcntl(destfd, F_SETFD, 1); /* make it "close on exec" */
800 if ((tinfo.servtype != T_COTS) && (tinfo.servtype != T_COTS_ORD)) {
801 /* Not a connection oriented mode */
802 (void) syslog(LOG_ERR, errstring, do_accept_str,
803 "do_accept: illegal transport");
804 (void) t_close(destfd);
805 (void) t_snddis(srcfd, tcp);
806 return;
807 }
808
809
810 if (t_bind(destfd, NULL, r->t_bind) == -1) {
811 char errorstr[100];
812
813 __tli_sys_strerror(errorstr, sizeof (errorstr), t_errno,
814 errno);
815 (void) syslog(LOG_ERR, " %s : %s : %s", do_accept_str,
816 "t_bind failed", errorstr);
817 (void) t_close(destfd);
818 (void) t_snddis(srcfd, tcp);
819 return;
820 }
821
822 if (r->tcp_flag) /* if TCP, set NODELAY flag */
823 (void) __td_setnodelay(destfd);
824
825 /*
826 * This connection is not listening, hence no need to set
827 * the qlen.
828 */
829
830 /*
831 * XXX: The local transport chokes on its own listen
832 * options so we zero them for now
833 */
834 t_call = *tcp;
835 t_call.opt.len = 0;
836 t_call.opt.maxlen = 0;
837 t_call.opt.buf = NULL;
838
839 while (t_accept(srcfd, destfd, &t_call) == -1) {
840 char errorstr[100];
841
842 switch (t_errno) {
843 case TLOOK:
844 again:
845 switch (t_look(srcfd)) {
846 case T_CONNECT:
847 case T_DATA:
848 case T_EXDATA:
849 /* this should not happen */
850 break;
851
852 case T_DISCONNECT:
853 (void) t_rcvdis(srcfd, NULL);
854 break;
855
856 case T_LISTEN:
857 if (tcp2 == NULL)
858 /* LINTED pointer alignment */
859 tcp2 = (struct t_call *)t_alloc(srcfd,
860 T_CALL, T_ADDR | T_OPT);
861 if (tcp2 == NULL) {
862
863 (void) t_close(destfd);
864 (void) t_snddis(srcfd, tcp);
865 syslog(LOG_ERR, errstring,
866 do_accept_str, no_mem_str);
867 return;
868 /* NOTREACHED */
869 }
870 if (t_listen(srcfd, tcp2) == -1) {
871 switch (t_errno) {
872 case TSYSERR:
873 if (errno == EINTR)
874 goto again;
875 break;
876
877 case TLOOK:
878 goto again;
879 }
880 (void) t_free((char *)tcp2, T_CALL);
881 (void) t_close(destfd);
882 (void) t_snddis(srcfd, tcp);
883 return;
884 /* NOTREACHED */
885 }
886 do_accept(srcfd, tpname, netid, tcp2, r);
887 break;
888
889 case T_ORDREL:
890 (void) t_rcvrel(srcfd);
891 (void) t_sndrel(srcfd);
892 break;
893 }
894 if (tcp2) {
895 (void) t_free((char *)tcp2, T_CALL);
896 tcp2 = NULL;
897 }
898 break;
899
900 case TBADSEQ:
901 /*
902 * This can happen if the remote side has
903 * disconnected before the connection is
904 * accepted. In this case, a disconnect
905 * should not be sent on srcfd (important!
906 * the listening fd will be hosed otherwise!).
907 * This error is not logged since this is an
908 * operational situation that is recoverable.
909 */
910 (void) t_close(destfd);
911 return;
912 /* NOTREACHED */
913
914 case TOUTSTATE:
915 /*
916 * This can happen if the t_rcvdis() or t_rcvrel()/
917 * t_sndrel() put srcfd into the T_IDLE state.
918 */
919 if (t_getstate(srcfd) == T_IDLE) {
920 (void) t_close(destfd);
921 (void) t_snddis(srcfd, tcp);
922 return;
923 }
924 /* else FALL THROUGH TO */
925
926 default:
927 __tli_sys_strerror(errorstr, sizeof (errorstr),
928 t_errno, errno);
929 (void) syslog(LOG_ERR,
930 "cannot accept connection: %s (current state %d)",
931 errorstr, t_getstate(srcfd));
932 (void) t_close(destfd);
933 (void) t_snddis(srcfd, tcp);
934 return;
935 /* NOTREACHED */
936 }
937 }
938
939 if (r->tcp_flag && r->tcp_keepalive) {
940 option = malloc(sizeof (struct opthdr) + sizeof (int));
941 option_ret = malloc(sizeof (struct opthdr) + sizeof (int));
942 if (option && option_ret) {
943 /* LINTED pointer cast */
944 opt = (struct opthdr *)option;
945 opt->level = SOL_SOCKET;
946 opt->name = SO_KEEPALIVE;
947 opt->len = sizeof (int);
948 p_optval = (int *)(opt + 1);
949 *p_optval = SO_KEEPALIVE;
950 optreq.opt.maxlen = optreq.opt.len =
951 sizeof (struct opthdr) + sizeof (int);
952 optreq.opt.buf = (char *)option;
953 optreq.flags = T_NEGOTIATE;
954 optret.opt.maxlen = sizeof (struct opthdr)
955 + sizeof (int);
956 optret.opt.buf = (char *)option_ret;
957 (void) t_optmgmt(destfd, &optreq, &optret);
958 free(option);
959 free(option_ret);
960 } else {
961 if (option)
962 free(option);
963 if (option_ret)
964 free(option_ret);
965 }
966 }
967
968
969 /*
970 * make a new transporter
971 */
972 xprt = makefd_xprt(destfd, r->sendsize, r->recvsize, r->cf_tsdu,
973 r->cf_cache);
974 if (xprt == NULL) {
975 /*
976 * makefd_xprt() returns a NULL xprt only when
977 * it's out of memory.
978 */
979 goto memerr;
980 }
981
982 /*
983 * Copy the new local and remote bind information
984 */
985
986 xprt->xp_rtaddr.len = tcp->addr.len;
987 xprt->xp_rtaddr.maxlen = tcp->addr.len;
988 if ((xprt->xp_rtaddr.buf = malloc(tcp->addr.len)) == NULL)
989 goto memerr;
990 (void) memcpy(xprt->xp_rtaddr.buf, tcp->addr.buf, tcp->addr.len);
991
992 if (strcmp(netid, "tcp") == 0) {
993 xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in);
994 if ((xprt->xp_ltaddr.buf =
995 malloc(xprt->xp_ltaddr.maxlen)) == NULL)
996 goto memerr;
997 if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
998 (void) syslog(LOG_ERR,
999 "do_accept: t_getname for tcp failed!");
1000 goto xprt_err;
1001 }
1002 } else if (strcmp(netid, "tcp6") == 0) {
1003 xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_in6);
1004 if ((xprt->xp_ltaddr.buf =
1005 malloc(xprt->xp_ltaddr.maxlen)) == NULL)
1006 goto memerr;
1007 if (t_getname(destfd, &xprt->xp_ltaddr, LOCALNAME) < 0) {
1008 (void) syslog(LOG_ERR,
1009 "do_accept: t_getname for tcp6 failed!");
1010 goto xprt_err;
1011 }
1012 }
1013
1014 xprt->xp_tp = strdup(tpname);
1015 xprt->xp_netid = strdup(netid);
1016 if ((xprt->xp_tp == NULL) ||
1017 (xprt->xp_netid == NULL)) {
1018 goto memerr;
1019 }
1020 if (tcp->opt.len > 0) {
1021 struct netbuf *netptr;
1022
1023 xprt->xp_p2 = malloc(sizeof (struct netbuf));
1024
1025 if (xprt->xp_p2 != NULL) {
1026 /* LINTED pointer alignment */
1027 netptr = (struct netbuf *)xprt->xp_p2;
1028
1029 netptr->len = tcp->opt.len;
1030 netptr->maxlen = tcp->opt.len;
1031 if ((netptr->buf = malloc(tcp->opt.len)) == NULL)
1032 goto memerr;
1033 (void) memcpy(netptr->buf, tcp->opt.buf, tcp->opt.len);
1034 } else
1035 goto memerr;
1036 }
1037 /* (void) ioctl(destfd, I_POP, NULL); */
1038
1039 /*
1040 * If a nonblocked connection fd has been requested,
1041 * perform the necessary operations.
1042 */
1043 xprt_srcfd = svc_xports[srcfd];
1044 /* LINTED pointer cast */
1045 if (((struct cf_rendezvous *)(xprt_srcfd->xp_p1))->cf_connmaxrec) {
1046 if (!svc_vc_nonblock(xprt_srcfd, xprt))
1047 goto xprt_err;
1048 }
1049
1050 /*
1051 * Copy the callback declared for the service to the current
1052 * connection
1053 */
1054 xprt->xp_closeclnt = xprt_srcfd->xp_closeclnt;
1055 xprt_register(xprt);
1056
1057 return;
1058
1059 memerr:
1060 (void) syslog(LOG_ERR, errstring, do_accept_str, no_mem_str);
1061 xprt_err:
1062 if (xprt)
1063 svc_vc_destroy(xprt);
1064 (void) t_close(destfd);
1065 }
1066
1067 /*
1068 * This routine performs the necessary fcntl() operations to create
1069 * a nonblocked connection fd.
1070 * It also adjusts the sizes and allocates the buffer
1071 * for the nonblocked operations, and updates the associated
1072 * timestamp field in struct cf_conn for timeout bookkeeping.
1073 */
1074 static bool_t
1075 svc_vc_nonblock(SVCXPRT *xprt_rendezvous, SVCXPRT *xprt_conn)
1076 {
1077 int nn;
1078 int fdconn = xprt_conn->xp_fd;
1079 struct cf_rendezvous *r =
1080 /* LINTED pointer cast */
1081 (struct cf_rendezvous *)xprt_rendezvous->xp_p1;
1082 /* LINTED pointer cast */
1083 struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
1084 uint32_t maxrecsz;
1085
1086 if ((nn = fcntl(fdconn, F_GETFL, 0)) < 0) {
1087 (void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1088 no_fcntl_getfl_str);
1089 return (FALSE);
1090 }
1091
1092 if (fcntl(fdconn, F_SETFL, nn|O_NONBLOCK) != 0) {
1093 (void) syslog(LOG_ERR, "%s : %s : %m", do_accept_str,
1094 no_nonblock_str);
1095 return (FALSE);
1096 }
1097
1098 cd->cf_conn_nonblock = TRUE;
1099 /*
1100 * If the max fragment size has not been set via
1101 * rpc_control(), use the default.
1102 */
1103 if ((maxrecsz = r->cf_connmaxrec) == 0)
1104 maxrecsz = r->recvsize;
1105 /* Set XDR stream to use non-blocking semantics. */
1106 if (__xdrrec_set_conn_nonblock(svc_xdrs[fdconn], maxrecsz)) {
1107 check_nonblock_timestamps = TRUE;
1108 update_nonblock_timestamps(xprt_conn);
1109 return (TRUE);
1110 }
1111 return (FALSE);
1112 }
1113
1114 /* ARGSUSED */
1115 static enum xprt_stat
1116 rendezvous_stat(SVCXPRT *xprt)
1117 {
1118 return (XPRT_IDLE);
1119 }
1120
1121 static void
1122 svc_vc_destroy(SVCXPRT *xprt)
1123 {
1124 (void) mutex_lock(&svc_mutex);
1125 _svc_vc_destroy_private(xprt, TRUE);
1126 (void) svc_timeout_nonblock_xprt_and_LRU(FALSE);
1127 (void) mutex_unlock(&svc_mutex);
1128 }
1129
1130 void
1131 _svc_vc_destroy_private(SVCXPRT *xprt, bool_t lock_not_held)
1132 {
1133 if (svc_mt_mode != RPC_SVC_MT_NONE) {
1134 /* LINTED pointer alignment */
1135 if (SVCEXT(xprt)->parent)
1136 /* LINTED pointer alignment */
1137 xprt = SVCEXT(xprt)->parent;
1138 /* LINTED pointer alignment */
1139 svc_flags(xprt) |= SVC_DEFUNCT;
1140 /* LINTED pointer alignment */
1141 if (SVCEXT(xprt)->refcnt > 0)
1142 return;
1143 }
1144
1145 if (xprt->xp_closeclnt != NULL) {
1146 svc_errorhandler_t cb = xprt->xp_closeclnt;
1147
1148 /*
1149 * Reset the pointer here to avoid reentrance on the same
1150 * SVCXPRT handle.
1151 */
1152 xprt->xp_closeclnt = NULL;
1153 cb(xprt, (xprt->xp_rtaddr.len != 0));
1154 }
1155
1156 __xprt_unregister_private(xprt, lock_not_held);
1157 (void) t_close(xprt->xp_fd);
1158
1159 (void) mutex_lock(×tamp_lock);
1160 if (timestamps && xprt->xp_fd < ntimestamps) {
1161 timestamps[xprt->xp_fd] = 0;
1162 }
1163 (void) mutex_unlock(×tamp_lock);
1164
1165 if (svc_mt_mode != RPC_SVC_MT_NONE) {
1166 svc_xprt_destroy(xprt);
1167 } else {
1168 /* LINTED pointer alignment */
1169 if (svc_type(xprt) == SVC_RENDEZVOUS)
1170 svc_vc_xprtfree(xprt);
1171 else
1172 svc_fd_xprtfree(xprt);
1173 }
1174 }
1175
1176 /*ARGSUSED*/
1177 static bool_t
1178 svc_vc_control(SVCXPRT *xprt, const uint_t rq, void *in)
1179 {
1180 switch (rq) {
1181 case SVCSET_RECVERRHANDLER:
1182 xprt->xp_closeclnt = (svc_errorhandler_t)in;
1183 return (TRUE);
1184 case SVCGET_RECVERRHANDLER:
1185 *(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1186 return (TRUE);
1187 case SVCGET_XID:
1188 if (xprt->xp_p1 == NULL)
1189 return (FALSE);
1190 /* LINTED pointer alignment */
1191 *(uint32_t *)in = ((struct cf_conn *)(xprt->xp_p1))->x_id;
1192 return (TRUE);
1193 default:
1194 return (FALSE);
1195 }
1196 }
1197
1198 static bool_t
1199 rendezvous_control(SVCXPRT *xprt, const uint_t rq, void *in)
1200 {
1201 struct cf_rendezvous *r;
1202 int tmp;
1203
1204 switch (rq) {
1205 case SVCSET_RECVERRHANDLER:
1206 xprt->xp_closeclnt = (svc_errorhandler_t)in;
1207 return (TRUE);
1208 case SVCGET_RECVERRHANDLER:
1209 *(svc_errorhandler_t *)in = xprt->xp_closeclnt;
1210 return (TRUE);
1211 case SVCSET_KEEPALIVE:
1212 /* LINTED pointer cast */
1213 r = (struct cf_rendezvous *)xprt->xp_p1;
1214 if (r->tcp_flag) {
1215 r->tcp_keepalive = (int)(intptr_t)in;
1216 return (TRUE);
1217 }
1218 return (FALSE);
1219 case SVCSET_CONNMAXREC:
1220 /*
1221 * Override the default maximum record size, set via
1222 * rpc_control(), for this connection. Only appropriate
1223 * for connection oriented transports, but is ignored for
1224 * the connectionless case, so no need to check the
1225 * connection type here.
1226 */
1227 /* LINTED pointer cast */
1228 r = (struct cf_rendezvous *)xprt->xp_p1;
1229 tmp = __rpc_legal_connmaxrec(*(int *)in);
1230 if (r != 0 && tmp >= 0) {
1231 r->cf_connmaxrec = tmp;
1232 return (TRUE);
1233 }
1234 return (FALSE);
1235 case SVCGET_CONNMAXREC:
1236 /* LINTED pointer cast */
1237 r = (struct cf_rendezvous *)xprt->xp_p1;
1238 if (r != 0) {
1239 *(int *)in = r->cf_connmaxrec;
1240 return (TRUE);
1241 }
1242 return (FALSE);
1243 case SVCGET_XID: /* fall through for now */
1244 default:
1245 return (FALSE);
1246 }
1247 }
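/*
 * Illustrative sketch (an assumption, not part of the original code) of
 * how a server can use the SVCSET_CONNMAXREC request handled above to
 * cap the record size on a listening (rendezvous) transport; connections
 * accepted afterwards inherit the limit and are put into nonblocking
 * mode by do_accept()/svc_vc_nonblock().
 *
 *	int maxrec = 64 * 1024;
 *	struct netconfig *nconf = getnetconfigent("tcp");
 *	SVCXPRT *lxprt = (nconf == NULL) ? NULL :
 *	    svc_tli_create(RPC_ANYFD, nconf, NULL, 0, 0);
 *	if (lxprt != NULL)
 *		(void) svc_control(lxprt, SVCSET_CONNMAXREC, &maxrec);
 */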
1248
1249 /*
1250 * All read operations time out after 35 seconds.
1251 * A timeout is fatal for the connection.
1252 * update_timestamps() is used by nisplus operations,
1253 * update_nonblock_timestamps() is used for nonblocked
1254 * connection fds.
1255 */
1256 #define WAIT_PER_TRY 35000 /* milliseconds */
1257
1258 static void
1259 update_timestamps(int fd)
1260 {
1261 (void) mutex_lock(×tamp_lock);
1262 if (timestamps) {
1263 struct timeval tv;
1264
1265 (void) gettimeofday(&tv, NULL);
1266 while (fd >= ntimestamps) {
1267 long *tmp_timestamps = timestamps;
1268
1269 /* allocate more timestamps */
1270 tmp_timestamps = realloc(timestamps,
1271 sizeof (long) *
1272 (ntimestamps + FD_INCREMENT));
1273 if (tmp_timestamps == NULL) {
1274 (void) mutex_unlock(×tamp_lock);
1275 syslog(LOG_ERR,
1276 "update_timestamps: out of memory");
1277 return;
1278 }
1279
1280 timestamps = tmp_timestamps;
1281 (void) memset(×tamps[ntimestamps], 0,
1282 sizeof (long) * FD_INCREMENT);
1283 ntimestamps += FD_INCREMENT;
1284 }
1285 timestamps[fd] = tv.tv_sec;
1286 }
1287 (void) mutex_unlock(×tamp_lock);
1288 }
1289
1290 static void
1291 update_nonblock_timestamps(SVCXPRT *xprt_conn)
1292 {
1293 struct timeval tv;
1294 /* LINTED pointer cast */
1295 struct cf_conn *cd = (struct cf_conn *)xprt_conn->xp_p1;
1296
1297 (void) gettimeofday(&tv, NULL);
1298 cd->cf_conn_nonblock_timestamp = tv.tv_sec;
1299 }
1300
1301 /*
1302 * reads data from the vc connection.
1303 * Any error is fatal and the connection is closed.
1304 * (And a read of zero bytes is a half closed stream => error.)
1305 */
1306 static int
1307 read_vc(SVCXPRT *xprt, caddr_t buf, int len)
1308 {
1309 int fd = xprt->xp_fd;
1310 XDR *xdrs = svc_xdrs[fd];
1311 struct pollfd pfd;
1312 int ret;
1313
1314 /*
1315 * Make sure the connection is not already dead.
1316 */
1317 /* LINTED pointer alignment */
1318 if (svc_failed(xprt))
1319 return (-1);
1320
1321 /* LINTED pointer cast */
1322 if (((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock) {
1323 /*
1324 * For nonblocked reads, only update the
1325 * timestamps to record the activity so the
1326 * connection will not be timed out.
1327 * Up to "len" bytes are requested.
1328 * If fewer than "len" bytes are received, the
1329 * connection is poll()ed again.
1330 * The poll() for the connection fd is performed
1331 * in the main poll() so that all outstanding fds
1332 * are polled rather than just the vc connection.
1333 * Polling on only the vc connection until the entire
1334 * fragment has been read can be exploited in
1335 * a Denial of Service Attack such as telnet <host> 111.
1336 */
1337 if ((len = t_rcvnonblock(xprt, buf, len)) >= 0) {
1338 if (len > 0) {
1339 update_timestamps(fd);
1340 update_nonblock_timestamps(xprt);
1341 }
1342 return (len);
1343 }
1344 goto fatal_err;
1345 }
1346
1347 if (!__is_xdrrec_first(xdrs)) {
1348
1349 pfd.fd = fd;
1350 pfd.events = MASKVAL;
1351
1352 do {
1353 if ((ret = poll(&pfd, 1, WAIT_PER_TRY)) <= 0) {
1354 /*
1355 * If errno is EINTR, ERESTART, or EAGAIN
1356 * ignore error and repeat poll
1357 */
1358 if (ret < 0 && (errno == EINTR ||
1359 errno == ERESTART || errno == EAGAIN))
1360 continue;
1361 goto fatal_err;
1362 }
1363 } while (pfd.revents == 0);
1364 if (pfd.revents & POLLNVAL)
1365 goto fatal_err;
1366 }
1367 (void) __xdrrec_resetfirst(xdrs);
1368 if ((len = t_rcvall(fd, buf, len)) > 0) {
1369 update_timestamps(fd);
1370 return (len);
1371 }
1372
1373 fatal_err:
1374 /* LINTED pointer alignment */
1375 ((struct cf_conn *)(xprt->xp_p1))->strm_stat = XPRT_DIED;
1376 /* LINTED pointer alignment */
1377 svc_flags(xprt) |= SVC_FAILED;
1378 return (-1);
1379 }
1380
1381 /*
1382 * Requests up to "len" bytes of data.
1383 * Returns number of bytes actually received, or error indication.
1384 */
1385 static int
1386 t_rcvnonblock(SVCXPRT *xprt, caddr_t buf, int len)
1387 {
1388 int fd = xprt->xp_fd;
1389 int flag;
1390 int res;
1391
1392 res = t_rcv(fd, buf, (unsigned)len, &flag);
1393 if (res == -1) {
1394 switch (t_errno) {
1395 case TLOOK:
1396 switch (t_look(fd)) {
1397 case T_DISCONNECT:
1398 (void) t_rcvdis(fd, NULL);
1399 break;
1400 case T_ORDREL:
1401 (void) t_rcvrel(fd);
1402 (void) t_sndrel(fd);
1403 break;
1404 default:
1405 break;
1406 }
1407 break;
1408 case TNODATA:
1409 /*
1410 * Either poll() lied, or the xprt/fd was closed and
1411 * re-opened under our feet. Return 0, so that we go
1412 * back to waiting for data.
1413 */
1414 res = 0;
1415 break;
1416 /* Should handle TBUFOVFLW TSYSERR ? */
1417 default:
1418 break;
1419 }
1420 }
1421 return (res);
1422 }
1423
1424 /*
1425 * Time out nonblocked connection fds.
1426 * If there has been no activity on the fd for __rpc_irtimeout
1427 * seconds, time out the fd by destroying its xprt.
1428 * If the caller gets an EMFILE error, the caller may also request
1429 * that the least recently used xprt gets destroyed as well.
1430 * svc_thr_mutex is held when this is called.
1431 * svc_mutex is held when this is called.
1432 */
1433 static void
1434 svc_timeout_nonblock_xprt_and_LRU(bool_t destroy_lru)
1435 {
1436 SVCXPRT *xprt;
1437 SVCXPRT *dead_xprt[CLEANUP_SIZE];
1438 SVCXPRT *candidate_xprt = NULL;
1439 struct cf_conn *cd;
1440 int i, fd_idx = 0, dead_idx = 0;
1441 struct timeval now;
1442 time_t lasttime, maxctime = 0;
1443 extern rwlock_t svc_fd_lock;
1444
1445 if (!check_nonblock_timestamps)
1446 return;
1447
1448 (void) gettimeofday(&now, NULL);
1449 if (svc_xports == NULL)
1450 return;
1451 /*
1452 * Hold svc_fd_lock to protect
1453 * svc_xports, svc_maxpollfd, svc_max_pollfd
1454 */
1455 (void) rw_wrlock(&svc_fd_lock);
1456 for (;;) {
1457 /*
1458 * Time out up to CLEANUP_SIZE connection fds per
1459 * iteration of the outer for (;;) loop
1460 */
1461 for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
1462 if ((xprt = svc_xports[fd_idx]) == NULL) {
1463 continue;
1464 }
1465 /* Only look at connection fds */
1466 /* LINTED pointer cast */
1467 if (svc_type(xprt) != SVC_CONNECTION) {
1468 continue;
1469 }
1470 /* LINTED pointer cast */
1471 cd = (struct cf_conn *)xprt->xp_p1;
1472 if (!cd->cf_conn_nonblock)
1473 continue;
1474 lasttime = now.tv_sec - cd->cf_conn_nonblock_timestamp;
1475 if (lasttime >= __rpc_irtimeout &&
1476 __rpc_irtimeout != 0) {
1477 /* Enter in timedout/dead array */
1478 dead_xprt[dead_idx++] = xprt;
1479 if (dead_idx >= CLEANUP_SIZE)
1480 break;
1481 } else
1482 if (lasttime > maxctime) {
1483 /* Possible LRU xprt */
1484 candidate_xprt = xprt;
1485 maxctime = lasttime;
1486 }
1487 }
1488
1489 for (i = 0; i < dead_idx; i++) {
1490 /* Still holding svc_fd_lock */
1491 _svc_vc_destroy_private(dead_xprt[i], FALSE);
1492 }
1493
1494 /*
1495 * If all the nonblocked fds have been checked, we're done.
1496 */
1497 if (fd_idx++ >= svc_max_pollfd)
1498 break;
1499 }
1500 if ((destroy_lru) && (candidate_xprt != NULL)) {
1501 _svc_vc_destroy_private(candidate_xprt, FALSE);
1502 }
1503 (void) rw_unlock(&svc_fd_lock);
1504 }
1505 /*
1506 * Receive the required bytes of data, even if it is fragmented.
1507 */
1508 static int
1509 t_rcvall(int fd, char *buf, int len)
1510 {
1511 int flag;
1512 int final = 0;
1513 int res;
1514
1515 do {
1516 res = t_rcv(fd, buf, (unsigned)len, &flag);
1517 if (res == -1) {
1518 if (t_errno == TLOOK) {
1519 switch (t_look(fd)) {
1520 case T_DISCONNECT:
1521 (void) t_rcvdis(fd, NULL);
1522 break;
1523 case T_ORDREL:
1524 (void) t_rcvrel(fd);
1525 (void) t_sndrel(fd);
1526 break;
1527 default:
1528 break;
1529 }
1530 }
1531 break;
1532 }
1533 final += res;
1534 buf += res;
1535 len -= res;
1536 } while (len && (flag & T_MORE));
1537 return (res == -1 ? -1 : final);
1538 }
1539
1540 /*
1541 * writes data to the vc connection.
1542 * Any error is fatal and the connection is closed.
1543 */
1544 static int
1545 write_vc(SVCXPRT *xprt, caddr_t buf, int len)
1546 {
1547 int i, cnt;
1548 int flag;
1549 int maxsz;
1550 int nonblock;
1551 struct pollfd pfd;
1552
1553 /* LINTED pointer alignment */
1554 maxsz = ((struct cf_conn *)(xprt->xp_p1))->cf_tsdu;
1555 /* LINTED pointer cast */
1556 nonblock = ((struct cf_conn *)(xprt->xp_p1))->cf_conn_nonblock;
1557 if (nonblock && maxsz <= 0)
1558 maxsz = len;
1559 if ((maxsz == 0) || (maxsz == -1)) {
1560 if ((len = t_snd(xprt->xp_fd, buf, (unsigned)len,
1561 (int)0)) == -1) {
1562 if (t_errno == TLOOK) {
1563 switch (t_look(xprt->xp_fd)) {
1564 case T_DISCONNECT:
1565 (void) t_rcvdis(xprt->xp_fd, NULL);
1566 break;
1567 case T_ORDREL:
1568 (void) t_rcvrel(xprt->xp_fd);
1569 (void) t_sndrel(xprt->xp_fd);
1570 break;
1571 default:
1572 break;
1573 }
1574 }
1575 /* LINTED pointer alignment */
1576 ((struct cf_conn *)(xprt->xp_p1))->strm_stat
1577 = XPRT_DIED;
1578 /* LINTED pointer alignment */
1579 svc_flags(xprt) |= SVC_FAILED;
1580 }
1581 return (len);
1582 }
1583
1584 /*
1585 * Setup for polling. We want to be able to write normal
1586 * data to the transport
1587 */
1588 pfd.fd = xprt->xp_fd;
1589 pfd.events = POLLWRNORM;
1590
1591 /*
1592 * This is for those transports which have a max size for data,
1593 * and for the non-blocking case, where t_snd() may send less
1594 * than requested.
1595 */
1596 for (cnt = len, i = 0; cnt > 0; cnt -= i, buf += i) {
1597 flag = cnt > maxsz ? T_MORE : 0;
1598 if ((i = t_snd(xprt->xp_fd, buf,
1599 (unsigned)MIN(cnt, maxsz), flag)) == -1) {
1600 if (t_errno == TLOOK) {
1601 switch (t_look(xprt->xp_fd)) {
1602 case T_DISCONNECT:
1603 (void) t_rcvdis(xprt->xp_fd, NULL);
1604 break;
1605 case T_ORDREL:
1606 (void) t_rcvrel(xprt->xp_fd);
1607 break;
1608 default:
1609 break;
1610 }
1611 } else if (t_errno == TFLOW) {
1612 /* Try again */
1613 i = 0;
1614 /* Wait till we can write to the transport */
1615 do {
1616 if (poll(&pfd, 1, WAIT_PER_TRY) < 0) {
1617 /*
1618 * If errno is ERESTART, or
1619 * EAGAIN ignore error and repeat poll
1620 */
1621 if (errno == ERESTART ||
1622 errno == EAGAIN)
1623 continue;
1624 else
1625 goto fatal_err;
1626 }
1627 } while (pfd.revents == 0);
1628 if (pfd.revents & (POLLNVAL | POLLERR |
1629 POLLHUP))
1630 goto fatal_err;
1631 continue;
1632 }
1633 fatal_err:
1634 /* LINTED pointer alignment */
1635 ((struct cf_conn *)(xprt->xp_p1))->strm_stat
1636 = XPRT_DIED;
1637 /* LINTED pointer alignment */
1638 svc_flags(xprt) |= SVC_FAILED;
1639 return (-1);
1640 }
1641 }
1642 return (len);
1643 }
1644
1645 static enum xprt_stat
1646 svc_vc_stat(SVCXPRT *xprt)
1647 {
1648 /* LINTED pointer alignment */
1649 SVCXPRT *parent = SVCEXT(xprt)->parent ? SVCEXT(xprt)->parent : xprt;
1650
1651 /* LINTED pointer alignment */
1652 if (svc_failed(parent) || svc_failed(xprt))
1653 return (XPRT_DIED);
1654 if (!xdrrec_eof(svc_xdrs[xprt->xp_fd]))
1655 return (XPRT_MOREREQS);
1656 /*
1657 * xdrrec_eof could have noticed that the connection is dead, so
1658 * check status again.
1659 */
1660 /* LINTED pointer alignment */
1661 if (svc_failed(parent) || svc_failed(xprt))
1662 return (XPRT_DIED);
1663 return (XPRT_IDLE);
1664 }
1665
1666
1667
1668 static bool_t
1669 svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg)
1670 {
1671 /* LINTED pointer alignment */
1672 struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
1673 XDR *xdrs = svc_xdrs[xprt->xp_fd];
1674
1675 xdrs->x_op = XDR_DECODE;
1676
1677 if (cd->cf_conn_nonblock) {
1678 /* Get the next input */
1679 if (!__xdrrec_getbytes_nonblock(xdrs, &cd->strm_stat)) {
1680 /*
1681 * The entire record has not been received.
1682 * If the xprt has died, pass it along in svc_flags.
1683 * Return FALSE. For a nonblocked vc connection,
1684 * xdr_callmsg() is called only after the entire
1685 * record has been received. For a blocked vc
1686 * connection, the data is received on the fly as it
1687 * is being processed through the xdr routines.
1688 */
1689 if (cd->strm_stat == XPRT_DIED)
1690 /* LINTED pointer cast */
1691 svc_flags(xprt) |= SVC_FAILED;
1692 return (FALSE);
1693 }
1694 } else {
1695 if (!xdrrec_skiprecord(xdrs))
1696 return (FALSE);
1697 (void) __xdrrec_setfirst(xdrs);
1698 }
1699
1700 if (xdr_callmsg(xdrs, msg)) {
1701 cd->x_id = msg->rm_xid;
1702 return (TRUE);
1703 }
1704
1705 /*
1706 * If a non-blocking connection, drop it when message decode fails.
1707 * We are either under attack, or we're talking to a broken client.
1708 */
1709 if (cd->cf_conn_nonblock) {
1710 /* LINTED pointer cast */
1711 svc_flags(xprt) |= SVC_FAILED;
1712 }
1713
1714 return (FALSE);
1715 }
1716
1717 static bool_t
1718 svc_vc_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
1719 {
1720 bool_t dummy;
1721
1722 /* LINTED pointer alignment */
1723 dummy = SVCAUTH_UNWRAP(&SVC_XP_AUTH(xprt), svc_xdrs[xprt->xp_fd],
1724 xdr_args, args_ptr);
1725 if (svc_mt_mode != RPC_SVC_MT_NONE)
1726 svc_args_done(xprt);
1727 return (dummy);
1728 }
1729
1730 static bool_t
1731 svc_vc_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
1732 {
1733 /* LINTED pointer alignment */
1734 XDR *xdrs = &(((struct cf_conn *)(xprt->xp_p1))->xdrs);
1735
1736 xdrs->x_op = XDR_FREE;
1737 return ((*xdr_args)(xdrs, args_ptr));
1738 }
1739
1740 static bool_t
1741 svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg)
1742 {
1743 /* LINTED pointer alignment */
1744 struct cf_conn *cd = (struct cf_conn *)(xprt->xp_p1);
1745 XDR *xdrs = &(cd->xdrs);
1746 bool_t stat = FALSE;
1747 xdrproc_t xdr_results;
1748 caddr_t xdr_location;
1749 bool_t has_args;
1750
1751 #ifdef __lock_lint
1752 (void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
1753 #else
1754 if (svc_mt_mode != RPC_SVC_MT_NONE)
1755 /* LINTED pointer alignment */
1756 (void) mutex_lock(&svc_send_mutex(SVCEXT(xprt)->parent));
1757 #endif
1758
1759 if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
1760 msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
1761 has_args = TRUE;
1762 xdr_results = msg->acpted_rply.ar_results.proc;
1763 xdr_location = msg->acpted_rply.ar_results.where;
1764 msg->acpted_rply.ar_results.proc = xdr_void;
1765 msg->acpted_rply.ar_results.where = NULL;
1766 } else
1767 has_args = FALSE;
1768
1769 xdrs->x_op = XDR_ENCODE;
1770 msg->rm_xid = cd->x_id;
1771 /* LINTED pointer alignment */
1772 if (xdr_replymsg(xdrs, msg) && (!has_args || SVCAUTH_WRAP(
1773 &SVC_XP_AUTH(xprt), xdrs, xdr_results, xdr_location))) {
1774 stat = TRUE;
1775 }
1776 (void) xdrrec_endofrecord(xdrs, TRUE);
1777
1778 #ifdef __lock_lint
1779 (void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
1780 #else
1781 if (svc_mt_mode != RPC_SVC_MT_NONE)
1782 /* LINTED pointer alignment */
1783 (void) mutex_unlock(&svc_send_mutex(SVCEXT(xprt)->parent));
1784 #endif
1785
1786 return (stat);
1787 }
1788
1789 static struct xp_ops *
1790 svc_vc_ops(void)
1791 {
1792 static struct xp_ops ops;
1793 extern mutex_t ops_lock;
1794
1795 /* VARIABLES PROTECTED BY ops_lock: ops */
1796
1797 (void) mutex_lock(&ops_lock);
1798 if (ops.xp_recv == NULL) {
1799 ops.xp_recv = svc_vc_recv;
1800 ops.xp_stat = svc_vc_stat;
1801 ops.xp_getargs = svc_vc_getargs;
1802 ops.xp_reply = svc_vc_reply;
1803 ops.xp_freeargs = svc_vc_freeargs;
1804 ops.xp_destroy = svc_vc_destroy;
1805 ops.xp_control = svc_vc_control;
1806 }
1807 (void) mutex_unlock(&ops_lock);
1808 return (&ops);
1809 }
1810
1811 static struct xp_ops *
1812 svc_vc_rendezvous_ops(void)
1813 {
1814 static struct xp_ops ops;
1815 extern mutex_t ops_lock;
1816
1817 (void) mutex_lock(&ops_lock);
1818 if (ops.xp_recv == NULL) {
1819 ops.xp_recv = rendezvous_request;
1820 ops.xp_stat = rendezvous_stat;
1821 ops.xp_getargs = (bool_t (*)())abort;
1822 ops.xp_reply = (bool_t (*)())abort;
1823 ops.xp_freeargs = (bool_t (*)())abort;
1824 ops.xp_destroy = svc_vc_destroy;
1825 ops.xp_control = rendezvous_control;
1826 }
1827 (void) mutex_unlock(&ops_lock);
1828 return (&ops);
1829 }
1830
1831 /*
1832 * PRIVATE RPC INTERFACE
1833 *
1834 * This is a hack to let NIS+ clean up connections that have already been
1835 * closed. This problem arises because rpc.nisd forks a child to handle
1836 * existing connections when it does checkpointing. The child may close
1837 * some of these connections. But the descriptors still stay open in the
1838 * parent, and because TLI descriptors don't support persistent EOF
1839 * condition (like sockets do), the parent will never detect that these
1840 * descriptors are dead.
1841 *
1842 * The following internal procedure, __svc_nisplus_fdcleanup_hack(), should
1843 * be removed as soon as rpc.nisd is rearchitected to do the right thing.
1844 * This procedure should not find its way into any header files.
1845 *
1846 * This procedure should be called only when rpc.nisd knows that there
1847 * are no children servicing clients.
1848 */
1849
1850 static bool_t
1851 fd_is_dead(int fd)
1852 {
1853 struct T_info_ack inforeq;
1854 int retval;
1855
1856 inforeq.PRIM_type = T_INFO_REQ;
1857 if (!_t_do_ioctl(fd, (caddr_t)&inforeq, sizeof (struct T_info_req),
1858 TI_GETINFO, &retval))
1859 return (TRUE);
1860 if (retval != (int)sizeof (struct T_info_ack))
1861 return (TRUE);
1862
1863 switch (inforeq.CURRENT_state) {
1864 case TS_UNBND:
1865 case TS_IDLE:
1866 return (TRUE);
1867 default:
1868 break;
1869 }
1870 return (FALSE);
1871 }
1872
1873 void
1874 __svc_nisplus_fdcleanup_hack(void)
1875 {
1876 SVCXPRT *xprt;
1877 SVCXPRT *dead_xprt[CLEANUP_SIZE];
1878 int i, fd_idx = 0, dead_idx = 0;
1879
1880 if (svc_xports == NULL)
1881 return;
1882 for (;;) {
1883 (void) rw_wrlock(&svc_fd_lock);
1884 for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
1885 if ((xprt = svc_xports[fd_idx]) == NULL)
1886 continue;
1887 /* LINTED pointer alignment */
1888 if (svc_type(xprt) != SVC_CONNECTION)
1889 continue;
1890 if (fd_is_dead(fd_idx)) {
1891 dead_xprt[dead_idx++] = xprt;
1892 if (dead_idx >= CLEANUP_SIZE)
1893 break;
1894 }
1895 }
1896
1897 for (i = 0; i < dead_idx; i++) {
1898 /* Still holding svc_fd_lock */
1899 _svc_vc_destroy_private(dead_xprt[i], FALSE);
1900 }
1901 (void) rw_unlock(&svc_fd_lock);
1902 if (fd_idx++ >= svc_max_pollfd)
1903 return;
1904 }
1905 }
1906
1907 void
1908 __svc_nisplus_enable_timestamps(void)
1909 {
1910 (void) mutex_lock(×tamp_lock);
1911 if (!timestamps) {
1912 timestamps = calloc(FD_INCREMENT, sizeof (long));
1913 if (timestamps != NULL)
1914 ntimestamps = FD_INCREMENT;
1915 else {
1916 (void) mutex_unlock(×tamp_lock);
1917 syslog(LOG_ERR,
1918 "__svc_nisplus_enable_timestamps: "
1919 "out of memory");
1920 return;
1921 }
1922 }
1923 (void) mutex_unlock(×tamp_lock);
1924 }
1925
1926 void
1927 __svc_nisplus_purge_since(long since)
1928 {
1929 SVCXPRT *xprt;
1930 SVCXPRT *dead_xprt[CLEANUP_SIZE];
1931 int i, fd_idx = 0, dead_idx = 0;
1932
1933 if (svc_xports == NULL)
1934 return;
1935 for (;;) {
1936 (void) rw_wrlock(&svc_fd_lock);
1937 (void) mutex_lock(×tamp_lock);
1938 for (dead_idx = 0; fd_idx < svc_max_pollfd; fd_idx++) {
1939 if ((xprt = svc_xports[fd_idx]) == NULL) {
1940 continue;
1941 }
1942 /* LINTED pointer cast */
1943 if (svc_type(xprt) != SVC_CONNECTION) {
1944 continue;
1945 }
1946 if (fd_idx >= ntimestamps) {
1947 break;
1948 }
1949 if (timestamps[fd_idx] &&
1950 timestamps[fd_idx] < since) {
1951 dead_xprt[dead_idx++] = xprt;
1952 if (dead_idx >= CLEANUP_SIZE)
1953 break;
1954 }
1955 }
1956 (void) mutex_unlock(×tamp_lock);
1957
1958 for (i = 0; i < dead_idx; i++) {
1959 /* Still holding svc_fd_lock */
1960 _svc_vc_destroy_private(dead_xprt[i], FALSE);
1961 }
1962 (void) rw_unlock(&svc_fd_lock);
1963 if (fd_idx++ >= svc_max_pollfd)
1964 return;
1965 }
1966 }
1967
1968 /*
1969 * dup cache wrapper functions for vc requests. The set of dup
1970 * functions were written with the view that they may be expanded
1971 * during creation of a generic svc_vc_enablecache routine
1972 * which would have a size based cache, rather than a time based cache.
1973 * The real work is done in generic svc.c
1974 */
1975 bool_t
1976 __svc_vc_dupcache_init(SVCXPRT *xprt, void *condition, int basis)
1977 {
1978 return (__svc_dupcache_init(condition, basis,
1979 /* LINTED pointer alignment */
1980 &(((struct cf_rendezvous *)xprt->xp_p1)->cf_cache)));
1981 }
1982
1983 int
1984 __svc_vc_dup(struct svc_req *req, caddr_t *resp_buf, uint_t *resp_bufsz)
1985 {
1986 return (__svc_dup(req, resp_buf, resp_bufsz,
1987 /* LINTED pointer alignment */
1988 ((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
1989 }
1990
1991 int
1992 __svc_vc_dupdone(struct svc_req *req, caddr_t resp_buf, uint_t resp_bufsz,
1993 int status)
1994 {
1995 return (__svc_dupdone(req, resp_buf, resp_bufsz, status,
1996 /* LINTED pointer alignment */
1997 ((struct cf_conn *)req->rq_xprt->xp_p1)->cf_cache));
1998 }
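/*
 * Hypothetical sketch (not part of the original code) of how a dispatch
 * routine might drive the wrappers above, assuming the cache was created
 * earlier with __svc_vc_dupcache_init() and that the DUP_* status values
 * from the generic dup cache code in svc.c are available:
 *
 *	caddr_t resp = NULL;
 *	uint_t respsz = 0;
 *
 *	switch (__svc_vc_dup(req, &resp, &respsz)) {
 *	case DUP_NEW:
 *		run the procedure, build and send the reply, then:
 *		(void) __svc_vc_dupdone(req, resp, respsz, DUP_DONE);
 *		break;
 *	case DUP_DONE:
 *		resend the cached reply held in resp/respsz;
 *		break;
 *	case DUP_INPROGRESS:
 *	default:
 *		drop the request;
 *		break;
 *	}
 */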
1999