1 /* $NetBSD: clnt_vc.c,v 1.29 2024/01/23 17:24:38 christos Exp $ */
2
3 /*
4 * Copyright (c) 2010, Oracle America, Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following
14 * disclaimer in the documentation and/or other materials
15 * provided with the distribution.
16 * * Neither the name of the "Oracle America, Inc." nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
25 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
27 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include <sys/cdefs.h>
35 #if defined(LIBC_SCCS) && !defined(lint)
36 #if 0
37 static char *sccsid = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
38 static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC";
39 static char sccsid[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
40 #else
41 __RCSID("$NetBSD: clnt_vc.c,v 1.29 2024/01/23 17:24:38 christos Exp $");
42 #endif
43 #endif
44
45 /*
46 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
47 *
48 * Copyright (C) 1984, Sun Microsystems, Inc.
49 *
50 * TCP based RPC supports 'batched calls'.
51 * A sequence of calls may be batched-up in a send buffer. The rpc call
52 * returns immediately to the client even though the call was not necessarily
53 * sent. The batching occurs if the results' xdr routine is NULL (0) AND
54 * the rpc timeout value is zero (see clnt.h, rpc).
55 *
56 * Clients should NOT casually batch calls that in fact return results; that is,
57 * the server side should be aware that a call is batched and not produce any
58 * return message. Batched calls that produce many result messages can
59 * deadlock (netlock) the client and the server....
60 *
61 * Now go hang yourself.
62 */
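/*
 * Illustrative sketch only (not part of this file): per the semantics
 * above, a call is queued rather than sent when the result xdrproc is
 * NULL and the timeout is zero; a later ordinary call flushes the
 * pipeline.  LOGPROC, FLUSHPROC, xdr_logrec and "rec" are hypothetical.
 *
 *	struct timeval zero = { 0, 0 };
 *	struct timeval wait = { 25, 0 };
 *
 *	(void)clnt_call(clnt, LOGPROC, (xdrproc_t)xdr_logrec,
 *	    (caddr_t)&rec, (xdrproc_t)NULL, NULL, zero);   queued only
 *	(void)clnt_call(clnt, FLUSHPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, wait);              flushes the batch
 */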
63
64 #include "namespace.h"
65 #include "reentrant.h"
66 #include <sys/types.h>
67 #include <sys/poll.h>
68 #include <sys/socket.h>
69
70 #include <assert.h>
71 #include <err.h>
72 #include <errno.h>
73 #include <netdb.h>
74 #include <stdio.h>
75 #include <stdlib.h>
76 #include <string.h>
77 #include <unistd.h>
78 #include <signal.h>
79
80 #include <rpc/rpc.h>
81
82 #include "svc_fdset.h"
83 #include "rpc_internal.h"
84
85 #ifdef __weak_alias
86 __weak_alias(clnt_vc_create,_clnt_vc_create)
87 #endif
88
89 #define MCALL_MSG_SIZE 24
90
91 static enum clnt_stat clnt_vc_call(CLIENT *, rpcproc_t, xdrproc_t,
92 const char *, xdrproc_t, caddr_t, struct timeval);
93 static void clnt_vc_geterr(CLIENT *, struct rpc_err *);
94 static bool_t clnt_vc_freeres(CLIENT *, xdrproc_t, caddr_t);
95 static void clnt_vc_abort(CLIENT *);
96 static bool_t clnt_vc_control(CLIENT *, u_int, char *);
97 static void clnt_vc_destroy(CLIENT *);
98 static struct clnt_ops *clnt_vc_ops(void);
99 static bool_t time_not_ok(struct timeval *);
100 static int read_vc(caddr_t, caddr_t, int);
101 static int write_vc(caddr_t, caddr_t, int);
102
103 struct ct_data {
104 int ct_fd;
105 bool_t ct_closeit;
106 struct timeval ct_wait;
107 bool_t ct_waitset; /* wait set by clnt_control? */
108 struct netbuf ct_addr;
109 struct rpc_err ct_error;
110 union {
111 char ct_mcallc[MCALL_MSG_SIZE]; /* marshalled callmsg */
112 u_int32_t ct_mcalli;
113 } ct_u;
114 u_int ct_mpos; /* pos after marshal */
115 XDR ct_xdrs;
116 };
117
118 /*
119 * This machinery implements per-fd locks for MT-safety. It is not
120 * sufficient to do per-CLIENT handle locks for MT-safety because a
121 * user may create more than one CLIENT handle with the same fd behind
122 * it. Therefore, we allocate an array of flags (vc_fd_locks), protected
123 * by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
124 * similarly protected. vc_fd_locks[fd] == 1 => a call is active on some
125 * CLIENT handle created for that fd.
126 * The current implementation holds locks across the entire RPC and reply.
127 * Yes, this is silly, and as soon as this code is proven to work, this
128 * should be the first thing fixed. One step at a time.
129 */
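/*
 * Acquire side of that per-fd lock, as used by the call paths below
 * (the release side is the release_fd_lock() macro that follows):
 *
 *	__clnt_sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (vc_fd_locks[ct->ct_fd])
 *		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
 *	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
 *	mutex_unlock(&clnt_fd_lock);
 */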
130 #ifdef _REENTRANT
131 static int *vc_fd_locks;
132 #define __rpc_lock_value __isthreaded
133 static cond_t *vc_cv;
134 #define release_fd_lock(fd, mask) { \
135 mutex_lock(&clnt_fd_lock); \
136 vc_fd_locks[fd] = 0; \
137 mutex_unlock(&clnt_fd_lock); \
138 thr_sigsetmask(SIG_SETMASK, &(mask), NULL); \
139 cond_signal(&vc_cv[fd]); \
140 }
141 #else
142 #define release_fd_lock(fd,mask)
143 #define __rpc_lock_value 0
144 #endif
145
146 static __inline void
147 htonlp(void *dst, const void *src, uint32_t incr)
148 {
149 #if 0
150 uint32_t tmp;
151 memcpy(&tmp, src, sizeof(tmp));
152 tmp = htonl(tmp + incr);
153 memcpy(dst, &tmp, sizeof(tmp));
154 #else
155 /* We are aligned, so we think */
156 *(uint32_t *)dst = htonl(*(const uint32_t *)src + incr);
157 #endif
158 }
159
160 static __inline void
161 ntohlp(void *dst, const void *src)
162 {
163 #if 0
164 uint32_t tmp;
165 memcpy(&tmp, src, sizeof(tmp));
166 tmp = ntohl(tmp);
167 memcpy(dst, &tmp, sizeof(tmp));
168 #else
169 /* We are aligned, so we think */
170 *(uint32_t *)dst = ntohl(*(const uint32_t *)src);
171 #endif
172 }
173
174 /*
175 * Create a client handle for a connection.
176 * Default options are set, which the user can change using clnt_control()'s.
177 * The rpc/vc package does buffering similar to stdio, so the client
178 * must pick send and receive buffer sizes, 0 => use the default.
179 * NB: fd is copied into a private area.
180 * NB: The rpch->cl_auth is set null authentication. Caller may wish to
181 * set this something more useful.
182 *
183 * fd should be an open socket
184 */
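/*
 * Illustrative usage sketch only (not part of this file); "srv_nb",
 * PROG and VERS are hypothetical, and srv_nb is assumed to hold the
 * server's address as a struct netbuf:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	CLIENT *clnt = clnt_vc_create(fd, &srv_nb, PROG, VERS, 0, 0);
 *	if (clnt == NULL)
 *		errx(1, "%s", clnt_spcreateerror("clnt_vc_create"));
 *	clnt_control(clnt, CLSET_FD_CLOSE, NULL);
 *	auth_destroy(clnt->cl_auth);
 *	clnt->cl_auth = authunix_create_default();
 */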
185 CLIENT *
186 clnt_vc_create(
187 int fd,
188 const struct netbuf *raddr,
189 rpcprog_t prog,
190 rpcvers_t vers,
191 u_int sendsz,
192 u_int recvsz
193 )
194 {
195 CLIENT *h;
196 struct ct_data *ct = NULL;
197 struct rpc_msg call_msg;
198 #ifdef _REENTRANT
199 sigset_t mask;
200 #endif
201 sigset_t newmask;
202 struct sockaddr_storage ss;
203 socklen_t slen;
204 struct __rpc_sockinfo si;
205
206 _DIAGASSERT(raddr != NULL);
207
208 h = mem_alloc(sizeof(*h));
209 if (h == NULL) {
210 warnx("clnt_vc_create: out of memory");
211 rpc_createerr.cf_stat = RPC_SYSTEMERROR;
212 rpc_createerr.cf_error.re_errno = errno;
213 goto fooy;
214 }
215 ct = mem_alloc(sizeof(*ct));
216 if (ct == NULL) {
217 warnx("clnt_vc_create: out of memory");
218 rpc_createerr.cf_stat = RPC_SYSTEMERROR;
219 rpc_createerr.cf_error.re_errno = errno;
220 goto fooy;
221 }
222
223 __clnt_sigfillset(&newmask);
224 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
225 #ifdef _REENTRANT
226 mutex_lock(&clnt_fd_lock);
227 if (vc_fd_locks == NULL) {
228 size_t cv_allocsz, fd_allocsz;
229 int dtbsize = __rpc_dtbsize();
230
231 fd_allocsz = dtbsize * sizeof (int);
232 vc_fd_locks = mem_alloc(fd_allocsz);
233 if (vc_fd_locks == NULL) {
234 goto blooy;
235 } else
236 memset(vc_fd_locks, '\0', fd_allocsz);
237
238 _DIAGASSERT(vc_cv == NULL);
239 cv_allocsz = dtbsize * sizeof (cond_t);
240 vc_cv = mem_alloc(cv_allocsz);
241 if (vc_cv == NULL) {
242 mem_free(vc_fd_locks, fd_allocsz);
243 vc_fd_locks = NULL;
244 goto blooy;
245 } else {
246 int i;
247
248 for (i = 0; i < dtbsize; i++)
249 cond_init(&vc_cv[i], 0, (void *) 0);
250 }
251 } else
252 _DIAGASSERT(vc_cv != NULL);
253 #endif
254
255 /*
256 * XXX - fvdl connecting while holding a mutex?
257 */
258 slen = sizeof ss;
259 if (getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
260 if (errno != ENOTCONN) {
261 rpc_createerr.cf_stat = RPC_SYSTEMERROR;
262 rpc_createerr.cf_error.re_errno = errno;
263 goto blooy;
264 }
265 if (connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
266 rpc_createerr.cf_stat = RPC_SYSTEMERROR;
267 rpc_createerr.cf_error.re_errno = errno;
268 goto blooy;
269 }
270 }
271 mutex_unlock(&clnt_fd_lock);
272 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
273 if (!__rpc_fd2sockinfo(fd, &si))
274 goto fooy;
275
276 ct->ct_closeit = FALSE;
277
278 /*
279 * Set up private data struct
280 */
281 ct->ct_fd = fd;
282 ct->ct_wait.tv_usec = 0;
283 ct->ct_waitset = FALSE;
284 ct->ct_addr.buf = malloc((size_t)raddr->maxlen);
285 if (ct->ct_addr.buf == NULL)
286 goto fooy;
287 memcpy(ct->ct_addr.buf, raddr->buf, (size_t)raddr->len);
288 ct->ct_addr.len = raddr->len;
289 ct->ct_addr.maxlen = raddr->maxlen;
290
291 /*
292 * Initialize call message
293 */
294 call_msg.rm_xid = __RPC_GETXID();
295 call_msg.rm_direction = CALL;
296 call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
297 call_msg.rm_call.cb_prog = (u_int32_t)prog;
298 call_msg.rm_call.cb_vers = (u_int32_t)vers;
299
300 /*
301 * pre-serialize the static part of the call msg and stash it away
302 */
303 xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
304 XDR_ENCODE);
305 if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
306 if (ct->ct_closeit) {
307 (void)close(fd);
308 }
309 goto fooy;
310 }
311 ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
312 XDR_DESTROY(&(ct->ct_xdrs));
313
314 /*
315 * Create a client handle which uses xdrrec for serialization
316 * and authnone for authentication.
317 */
318 h->cl_ops = clnt_vc_ops();
319 h->cl_private = ct;
320 h->cl_auth = authnone_create();
321 sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
322 recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
323 xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
324 h->cl_private, read_vc, write_vc);
325 return (h);
326
327 blooy:
328 mutex_unlock(&clnt_fd_lock);
329 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
330 fooy:
331 /*
332 * Something goofed, free stuff and barf
333 */
334 if (ct)
335 mem_free(ct, sizeof(struct ct_data));
336 if (h)
337 mem_free(h, sizeof(CLIENT));
338 return (NULL);
339 }
340
341 static enum clnt_stat
342 clnt_vc_call(
343 CLIENT *h,
344 rpcproc_t proc,
345 xdrproc_t xdr_args,
346 const char *args_ptr,
347 xdrproc_t xdr_results,
348 caddr_t results_ptr,
349 struct timeval timeout
350 )
351 {
352 struct ct_data *ct;
353 XDR *xdrs;
354 struct rpc_msg reply_msg;
355 u_int32_t x_id;
356 u_int32_t *msg_x_id;
357 bool_t shipnow;
358 int refreshes = 2;
359 #ifdef _REENTRANT
360 sigset_t mask, newmask;
361 #endif
362
363 _DIAGASSERT(h != NULL);
364
365 ct = (struct ct_data *) h->cl_private;
366
367 #ifdef _REENTRANT
368 __clnt_sigfillset(&newmask);
369 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
370 mutex_lock(&clnt_fd_lock);
371 while (vc_fd_locks[ct->ct_fd])
372 cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
373 vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
374 mutex_unlock(&clnt_fd_lock);
375 #endif
376
377 xdrs = &(ct->ct_xdrs);
378 msg_x_id = &ct->ct_u.ct_mcalli;
379
380 if (!ct->ct_waitset) {
381 if (time_not_ok(&timeout) == FALSE)
382 ct->ct_wait = timeout;
383 }
384
385 shipnow =
386 (xdr_results == NULL && timeout.tv_sec == 0
387 && timeout.tv_usec == 0) ? FALSE : TRUE;
388
389 call_again:
390 xdrs->x_op = XDR_ENCODE;
391 ct->ct_error.re_status = RPC_SUCCESS;
392 x_id = ntohl(--(*msg_x_id));
393 if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
394 (! XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
395 (! AUTH_MARSHALL(h->cl_auth, xdrs)) ||
396 (! (*xdr_args)(xdrs, __UNCONST(args_ptr)))) {
397 if (ct->ct_error.re_status == RPC_SUCCESS)
398 ct->ct_error.re_status = RPC_CANTENCODEARGS;
399 (void)xdrrec_endofrecord(xdrs, TRUE);
400 release_fd_lock(ct->ct_fd, mask);
401 return (ct->ct_error.re_status);
402 }
403 if (! xdrrec_endofrecord(xdrs, shipnow)) {
404 release_fd_lock(ct->ct_fd, mask);
405 return (ct->ct_error.re_status = RPC_CANTSEND);
406 }
407 if (! shipnow) {
408 release_fd_lock(ct->ct_fd, mask);
409 return (RPC_SUCCESS);
410 }
411 /*
412 * Hack to provide rpc-based message passing
413 */
414 if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
415 release_fd_lock(ct->ct_fd, mask);
416 return(ct->ct_error.re_status = RPC_TIMEDOUT);
417 }
418
419
420 /*
421 * Keep receiving until we get a valid transaction id
422 */
423 xdrs->x_op = XDR_DECODE;
424 for (;;) {
425 reply_msg.acpted_rply.ar_verf = _null_auth;
426 reply_msg.acpted_rply.ar_results.where = NULL;
427 reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
428 if (! xdrrec_skiprecord(xdrs)) {
429 release_fd_lock(ct->ct_fd, mask);
430 return (ct->ct_error.re_status);
431 }
432 /* now decode and validate the response header */
433 if (! xdr_replymsg(xdrs, &reply_msg)) {
434 if (ct->ct_error.re_status == RPC_SUCCESS)
435 continue;
436 release_fd_lock(ct->ct_fd, mask);
437 return (ct->ct_error.re_status);
438 }
439 if (reply_msg.rm_xid == x_id)
440 break;
441 }
442
443 /*
444 * process header
445 */
446 _seterr_reply(&reply_msg, &(ct->ct_error));
447 if (ct->ct_error.re_status == RPC_SUCCESS) {
448 if (! AUTH_VALIDATE(h->cl_auth,
449 &reply_msg.acpted_rply.ar_verf)) {
450 ct->ct_error.re_status = RPC_AUTHERROR;
451 ct->ct_error.re_why = AUTH_INVALIDRESP;
452 } else if (! (*xdr_results)(xdrs, results_ptr)) {
453 if (ct->ct_error.re_status == RPC_SUCCESS)
454 ct->ct_error.re_status = RPC_CANTDECODERES;
455 }
456 /* free verifier ... */
457 if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
458 xdrs->x_op = XDR_FREE;
459 (void)xdr_opaque_auth(xdrs,
460 &(reply_msg.acpted_rply.ar_verf));
461 }
462 } /* end successful completion */
463 else {
464 /* maybe our credentials need to be refreshed ... */
465 if (refreshes-- && AUTH_REFRESH(h->cl_auth))
466 goto call_again;
467 } /* end of unsuccessful completion */
468 release_fd_lock(ct->ct_fd, mask);
469 return (ct->ct_error.re_status);
470 }
471
472 static void
473 clnt_vc_geterr(
474 CLIENT *h,
475 struct rpc_err *errp
476 )
477 {
478 struct ct_data *ct;
479
480 _DIAGASSERT(h != NULL);
481 _DIAGASSERT(errp != NULL);
482
483 ct = (struct ct_data *) h->cl_private;
484 *errp = ct->ct_error;
485 }
486
487 static bool_t
488 clnt_vc_freeres(
489 CLIENT *cl,
490 xdrproc_t xdr_res,
491 caddr_t res_ptr
492 )
493 {
494 struct ct_data *ct;
495 XDR *xdrs;
496 bool_t dummy;
497 #ifdef _REENTRANT
498 sigset_t mask;
499 #endif
500 sigset_t newmask;
501
502 _DIAGASSERT(cl != NULL);
503
504 ct = (struct ct_data *)cl->cl_private;
505 xdrs = &(ct->ct_xdrs);
506
507 __clnt_sigfillset(&newmask);
508 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
509 mutex_lock(&clnt_fd_lock);
510 #ifdef _REENTRANT
511 while (vc_fd_locks[ct->ct_fd])
512 cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
513 #endif
514
515 xdrs->x_op = XDR_FREE;
516 dummy = (*xdr_res)(xdrs, res_ptr);
517 mutex_unlock(&clnt_fd_lock);
518 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
519 cond_signal(&vc_cv[ct->ct_fd]);
520
521 return dummy;
522 }
523
524 /*ARGSUSED*/
525 static void
526 clnt_vc_abort(CLIENT *cl)
527 {
528 }
529
530 static bool_t
531 clnt_vc_control(
532 CLIENT *cl,
533 u_int request,
534 char *info
535 )
536 {
537 struct ct_data *ct;
538 void *infop = info;
539 #ifdef _REENTRANT
540 sigset_t mask;
541 #endif
542 sigset_t newmask;
543
544 _DIAGASSERT(cl != NULL);
545
546 ct = (struct ct_data *)cl->cl_private;
547
548 __clnt_sigfillset(&newmask);
549 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
550 mutex_lock(&clnt_fd_lock);
551 #ifdef _REENTRANT
552 while (vc_fd_locks[ct->ct_fd])
553 cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
554 vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
555 #endif
556 mutex_unlock(&clnt_fd_lock);
557
558 switch (request) {
559 case CLSET_FD_CLOSE:
560 ct->ct_closeit = TRUE;
561 release_fd_lock(ct->ct_fd, mask);
562 return (TRUE);
563 case CLSET_FD_NCLOSE:
564 ct->ct_closeit = FALSE;
565 release_fd_lock(ct->ct_fd, mask);
566 return (TRUE);
567 default:
568 break;
569 }
570
571 /* for other requests which use info */
572 if (info == NULL) {
573 release_fd_lock(ct->ct_fd, mask);
574 return (FALSE);
575 }
576 switch (request) {
577 case CLSET_TIMEOUT:
578 if (time_not_ok((struct timeval *)(void *)info)) {
579 release_fd_lock(ct->ct_fd, mask);
580 return (FALSE);
581 }
582 ct->ct_wait = *(struct timeval *)infop;
583 ct->ct_waitset = TRUE;
584 break;
585 case CLGET_TIMEOUT:
586 *(struct timeval *)infop = ct->ct_wait;
587 break;
588 case CLGET_SERVER_ADDR:
589 (void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
590 break;
591 case CLGET_FD:
592 *(int *)(void *)info = ct->ct_fd;
593 break;
594 case CLGET_SVC_ADDR:
595 /* The caller should not free this memory area */
596 *(struct netbuf *)(void *)info = ct->ct_addr;
597 break;
598 case CLSET_SVC_ADDR: /* set to new address */
599 release_fd_lock(ct->ct_fd, mask);
600 return (FALSE);
601 case CLGET_XID:
602 /*
603 * use the knowledge that xid is the
604 * first element in the call structure
605 * This will get the xid of the PREVIOUS call
606 */
607 ntohlp(info, &ct->ct_u.ct_mcalli);
608 break;
609 case CLSET_XID:
610 /* This will set the xid of the NEXT call */
611 /* increment by 1 as clnt_vc_call() decrements once */
612 htonlp(&ct->ct_u.ct_mcalli, info, 1);
613 break;
614 case CLGET_VERS:
615 /*
616 * This RELIES on the information that, in the call body,
617 * the version number field is the fifth field from the
618 * beginning of the RPC header. MUST be changed if the
619 * call_struct is changed
620 */
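/*
 * For reference, the pre-serialized call header laid down by
 * xdr_callhdr() is one BYTES_PER_XDR_UNIT word per field:
 *
 *	word 0: xid          (CLGET_XID / CLSET_XID)
 *	word 1: direction    (CALL)
 *	word 2: rpc version  (RPC_MSG_VERSION)
 *	word 3: program      (CLGET_PROG / CLSET_PROG, 3 * BYTES_PER_XDR_UNIT)
 *	word 4: version      (CLGET_VERS / CLSET_VERS, 4 * BYTES_PER_XDR_UNIT)
 */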
621 ntohlp(info, ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT);
622 break;
623
624 case CLSET_VERS:
625 htonlp(ct->ct_u.ct_mcallc + 4 * BYTES_PER_XDR_UNIT, info, 0);
626 break;
627
628 case CLGET_PROG:
629 /*
630 * This RELIES on the information that, in the call body,
631 * the program number field is the fourth field from the
632 * beginning of the RPC header. MUST be changed if the
633 * call_struct is changed
634 */
635 ntohlp(info, ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT);
636 break;
637
638 case CLSET_PROG:
639 htonlp(ct->ct_u.ct_mcallc + 3 * BYTES_PER_XDR_UNIT, info, 0);
640 break;
641
642 default:
643 release_fd_lock(ct->ct_fd, mask);
644 return (FALSE);
645 }
646 release_fd_lock(ct->ct_fd, mask);
647 return (TRUE);
648 }
649
650
651 static void
652 clnt_vc_destroy(CLIENT *cl)
653 {
654 struct ct_data *ct;
655 #ifdef _REENTRANT
656 int ct_fd;
657 sigset_t mask;
658 #endif
659 sigset_t newmask;
660
661 _DIAGASSERT(cl != NULL);
662
663 ct = (struct ct_data *) cl->cl_private;
664
665 __clnt_sigfillset(&newmask);
666 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
667 mutex_lock(&clnt_fd_lock);
668 #ifdef _REENTRANT
669 ct_fd = ct->ct_fd;
670 while (vc_fd_locks[ct_fd])
671 cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
672 #endif
673 if (ct->ct_closeit && ct->ct_fd != -1) {
674 (void)close(ct->ct_fd);
675 }
676 XDR_DESTROY(&(ct->ct_xdrs));
677 if (ct->ct_addr.buf)
678 free(ct->ct_addr.buf);
679 mem_free(ct, sizeof(struct ct_data));
680 mem_free(cl, sizeof(CLIENT));
681 mutex_unlock(&clnt_fd_lock);
682 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
683
684 cond_signal(&vc_cv[ct_fd]);
685 }
686
687 /*
688 * Interface between xdr serializer and tcp connection.
689 * Behaves like the system calls, read & write, but keeps some error state
690 * around for the rpc level.
691 */
692 static int
693 read_vc(char *ctp, char *buf, int len)
694 {
695 struct ct_data *ct = (struct ct_data *)(void *)ctp;
696 struct pollfd fd;
697 struct timespec ts;
698 ssize_t nread;
699
700 if (len == 0)
701 return (0);
702
703 TIMEVAL_TO_TIMESPEC(&ct->ct_wait, &ts);
704 fd.fd = ct->ct_fd;
705 fd.events = POLLIN;
706 for (;;) {
707 switch (pollts(&fd, 1, &ts, NULL)) {
708 case 0:
709 ct->ct_error.re_status = RPC_TIMEDOUT;
710 return (-1);
711
712 case -1:
713 if (errno == EINTR)
714 continue;
715 ct->ct_error.re_status = RPC_CANTRECV;
716 ct->ct_error.re_errno = errno;
717 return (-1);
718 }
719 break;
720 }
721 switch (nread = read(ct->ct_fd, buf, (size_t)len)) {
722
723 case 0:
724 /* premature eof */
725 ct->ct_error.re_errno = ECONNRESET;
726 ct->ct_error.re_status = RPC_CANTRECV;
727 nread = -1; /* it's really an error */
728 break;
729
730 case -1:
731 ct->ct_error.re_errno = errno;
732 ct->ct_error.re_status = RPC_CANTRECV;
733 break;
734 }
735 return (int)nread;
736 }
737
738 static int
739 write_vc(char *ctp, char *buf, int len)
740 {
741 struct ct_data *ct = (struct ct_data *)(void *)ctp;
742 ssize_t i;
743 size_t cnt;
744
745 for (cnt = len; cnt > 0; cnt -= i, buf += i) {
746 if ((i = write(ct->ct_fd, buf, cnt)) == -1) {
747 ct->ct_error.re_errno = errno;
748 ct->ct_error.re_status = RPC_CANTSEND;
749 return (-1);
750 }
751 }
752 return len;
753 }
754
755 static struct clnt_ops *
756 clnt_vc_ops(void)
757 {
758 static struct clnt_ops ops;
759 #ifdef _REENTRANT
760 sigset_t mask;
761 #endif
762 sigset_t newmask;
763
764 /* VARIABLES PROTECTED BY ops_lock: ops */
765
766 __clnt_sigfillset(&newmask);
767 thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
768 mutex_lock(&ops_lock);
769 if (ops.cl_call == NULL) {
770 ops.cl_call = clnt_vc_call;
771 ops.cl_abort = clnt_vc_abort;
772 ops.cl_geterr = clnt_vc_geterr;
773 ops.cl_freeres = clnt_vc_freeres;
774 ops.cl_destroy = clnt_vc_destroy;
775 ops.cl_control = clnt_vc_control;
776 }
777 mutex_unlock(&ops_lock);
778 thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
779 return (&ops);
780 }
781
782 /*
783 * Make sure that the time is not garbage. -1 value is disallowed.
784 * Note this is different from time_not_ok in clnt_dg.c
785 */
786 static bool_t
787 time_not_ok(struct timeval *t)
788 {
789
790 _DIAGASSERT(t != NULL);
791
792 return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
793 t->tv_usec <= -1 || t->tv_usec > 1000000);
794 }
795