/*	$NetBSD: svc_dg.c,v 1.18 2024/01/23 17:24:38 christos Exp $	*/

/*
 * Copyright (c) 2010, Oracle America, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *     * Neither the name of the "Oracle America, Inc." nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *   FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *   COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 *   INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 *   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 *   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)svc_dg.c	1.17	94/04/24 SMI" */


/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: svc_dg.c,v 1.18 2024/01/23 17:24:38 christos Exp $");
#endif

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <rpc/rpc.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif
#include <err.h>

#include "svc_fdset.h"
#include "rpc_internal.h"
#include "svc_dg.h"

#define	su_data(xprt)	((struct svc_dg_data *)(xprt->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

#ifdef __weak_alias
__weak_alias(svc_dg_create,_svc_dg_create)
#endif

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif

static void svc_dg_ops(SVCXPRT *);
static enum xprt_stat svc_dg_stat(SVCXPRT *);
static bool_t svc_dg_recv(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_reply(SVCXPRT *, struct rpc_msg *);
static bool_t svc_dg_getargs(SVCXPRT *, xdrproc_t, caddr_t);
static bool_t svc_dg_freeargs(SVCXPRT *, xdrproc_t, caddr_t);
static void svc_dg_destroy(SVCXPRT *);
static bool_t svc_dg_control(SVCXPRT *, const u_int, void *);
static int cache_get(SVCXPRT *, struct rpc_msg *, char **, size_t *);
static void cache_set(SVCXPRT *, size_t);

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does the other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered;
 * see (svc.h, xprt_register). If recvsize or sendsize are 0, suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
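/*
 * Illustrative sketch (not part of the library sources): a typical
 * caller creates a bound datagram socket, wraps it with svc_dg_create()
 * and then registers a dispatch routine, roughly:
 *
 *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *	(bind(2) the socket to the service address here)
 *	SVCXPRT *xprt = svc_dg_create(sock, 0, 0);
 *	if (xprt == NULL ||
 *	    !svc_reg(xprt, PROGNUM, VERSNUM, dispatch, NULL))
 *		errx(1, "cannot create datagram service");
 *
 * PROGNUM, VERSNUM and dispatch() are placeholders for the caller's
 * program number, version number and dispatch function; passing 0 for
 * both sizes selects the system defaults as described above.
 */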
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(int fd, u_int sendsize, u_int recvsize)
{
	SVCXPRT *xprt;
	struct svc_dg_data *su = NULL;
	struct __rpc_sockinfo si;
	struct sockaddr_storage ss;
	socklen_t slen;

	if (!__rpc_fd2sockinfo(fd, &si)) {
		warnx(svc_dg_str, svc_dg_err1);
		return (NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		warnx(svc_dg_str, svc_dg_err2);
		return (NULL);
	}

	xprt = mem_alloc(sizeof (SVCXPRT));
	if (xprt == NULL)
		goto outofmem;
	memset(xprt, 0, sizeof (SVCXPRT));

	su = mem_alloc(sizeof (*su));
	if (su == NULL)
		goto outofmem;
	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
	if ((rpc_buffer(xprt) = malloc(su->su_iosz)) == NULL)
		goto outofmem;
	_DIAGASSERT(__type_fit(u_int, su->su_iosz));
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), (u_int)su->su_iosz,
		XDR_DECODE);
	su->su_cache = NULL;
	xprt->xp_fd = fd;
	xprt->xp_p2 = (caddr_t)(void *)su;
	xprt->xp_verf.oa_base = su->su_verfbody;
	svc_dg_ops(xprt);
	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

	slen = sizeof ss;
	if (getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
		goto freedata;
	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_ltaddr.len = slen;
	memcpy(xprt->xp_ltaddr.buf, &ss, slen);

	if (!xprt_register(xprt))
		goto freedata;
	return (xprt);

outofmem:
	(void) warnx(svc_dg_str, __no_mem_str);
freedata:
	if (xprt) {
		if (su)
			(void) mem_free(su, sizeof (*su));
		(void) mem_free(xprt, sizeof (SVCXPRT));
	}
	return (NULL);
}

/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(SVCXPRT *xprt)
{
	return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct svc_dg_data *su;
	XDR *xdrs;
	char *reply;
	struct sockaddr_storage ss;
	socklen_t alen;
	size_t replylen;
	ssize_t rlen;

	_DIAGASSERT(xprt != NULL);
	_DIAGASSERT(msg != NULL);

	su = su_data(xprt);
	xdrs = &(su->su_xdrs);

again:
	alen = sizeof (struct sockaddr_storage);
	rlen = recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
	    (struct sockaddr *)(void *)&ss, &alen);
	if (rlen == -1 && errno == EINTR)
		goto again;
	if (rlen == -1 || (rlen < (ssize_t)(4 * sizeof (u_int32_t))))
		return (FALSE);
	if (xprt->xp_rtaddr.len < alen) {
		if (xprt->xp_rtaddr.len != 0)
			mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
		xprt->xp_rtaddr.buf = mem_alloc(alen);
		xprt->xp_rtaddr.len = alen;
	}
	memcpy(xprt->xp_rtaddr.buf, &ss, alen);
#ifdef PORTMAP
	if (ss.ss_family == AF_INET) {
		xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
		xprt->xp_addrlen = sizeof (struct sockaddr_in);
	}
#endif
	xdrs->x_op = XDR_DECODE;
	XDR_SETPOS(xdrs, 0);
	if (! xdr_callmsg(xdrs, msg)) {
		return (FALSE);
	}
	su->su_xid = msg->rm_xid;
	if (su->su_cache != NULL) {
		if (cache_get(xprt, msg, &reply, &replylen)) {
			(void)sendto(xprt->xp_fd, reply, replylen, 0,
			    (struct sockaddr *)(void *)&ss, alen);
			return (FALSE);
		}
	}
	return (TRUE);
}

static bool_t
svc_dg_reply(SVCXPRT *xprt, struct rpc_msg *msg)
{
	struct svc_dg_data *su;
	XDR *xdrs;
	bool_t stat = FALSE;
	size_t slen;

	_DIAGASSERT(xprt != NULL);
	_DIAGASSERT(msg != NULL);

	su = su_data(xprt);
	xdrs = &(su->su_xdrs);

	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);
	msg->rm_xid = su->su_xid;
	if (xdr_replymsg(xdrs, msg)) {
		slen = XDR_GETPOS(xdrs);
		if (sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
		    (struct sockaddr *)xprt->xp_rtaddr.buf,
		    (socklen_t)xprt->xp_rtaddr.len) == (ssize_t) slen) {
			stat = TRUE;
			if (su->su_cache)
				cache_set(xprt, slen);
		}
	}
	return (stat);
}

static bool_t
svc_dg_getargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
}

static bool_t
svc_dg_freeargs(SVCXPRT *xprt, xdrproc_t xdr_args, caddr_t args_ptr)
{
	XDR *xdrs;

	_DIAGASSERT(xprt != NULL);

	xdrs = &(su_data(xprt)->su_xdrs);
	xdrs->x_op = XDR_FREE;
	return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(SVCXPRT *xprt)
{
	struct svc_dg_data *su;

	_DIAGASSERT(xprt != NULL);

	su = su_data(xprt);

	xprt_unregister(xprt);
	if (xprt->xp_fd != -1)
		(void)close(xprt->xp_fd);
	XDR_DESTROY(&(su->su_xdrs));
	(void) mem_free(rpc_buffer(xprt), su->su_iosz);
	(void) mem_free(su, sizeof (*su));
	if (xprt->xp_rtaddr.buf)
		(void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		(void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	if (xprt->xp_tp)
		(void) free(xprt->xp_tp);
	(void) mem_free(xprt, sizeof (SVCXPRT));
}

static bool_t
/*ARGSUSED*/
svc_dg_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}

static void
svc_dg_ops(SVCXPRT *xprt)
{
	static struct xp_ops ops;
	static struct xp_ops2 ops2;

	_DIAGASSERT(xprt != NULL);

/* VARIABLES PROTECTED BY ops_lock: ops */

	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops2.xp_control = svc_dg_control;
	}
	xprt->xp_ops = &ops;
	xprt->xp_ops2 = &ops2;
	mutex_unlock(&ops_lock);
}

/*  The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * FIFO cache for the connectionless (cl) server.
 * Copies pointers to reply buffers into the FIFO cache.
 * Buffers are sent again if retransmissions are detected.
 */

#define	SPARSENESS 4	/* 75% sparse */

#define	ALLOC(type, size)	\
	mem_alloc((sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	 \
	(void) memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define	FREE(addr, type, size)	\
	mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	size_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
		su_data(transp)->su_cache)->uc_size))
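/*
 * For illustration: with SPARSENESS 4 and a cache created with
 * uc_size = 64, CACHE_LOC() hashes an xid into one of 4 * 64 = 256
 * slots, e.g. xid 0x1234 (4660) maps to slot 4660 % 256 = 52.
 * Colliding entries are chained through cache_next.
 */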

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
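/*
 * Illustrative sketch (not part of the library sources): a server that
 * wants the approximate execute-at-most-once behaviour enables the
 * duplicate-request cache right after creating the transport, roughly:
 *
 *	SVCXPRT *xprt = svc_dg_create(sock, 0, 0);
 *	if (xprt == NULL || !svc_dg_enablecache(xprt, 64))
 *		errx(1, "cannot set up cached datagram transport");
 *
 * The size (64 here, chosen arbitrarily) is the number of FIFO slots;
 * the hash table itself is allocated SPARSENESS times larger.
 */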
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(SVCXPRT *transp, u_int size)
{
	struct svc_dg_data *su;
	struct cl_cache *uc;

	_DIAGASSERT(transp != NULL);

	su = su_data(transp);

	mutex_lock(&dupreq_lock);
	if (su->su_cache != NULL) {
		(void) warnx(cache_enable_str, enable_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc = ALLOC(struct cl_cache, 1);
	if (uc == NULL) {
		warnx(cache_enable_str, alloc_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc->uc_size = size;
	uc->uc_nextvictim = 0;
	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
	if (uc->uc_entries == NULL) {
		warnx(cache_enable_str, alloc_err, "data");
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
	uc->uc_fifo = ALLOC(cache_ptr, size);
	if (uc->uc_fifo == NULL) {
		warnx(cache_enable_str, alloc_err, "fifo");
		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_fifo, cache_ptr, size);
	su->su_cache = (char *)(void *)uc;
	mutex_unlock(&dupreq_lock);
	return (1);
}

/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will always
 * happen because cache_get() is called by svc_dg_recv() and cache_set() is
 * called by svc_dg_reply().  All this hoopla because the right RPC parameters
 * are not available at svc_dg_reply() time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(SVCXPRT *xprt, size_t replylen)
{
	cache_ptr victim;
	cache_ptr *vicp;
	struct svc_dg_data *su;
	struct cl_cache *uc;
	u_int loc;
	char *newbuf;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	_DIAGASSERT(xprt != NULL);

	su = su_data(xprt);
	uc = (struct cl_cache *) su->su_cache;

	mutex_lock(&dupreq_lock);
	/*
	 * Find space for the new entry, either by
	 * reusing an old entry, or by mallocing a new one
	 */
	victim = uc->uc_fifo[uc->uc_nextvictim];
	if (victim != NULL) {
		loc = CACHE_LOC(xprt, victim->cache_xid);
		for (vicp = &uc->uc_entries[loc];
			*vicp != NULL && *vicp != victim;
			vicp = &(*vicp)->cache_next)
			;
		if (*vicp == NULL) {
			warnx(cache_set_str, cache_set_err1);
			mutex_unlock(&dupreq_lock);
			return;
		}
		*vicp = victim->cache_next;	/* remove from cache */
		newbuf = victim->cache_reply;
	} else {
		victim = ALLOC(struct cache_node, 1);
		if (victim == NULL) {
			warnx(cache_set_str, cache_set_err2);
			mutex_unlock(&dupreq_lock);
			return;
		}
		newbuf = mem_alloc(su->su_iosz);
		if (newbuf == NULL) {
			warnx(cache_set_str, cache_set_err3);
			FREE(victim, struct cache_node, 1);
			mutex_unlock(&dupreq_lock);
			return;
		}
	}

	/*
	 * Store it away
	 */
#ifdef RPC_CACHE_DEBUG
	if (nconf = getnetconfigent(xprt->xp_netid)) {
		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
		freenetconfigent(nconf);
		printf(
	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
			su->su_xid, uc->uc_prog, uc->uc_vers,
			uc->uc_proc, uaddr);
		free(uaddr);
	}
#endif
	victim->cache_replylen = replylen;
	victim->cache_reply = rpc_buffer(xprt);
	rpc_buffer(xprt) = newbuf;
	_DIAGASSERT(__type_fit(u_int, su->su_iosz));
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), (u_int)su->su_iosz,
	    XDR_ENCODE);
	victim->cache_xid = su->su_xid;
	victim->cache_proc = uc->uc_proc;
	victim->cache_vers = uc->uc_vers;
	victim->cache_prog = uc->uc_prog;
	victim->cache_addr = xprt->xp_rtaddr;
	victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
	(void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
	    (size_t)xprt->xp_rtaddr.len);
	loc = CACHE_LOC(xprt, victim->cache_xid);
	victim->cache_next = uc->uc_entries[loc];
	uc->uc_entries[loc] = victim;
	uc->uc_fifo[uc->uc_nextvictim++] = victim;
	uc->uc_nextvictim %= uc->uc_size;
	mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache.
 * Returns 1 if found, 0 if not found; in the latter case it also sets
 * the stage for cache_set().
 */
static int
cache_get(SVCXPRT *xprt, struct rpc_msg *msg, char **replyp, size_t *replylenp)
{
	u_int loc;
	cache_ptr ent;
	struct svc_dg_data *su;
	struct cl_cache *uc;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	_DIAGASSERT(xprt != NULL);
	_DIAGASSERT(msg != NULL);
	_DIAGASSERT(replyp != NULL);
	_DIAGASSERT(replylenp != NULL);

	su = su_data(xprt);
	uc = (struct cl_cache *) su->su_cache;

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
			ent->cache_proc == msg->rm_call.cb_proc &&
			ent->cache_vers == msg->rm_call.cb_vers &&
			ent->cache_prog == msg->rm_call.cb_prog &&
			ent->cache_addr.len == xprt->xp_rtaddr.len &&
			(memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
				xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}
627