/*	$NetBSD: nfs_srvcache.c,v 1.27 2003/05/21 14:13:34 yamt Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_srvcache.c	8.3 (Berkeley) 3/30/95
 */

/*
 * Reference: Chet Juszczak, "Improving the Performance and Correctness
 *		of an NFS Server", in Proc. Winter 1989 USENIX Conference,
 *		pages 53-63. San Diego, February 1989.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_srvcache.c,v 1.27 2003/05/21 14:13:34 yamt Exp $");

#include "opt_iso.h"

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <netinet/in.h>
#ifdef ISO
#include <netiso/iso.h>
#endif
#include <nfs/nfsm_subs.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsrvcache.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>

extern struct nfsstats nfsstats;
extern const int nfsv2_procid[NFS_NPROCS];
long numnfsrvcache, desirednfsrvcache = NFSRVCACHESIZ;
struct pool nfs_reqcache_pool;

#define	NFSRCHASH(xid) \
	(&nfsrvhashtbl[((xid) + ((xid) >> 24)) & nfsrvhash])
LIST_HEAD(nfsrvhash, nfsrvcache) *nfsrvhashtbl;
TAILQ_HEAD(nfsrvlru, nfsrvcache) nfsrvlruhead;
struct simplelock nfsrv_reqcache_lock = SIMPLELOCK_INITIALIZER;
u_long nfsrvhash;
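
/*
 * Each cache entry sits on one NFSRCHASH() chain, selected by folding the
 * high byte of the RPC transaction id into the low bits before masking
 * with nfsrvhash (the bucket mask), and on a single LRU list that is used
 * to recycle the oldest entry once numnfsrvcache reaches desirednfsrvcache.
 * nfsrv_reqcache_lock protects both structures.
 */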

#define	NETFAMILY(rp) \
		(((rp)->rc_flag & RC_INETADDR) ? AF_INET : AF_ISO)

static struct nfsrvcache *nfsrv_lookupcache(struct nfsrv_descript *nd);
static void nfsrv_unlockcache(struct nfsrvcache *rp);

/*
 * Static array that defines which nfs rpc's are nonidempotent
 */
const int nonidempotent[NFS_NPROCS] = {
	FALSE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
};
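
/*
 * The table is indexed by NFSv3 procedure number (nd_procnum uses that
 * numbering even for version 2 requests, which is why nfsrv_updatecache()
 * has to translate back through nfsv2_procid[]); entry 2 is SETATTR and
 * entries 7 through 15 cover WRITE through LINK.  Replies are saved only
 * for these procedures: replaying an idempotent request is harmless, so
 * caching its reply would just tie up mbufs.
 */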

/* True iff the rpc reply is an nfs status ONLY! */
static const int nfsv2_repstat[NFS_NPROCS] = {
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	FALSE,
	TRUE,
	TRUE,
	TRUE,
	TRUE,
	FALSE,
	TRUE,
	FALSE,
	FALSE,
};
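
/*
 * This one is indexed by NFSv2 procedure number; nfsrv_updatecache()
 * translates nd_procnum through nfsv2_procid[] before using it.  For the
 * procedures marked TRUE the entire reply is just a status word, so only
 * rc_status needs to be remembered (RC_REPSTATUS) instead of a copy of
 * the reply mbuf chain.
 */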

/*
 * Initialize the server request cache list
 */
void
nfsrv_initcache()
{

	nfsrvhashtbl = hashinit(desirednfsrvcache, HASH_LIST, M_NFSD,
	    M_WAITOK, &nfsrvhash);
	TAILQ_INIT(&nfsrvlruhead);
	pool_init(&nfs_reqcache_pool, sizeof(struct nfsrvcache), 0, 0, 0,
	    "nfsreqcachepl", &pool_allocator_nointr);
}
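
/*
 * This runs once, before the server handles its first request (the call
 * comes from the NFS initialization code outside this file).  hashinit()
 * sizes the table to a power of two near desirednfsrvcache and hands back
 * the corresponding bucket mask in nfsrvhash for use by NFSRCHASH().
 */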

/*
 * Look up a cache entry and lock it
 */
static struct nfsrvcache *
nfsrv_lookupcache(nd)
	struct nfsrv_descript *nd;
{
	struct nfsrvcache *rp;

	LOCK_ASSERT(simple_lock_held(&nfsrv_reqcache_lock));

loop:
	LIST_FOREACH(rp, NFSRCHASH(nd->nd_retxid), rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_flag & RC_LOCKED) != 0) {
				rp->rc_flag |= RC_WANTED;
				(void) ltsleep(rp, PZERO - 1, "nfsrc", 0,
				    &nfsrv_reqcache_lock);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			break;
		}
	}

	return rp;
}

/*
 * Unlock a cache entry
 */
static void
nfsrv_unlockcache(rp)
	struct nfsrvcache *rp;
{

	LOCK_ASSERT(simple_lock_held(&nfsrv_reqcache_lock));

	rp->rc_flag &= ~RC_LOCKED;
	if (rp->rc_flag & RC_WANTED) {
		rp->rc_flag &= ~RC_WANTED;
		wakeup(rp);
	}
}
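
/*
 * The pair above implements a simple per-entry lock: RC_LOCKED marks an
 * entry busy, and a thread that finds it busy sets RC_WANTED and
 * ltsleep()s on the entry (releasing and retaking nfsrv_reqcache_lock
 * around the sleep) until the holder clears RC_LOCKED and does the
 * wakeup().  Both routines must be entered with nfsrv_reqcache_lock held.
 */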

/*
 * Look for the request in the cache.
 * If found then
 *    return the action to take (and, for RC_REPLY, the saved reply)
 * else
 *    insert it in the cache
 *
 * The rules are as follows:
 * - if the request is still in progress, return RC_DROPIT
 * - if it completed within DELAY of the current time, drop it as a
 *   recent duplicate (again RC_DROPIT)
 * - if it completed longer ago, return RC_REPLY if the reply was cached,
 *   otherwise RC_DOIT
 * Either way the request is updated/added at the end of the LRU list.
 */
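/*
 * A simplified sketch of how the nfsd loop is expected to drive this
 * cache (the real caller, nfssvc_nfsd() in nfs_syscalls.c, also deals
 * with errors, record marks and so on):
 *
 *	switch (nfsrv_getcache(nd, slp, &mreq)) {
 *	case RC_DOIT:
 *		error = (*(nfsrv3_procs[nd->nd_procnum]))(nd, slp, p, &mreq);
 *		nfsrv_updatecache(nd, TRUE, mreq);
 *		... send mreq to the client ...
 *		break;
 *	case RC_REPLY:
 *		... send the cached reply in mreq ...
 *		break;
 *	case RC_DROPIT:
 *		... drop the request on the floor ...
 *		break;
 *	}
 */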
int
nfsrv_getcache(nd, slp, repp)
	struct nfsrv_descript *nd;
	struct nfssvc_sock *slp;
	struct mbuf **repp;
{
	struct nfsrvcache *rp, *rpdup;
	struct mbuf *mb;
	struct sockaddr_in *saddr;
	caddr_t bpos;
	int ret;

	/*
	 * Don't cache recent requests for reliable transport protocols.
	 * (Maybe we should for the case of a reconnect, but..)
	 */
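	/*
	 * nd_nam2 (the address to send the reply to) is only filled in for
	 * requests that arrived on a datagram socket, so this test skips
	 * the cache entirely for connection-oriented (e.g. TCP) requests.
	 */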
	if (!nd->nd_nam2)
		return RC_DOIT;
	simple_lock(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	if (rp) {
		simple_unlock(&nfsrv_reqcache_lock);
found:
		/* If not at end of LRU chain, move it there */
		if (TAILQ_NEXT(rp, rc_lru)) { /* racy but ok */
			simple_lock(&nfsrv_reqcache_lock);
			TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
			TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
			simple_unlock(&nfsrv_reqcache_lock);
		}
		if (rp->rc_state == RC_UNUSED)
			panic("nfsrv cache");
		if (rp->rc_state == RC_INPROG) {
			nfsstats.srvcache_inproghits++;
			ret = RC_DROPIT;
		} else if (rp->rc_flag & RC_REPSTATUS) {
			nfsstats.srvcache_nonidemdonehits++;
			nfs_rephead(0, nd, slp, rp->rc_status,
			   0, (u_quad_t *)0, repp, &mb, &bpos);
			ret = RC_REPLY;
		} else if (rp->rc_flag & RC_REPMBUF) {
			nfsstats.srvcache_nonidemdonehits++;
			*repp = m_copym(rp->rc_reply, 0, M_COPYALL,
					M_WAIT);
			ret = RC_REPLY;
		} else {
			nfsstats.srvcache_idemdonehits++;
			rp->rc_state = RC_INPROG;
			ret = RC_DOIT;
		}
		simple_lock(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		simple_unlock(&nfsrv_reqcache_lock);
		return ret;
	}
	nfsstats.srvcache_misses++;
	if (numnfsrvcache < desirednfsrvcache) {
		numnfsrvcache++;
		simple_unlock(&nfsrv_reqcache_lock);
		rp = pool_get(&nfs_reqcache_pool, PR_WAITOK);
		memset(rp, 0, sizeof *rp);
		rp->rc_flag = RC_LOCKED;
	} else {
		rp = TAILQ_FIRST(&nfsrvlruhead);
		while ((rp->rc_flag & RC_LOCKED) != 0) {
			rp->rc_flag |= RC_WANTED;
			(void) ltsleep(rp, PZERO-1, "nfsrc", 0,
			    &nfsrv_reqcache_lock);
			rp = TAILQ_FIRST(&nfsrvlruhead);
		}
		rp->rc_flag |= RC_LOCKED;
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		simple_unlock(&nfsrv_reqcache_lock);
		if (rp->rc_flag & RC_REPMBUF)
			m_freem(rp->rc_reply);
		if (rp->rc_flag & RC_NAM)
			(void) m_free(rp->rc_nam);
		rp->rc_flag &= (RC_LOCKED | RC_WANTED);
	}
	rp->rc_state = RC_INPROG;
	rp->rc_xid = nd->nd_retxid;
	saddr = mtod(nd->nd_nam, struct sockaddr_in *);
	switch (saddr->sin_family) {
	case AF_INET:
		rp->rc_flag |= RC_INETADDR;
		rp->rc_inetaddr = saddr->sin_addr.s_addr;
		break;
	case AF_ISO:
	default:
		rp->rc_flag |= RC_NAM;
		rp->rc_nam = m_copym(nd->nd_nam, 0, M_COPYALL, M_WAIT);
		break;
	}
	rp->rc_proc = nd->nd_procnum;
	simple_lock(&nfsrv_reqcache_lock);
	rpdup = nfsrv_lookupcache(nd);
	if (rpdup != NULL) {
		/*
		 * Another thread made a duplicate cache entry while we were
		 * setting ours up; throw ours away and carry on with the
		 * existing (now locked) entry instead.
		 */
		simple_unlock(&nfsrv_reqcache_lock);
		if (rp->rc_flag & RC_NAM)
			(void) m_free(rp->rc_nam);
		pool_put(&nfs_reqcache_pool, rp);
		rp = rpdup;
		goto found;
	}
	TAILQ_INSERT_TAIL(&nfsrvlruhead, rp, rc_lru);
	LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash);
	nfsrv_unlockcache(rp);
	simple_unlock(&nfsrv_reqcache_lock);
	return RC_DOIT;
}

/*
 * Update a request cache entry after the rpc has been done
 */
void
nfsrv_updatecache(nd, repvalid, repmbuf)
	struct nfsrv_descript *nd;
	int repvalid;
	struct mbuf *repmbuf;
{
	struct nfsrvcache *rp;

	if (!nd->nd_nam2)
		return;
	simple_lock(&nfsrv_reqcache_lock);
	rp = nfsrv_lookupcache(nd);
	simple_unlock(&nfsrv_reqcache_lock);
	if (rp) {
		rp->rc_state = RC_DONE;
		/*
		 * If we have a valid reply, update the status and save
		 * the reply for non-idempotent RPCs.
		 */
		if (repvalid && nonidempotent[nd->nd_procnum]) {
			if ((nd->nd_flag & ND_NFSV3) == 0 &&
			  nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) {
				rp->rc_status = nd->nd_repstat;
				rp->rc_flag |= RC_REPSTATUS;
			} else {
				rp->rc_reply = m_copym(repmbuf,
					0, M_COPYALL, M_WAIT);
				rp->rc_flag |= RC_REPMBUF;
			}
		}
		simple_lock(&nfsrv_reqcache_lock);
		nfsrv_unlockcache(rp);
		simple_unlock(&nfsrv_reqcache_lock);
	}
}

/*
 * Clean out the cache. Called when the last nfsd terminates.
 */
void
nfsrv_cleancache()
{
	struct nfsrvcache *rp, *nextrp;

	simple_lock(&nfsrv_reqcache_lock);
	for (rp = TAILQ_FIRST(&nfsrvlruhead); rp != 0; rp = nextrp) {
		nextrp = TAILQ_NEXT(rp, rc_lru);
		LIST_REMOVE(rp, rc_hash);
		TAILQ_REMOVE(&nfsrvlruhead, rp, rc_lru);
		pool_put(&nfs_reqcache_pool, rp);
	}
	numnfsrvcache = 0;
	simple_unlock(&nfsrv_reqcache_lock);
}