xref: /csrg-svn/lib/libc/db/btree/lrucache.c (revision 45875)
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Mike Olson.
 *
 * %sccs.include.redist.c%
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char sccsid[] = "@(#)lrucache.c	5.1 (Berkeley) 01/07/91";
#endif /* LIBC_SCCS and not lint */

/*
 *  lrucache.c -- LRU cache for disk-based btree pages.
 *
 *	This file implements an LRU cache in user space for disk-based
 *	btrees.
 */
#include <sys/param.h>
#include <sys/file.h>
#include <sys/signal.h>

/*
 *  LRU list entries.  The head of the list is the most-recently requested
 *  block; the tail is the least-recently requested one.
 */

typedef struct LRU_ENT {
	char	*l_buffer;		/* buffer we return to user */
	int	l_pgno;			/* logical page number */
	int	l_flags;		/* FREE and DIRTY bits */
	struct LRU_ENT	*l_prev;	/* predecessor in LRU list */
	struct LRU_ENT	*l_next;	/* successor in LRU list */
} LRU_ENT;

/*
 *  Cache entries.  We use a hash table to avoid a linear walk of the LRU
 *  list when we need to look up blocks by number.  The hash table is
 *  chained.
 */

typedef struct CACHE_ENT {
	int			c_pgno;
	LRU_ENT			*c_lruent;
	struct CACHE_ENT	*c_chain;
} CACHE_ENT;

/*
 *  The LRU cache structure.  The cache size (lru_csize) is the largest size
 *  the user wants us to grow to; current size (lru_cursz) is always less than
 *  or equal to lru_csize.  Note that we will grow the cache (lru_csize) if
 *  it's the only way that we can satisfy a user's block request.
 */

typedef struct LRUCACHE {
	int		lru_fd;
	int		lru_csize;
	int		lru_psize;
	int		lru_cursz;
	char		*lru_opaque;		/* passed to inproc, outproc */
	int		(*lru_inproc)();
	int		(*lru_outproc)();
	LRU_ENT		*lru_head;
	LRU_ENT		*lru_tail;
	CACHE_ENT	**lru_cache;
} LRUCACHE;

/* this is the opaque type we return for LRU caches */
typedef	char	*LRU;

/* bits for l_flags in LRU_ENT structure */
#define	F_DIRTY		(1 << 0)
#define F_FREE		(1 << 1)

#define HASH(l, pgno)	(pgno % l->lru_csize)

/* all of these are defined in this file */
static CACHE_ENT	*lruhashget();
static CACHE_ENT	*lruhashput();
static int 		lruhashdel();
static void		lruhead();
static int 		lrugrow();
extern LRU		lruinit();
extern int		lruwrite();
extern int		lrusync();
extern int		lruflush();
extern char		*lruget();
extern char		*lrugetnew();
static char		*lrugetpg();
extern int		lrurelease();
extern void		lrufree();

/*
 *  LRUINIT -- Initialize a new LRU cache.
 *
 *	There's a separate LRU cache for every open file descriptor on which
 *	the user wants caching.  The desired cache size and logical page
 *	size are passed in.  We try to avoid growing the cache beyond the
 *	limit specified by the user, but if we cannot satisfy a block request
 *	without growing the cache, we do so.
 *
 *	Note that the page size passed in is the logical page size for
 *	use with this file descriptor, and doesn't necessarily have anything
 *	to do with the underlying hardware page size.
 *
 *	Parameters:
 *		fd -- file descriptor for this cache
 *		cachesz -- number of buffers in cache (suggested)
 *		pagesz -- logical page size inside this file
 *		opaque -- cookie passed through to inproc and outproc
 *		inproc -- routine to call when a buffer is read
 *		outproc -- routine to call when a buffer is written
 *
 *	Returns:
 *		Opaque pointer to the LRU cache on success, NULL on failure.
 *
 *	Side Effects:
 *		Allocates memory for the hash table and LRU cache.  Buffers
 *		are allocated on demand, later.
 */
LRU
lruinit(fd, cachesz, pagesz, opaque, inproc, outproc)
	int fd;
	int cachesz;
	int pagesz;
	char *opaque;
	int (*inproc)();
	int (*outproc)();
{
	register LRUCACHE *l;
	int nbytes;

	/* allocate the LRU cache struct */
	if ((l = (LRUCACHE *) malloc((unsigned) sizeof(LRUCACHE)))
	    == (LRUCACHE *) NULL)
		return ((LRU) NULL);

	/* allocate the hash table */
	nbytes = cachesz * sizeof(CACHE_ENT *);
	if ((l->lru_cache = (CACHE_ENT **) malloc((unsigned) nbytes))
	    == (CACHE_ENT **) NULL) {
		(void) free(l);
		return ((LRU) NULL);
	}

	/* init memory */
	bzero(l->lru_cache, nbytes);
	l->lru_fd = fd;
	l->lru_psize = pagesz;
	l->lru_csize = cachesz;
	l->lru_cursz = 0;
	l->lru_opaque = opaque;
	l->lru_head = l->lru_tail = (LRU_ENT *) NULL;
	l->lru_inproc = inproc;
	l->lru_outproc = outproc;

	return ((LRU) l);
}
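
#ifdef LRUCACHE_EXAMPLE
/*
 *  Example (a minimal usage sketch):  one plausible way a caller might
 *  create a cache for a file descriptor it has already opened.  The guard
 *  macro LRUCACHE_EXAMPLE, the function name example_setup(), and the
 *  32-buffer / 1024-byte figures are hypothetical.  Passing null function
 *  pointers for inproc and outproc simply skips the per-read and per-write
 *  conversion calls.
 */
LRU
example_setup(fd)
	int fd;
{
	LRU lru;

	/* ask for at most 32 cached buffers of 1024 bytes each */
	lru = lruinit(fd, 32, 1024, (char *) NULL,
	    (int (*)()) NULL, (int (*)()) NULL);

	/* NULL here means the cache or its hash table couldn't be malloc'd */
	return (lru);
}
#endif /* LRUCACHE_EXAMPLE */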

/*
 *  LRUGET -- Get a buffer from an LRU cache.
 *
 *	If the buffer is not in the cache at present, this routine will
 *	instantiate it from the file.  This REQUIRES that the desired
 *	block actually be on disk; we don't do non-blocking reads, so
 *	if it's not actually out there, this routine won't return for
 *	a very long time.  In order to instantiate a new buffer, use
 *	lrugetnew().
 *
 *	Parameters:
 *		lru -- the LRU cache to use.
 *		pgno -- the logical block number to get.
 *		nread -- pointer to an int, in which the number of bytes
 *			 read is returned.
 *
 *	Returns:
 *		(char *) pointer to the buffer for the desired block.  The
 *		number of bytes actually read is returned in *nread.
 *
 *	Warnings:
 *		The requested buffer is locked down until the user does
 *		an explicit lrurelease() on it.
 */

char *
lruget(lru, pgno, nread)
	LRU lru;
	int pgno;
	int *nread;
{
	LRUCACHE *l = (LRUCACHE *) lru;
	CACHE_ENT *ce;
	int hashind;
	LRU_ENT *lruent;
	char *buffer;
	long pos;

	/* is it already in the cache? */
	if ((ce = lruhashget(l, pgno)) != (CACHE_ENT *) NULL) {

		/* yes, move it to the head and return it */
		lruent = ce->c_lruent;
		lruent->l_flags &= ~F_FREE;
		lruhead(l, ce->c_lruent);
		*nread = l->lru_psize;
		return (ce->c_lruent->l_buffer);
	}

	/* not there, get a free page */
	if ((buffer = lrugetpg(l, pgno, nread, lruget)) == (char *) NULL)
		return ((char *) NULL);

	/* okay, got a buffer -- fill it */
	pos = (long) (l->lru_psize * pgno);
	if (lseek(l->lru_fd, pos, L_SET) != pos)
		return ((char *) NULL);

	*nread = read(l->lru_fd, buffer, l->lru_psize);

	if (l->lru_inproc)
		(*(l->lru_inproc))(buffer, l->lru_opaque);

	return (buffer);
}
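
#ifdef LRUCACHE_EXAMPLE
/*
 *  Example (a minimal usage sketch):  read one page through the cache and
 *  release it when done.  The function name example_read_page() is
 *  hypothetical.  The buffer stays locked in the cache until lrurelease()
 *  is called for its page number.
 */
int
example_read_page(lru, pgno)
	LRU lru;
	int pgno;
{
	char *buf;
	int nread;

	if ((buf = lruget(lru, pgno, &nread)) == (char *) NULL)
		return (-1);

	/* ... examine the nread valid bytes in buf here ... */

	/* let the cache reuse this buffer for other pages */
	return (lrurelease(lru, pgno));
}
#endif /* LRUCACHE_EXAMPLE */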

/*
 *  LRUGETNEW -- Get a page for a new block in an LRU cache.
 *
 *	This routine allows users to instantiate pages for a file if they
 *	don't exist on disk yet.  It's used to make a file bigger.
 *
 *	Parameters:
 *		lru -- the LRU cache to use
 *		pgno -- the (new) logical page number to instantiate
 *		nread -- ptr to int to get number of bytes read (this is
 *			 guaranteed to be zero, since the page isn't on disk)
 *
 *	Returns:
 *		Pointer to a buffer for the associated page, or NULL on
 *		failure.
 *
 *	Warnings:
 *		The associated buffer is locked down until the user
 *		explicitly does an lrurelease() on it.
 */

char *
lrugetnew(lru, pgno, nread)
	LRU lru;
	int pgno;
	int *nread;
{
	LRUCACHE *l = (LRUCACHE *) lru;

	*nread = 0;
	return (lrugetpg(l, pgno, nread, lrugetnew));
}
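
#ifdef LRUCACHE_EXAMPLE
/*
 *  Example (a minimal usage sketch):  append a brand-new page to the file.
 *  The function name example_new_page() and the pagesz parameter are
 *  hypothetical; the caller is assumed to pass the same logical page size
 *  it gave to lruinit().  The buffer stays locked until the caller does an
 *  lrurelease() on newpgno, and nothing reaches disk until the page is
 *  marked dirty with lruwrite() and later flushed.
 */
char *
example_new_page(lru, newpgno, pagesz)
	LRU lru;
	int newpgno;
	int pagesz;
{
	char *buf;
	int nread;

	if ((buf = lrugetnew(lru, newpgno, &nread)) == (char *) NULL)
		return ((char *) NULL);

	/* nread is always zero here; the page has no image on disk yet */
	bzero(buf, pagesz);

	return (buf);
}
#endif /* LRUCACHE_EXAMPLE */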

/*
 *  LRUGETPG -- Get a free page from the LRU cache.
 *
 *	This routine grows the cache if necessary, finds an unused page if
 *	it can, and handles flushing dirty buffers to disk.
 *
 *	One of the parameters to this routine (f) is the routine that called
 *	us.  If we have to grow the cache, we call f again after growing it;
 *	that call re-enters lrugetpg() with room to allocate a buffer and
 *	fills it appropriately.  We need f because there are two interfaces
 *	that call lrugetpg():  lruget() fills a page from disk, and
 *	lrugetnew() just allocates a new (empty) page.
 *
 *	Parameters:
 *		l -- LRU cache to use.
 *		pgno -- page number for which we want a buffer
 *		nread -- pointer to an int to get number of bytes read
 *		f -- who called us
 *
 *	Returns:
 *		(char *) pointer to buffer to use, or NULL on failure.
 *
 *	Warnings:
 *		The buffer returned is locked down until the user does an
 *		explicit lrurelease() on it.
 */

static char *
lrugetpg(l, pgno, nread, f)
	LRUCACHE *l;
	int pgno;
	int *nread;
	char *(*f)();
{
	CACHE_ENT *ce;
	LRU_ENT *lruent;
	char *buffer;

	/* if we're allowed to grow the cache, do so */
	if (l->lru_cursz < l->lru_csize) {

		/* get a buffer */
		if ((buffer = (char *) malloc((unsigned) l->lru_psize))
		    == (char *) NULL)
			return ((char *) NULL);

		/* get an LRU list entry */
		if ((lruent = (LRU_ENT *) malloc((unsigned) sizeof(LRU_ENT)))
		    == (LRU_ENT *) NULL) {
			/* don't leak the data buffer we just allocated */
			free(buffer);
			return ((char *) NULL);
		}
		lruent->l_buffer = buffer;
		lruent->l_pgno = pgno;
		lruent->l_flags = 0;

		/* manage spaghetti */
		lruent->l_prev = (LRU_ENT *) NULL;
		lruent->l_next = l->lru_head;
		if (l->lru_head != (LRU_ENT *) NULL)
			l->lru_head->l_prev = lruent;
		l->lru_head = lruent;
		if (l->lru_tail == (LRU_ENT *) NULL)
			l->lru_tail = lruent;

		/* add it to the hash table */
		ce = lruhashput(l, pgno, lruent);
		l->lru_cursz++;
	} else {
		lruent = l->lru_tail;

		/* find the oldest unused buffer */
		while (lruent != (LRU_ENT *) NULL
		       && !(lruent->l_flags & F_FREE))
			lruent = lruent->l_prev;

		/* if no free buffer was found, we have to grow the cache */
		if (lruent == (LRU_ENT *) NULL) {
			if (lrugrow(l) < 0)
				return ((char *) NULL);
			return ((*f)((LRU) l, pgno, nread));
		}

		/* okay, found a free buffer -- update hash table and list */
		ce = lruhashget(l, lruent->l_pgno);
		if (lruhashdel(l, lruent->l_pgno) < 0)
			return ((char *) NULL);

		/* flush the old page to disk, if necessary */
		if (lruent->l_flags & F_DIRTY)
			if (lruflush(l, lruent) < 0)
				return ((char *) NULL);

		/* update stats, hash table, and list */
		lruent->l_pgno = pgno;
		lruhead(l, lruent);
		ce = lruhashput(l, pgno, lruent);
		buffer = lruent->l_buffer;
	}

	/* lock this page down */
	lruent->l_flags &= ~F_FREE;

	return (buffer);
}

/*
 *  LRUFLUSH -- Flush an LRU page to disk.
 *
 *	This routine forces a page to disk.  Users should use lruwrite(),
 *	which simply marks a page dirty and does late writing.
 *
 *	Parameters:
 *		l -- LRU cache
 *		lruent -- the LRU cache entry whose page we should flush
 *
 *	Returns:
 *		Zero on success, -1 on failure.
 */

int
lruflush(l, lruent)
	LRUCACHE *l;
	LRU_ENT *lruent;
{
	long pos;
	int nbytes;

	if (l->lru_outproc)
		(*(l->lru_outproc))(lruent->l_buffer, l->lru_opaque);

	pos = (long) (l->lru_psize * lruent->l_pgno);
	if (lseek(l->lru_fd, pos, L_SET) != pos)
		return (-1);
	if (write(l->lru_fd, lruent->l_buffer, l->lru_psize) != l->lru_psize)
		return (-1);

	if (l->lru_inproc)
		(*(l->lru_inproc))(lruent->l_buffer, l->lru_opaque);

	lruent->l_flags &= ~F_DIRTY;
	return (0);
}

/*
 *  LRUHASHGET -- Look up a block in the LRU cache by page number.
 *
 *	Parameters:
 *		l -- LRU cache
 *		pgno -- number of the logical page to get
 *
 *	Returns:
 *		(CACHE_ENT *) pointer to the associated hash table entry
 *		(if any), or NULL (if none).
 */

static CACHE_ENT *
lruhashget(l, pgno)
	LRUCACHE *l;
	int pgno;
{
	int hashind;
	CACHE_ENT *ce;

	hashind = HASH(l, pgno);

	/* walk the chain */
	for (ce = l->lru_cache[hashind];
	     ce != (CACHE_ENT *) NULL;
	     ce = ce->c_chain) {
		if (ce->c_pgno == pgno)
			return (ce);
	}

	return ((CACHE_ENT *) NULL);
}

/*
 *  LRUHASHPUT -- Add an entry for a given page to the cache hash table.
 *
 *	This routine assumes that the given page does not yet have an entry
 *	in the table.
 *
 *	Parameters:
 *		l -- LRU cache
 *		pgno -- logical page number for this entry
 *		lruent -- LRU list entry at which hash table entry should
 *			  point
 *
 *	Returns:
 *		(CACHE_ENT *) pointer to the new hash table entry on success,
 *		or NULL on failure.
 *
 *	Side Effects:
 *		Allocates memory which should be freed when the hash table
 *		entry is removed.
 */

static CACHE_ENT *
lruhashput(l, pgno, lruent)
	LRUCACHE *l;
	int pgno;
	LRU_ENT *lruent;
{
	int hashind;
	CACHE_ENT *ce;

	if ((ce = (CACHE_ENT *) malloc((unsigned) sizeof(CACHE_ENT)))
	    == (CACHE_ENT *) NULL)
		return ((CACHE_ENT *) NULL);

	hashind = HASH(l, pgno);

	ce->c_pgno = pgno;
	ce->c_lruent = lruent;
	ce->c_chain = l->lru_cache[hashind];
	l->lru_cache[hashind] = ce;

	return (ce);
}

/*
 *  LRUHASHDEL -- Delete the entry for a given page from the LRU cache
 *		  hash table.
 *
 *	Parameters:
 *		l -- LRU cache
 *		pgno -- page number for which we should delete htable entry
 *
 *	Returns:
 *		Zero on success, -1 on failure.
 *
 *	Side Effects:
 *		Releases the memory occupied by the hash table entry.
 */

static int
lruhashdel(l, pgno)
	LRUCACHE *l;
	int pgno;
{
	CACHE_ENT *ce;
	CACHE_ENT *sce;
	int hashind;

	sce = (CACHE_ENT *) NULL;
	hashind = HASH(l, pgno);

	/* find the entry we want to delete */
	for (ce = l->lru_cache[hashind];
	     ce != (CACHE_ENT *) NULL;
	     ce = ce->c_chain) {
		if (ce->c_pgno == pgno)
			break;
		sce = ce;
	}

	if (ce == (CACHE_ENT *) NULL)
		return (-1);

	/* remove it from the chain */
	if (sce == (CACHE_ENT *) NULL)
		l->lru_cache[hashind] = ce->c_chain;
	else
		sce->c_chain = ce->c_chain;

	/* release it */
	free (ce);

	/* success */
	return (0);
}

/*
 *  LRUHEAD -- Move an LRU list entry to the head of the list.
 *
 *	The head of the list is where the most recently used item goes.
 *
 *	Parameters:
 *		l -- LRU cache
 *		lruent -- entry to move to the head of the list
 *
 *	Returns:
 *		None
 *
 *	Side Effects:
 *		Updates the cache's head and tail pointers as required.
 */

static void
lruhead(l, lruent)
	LRUCACHE *l;
	LRU_ENT *lruent;
{
	LRU_ENT *next;
	LRU_ENT *prev;

	if (l->lru_head == lruent)
		return;

	next = lruent->l_next;
	prev = lruent->l_prev;
	lruent->l_prev = (LRU_ENT *) NULL;
	lruent->l_next = l->lru_head;
	l->lru_head->l_prev = lruent;
	l->lru_head = lruent;

	prev->l_next = next;
	if (next != (LRU_ENT *) NULL)
		next->l_prev = prev;

	if (l->lru_tail == lruent)
		l->lru_tail = prev;
}

/*
 *  LRUWRITE -- Mark an LRU cache buffer dirty
 *
 *	This routine is how users should move their changes to disk.  The
 *	cache code marks the associated buffer dirty, and flushes it to
 *	disk when the buffer must be reused for some other page or when
 *	lrusync() is called.
 *
 *	Parameters:
 *		lru -- LRU cache
 *		pgno -- page number of the buffer to mark dirty
 *
 *	Returns:
 *		Zero on success, -1 on failure.
 */

int
lruwrite(lru, pgno)
	LRU lru;
	int pgno;
{
	LRUCACHE *l = (LRUCACHE *) lru;
	LRU_ENT *lruent;
	CACHE_ENT *ce;

	if ((ce = lruhashget(l, pgno)) == (CACHE_ENT *) NULL)
		return (-1);

	/* mark the entry dirty */
	ce->c_lruent->l_flags |= F_DIRTY;

	return (0);
}
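
#ifdef LRUCACHE_EXAMPLE
/*
 *  Example (a minimal usage sketch):  a typical modify cycle.  The function
 *  name example_touch_page() is hypothetical.  The write itself is deferred:
 *  lruwrite() only sets F_DIRTY, and the data goes to disk when the buffer
 *  is reused or when lrusync() is called.
 */
int
example_touch_page(lru, pgno)
	LRU lru;
	int pgno;
{
	char *buf;
	int nread;

	if ((buf = lruget(lru, pgno, &nread)) == (char *) NULL)
		return (-1);

	/* ... modify the page image in buf here ... */

	if (lruwrite(lru, pgno) < 0)		/* mark it dirty */
		return (-1);

	return (lrurelease(lru, pgno));		/* unlock the buffer */
}
#endif /* LRUCACHE_EXAMPLE */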

/*
 *  LRUSYNC -- Flush all changes to disk
 *
 *	This routine allows users to force all changes to buffers currently
 *	in memory to disk.  This is expensive.
 *
 *	Parameters:
 *		lru -- LRU cache to flush
 *
 *	Returns:
 *		Zero on success, -1 on failure
 *
 *	Side Effects:
 *		After this call, all buffers are clean.
 */

int
lrusync(lru)
	LRU lru;
{
	LRUCACHE *l = (LRUCACHE *) lru;
	LRU_ENT *le;

	for (le = l->lru_head; le != (LRU_ENT *) NULL; le = le->l_next)
		if (le->l_flags & F_DIRTY)
			if (lruflush(l, le) < 0)
				return (-1);
	return (0);
}

/*
 *  LRUGROW -- Grow the LRU cache
 *
 *	This routine is called only if we can't satisfy a user's get() request
 *	using an existing buffer.  We need to rebuild the hash table so that
 *	subsequent lookups work correctly, since the cache size is used to
 *	compute hash keys.
 *
 *	Parameters:
 *		l -- LRU cache to grow
 *
 *	Returns:
 *		Zero on success, -1 on failure
 */

static int
lrugrow(l)
	LRUCACHE *l;
{
	int oldsz, newsz;
	CACHE_ENT **new;
	CACHE_ENT *ce, *nce;
	int h;
	int i;

	oldsz = l->lru_csize;
	newsz = l->lru_csize + 1;

	/* get a new hash table */
	if ((new = (CACHE_ENT **) malloc((unsigned)newsz * sizeof(CACHE_ENT *)))
	    == (CACHE_ENT **) NULL)
		return (-1);

	/* build the new hash table */
	bzero(new, (newsz * sizeof(CACHE_ENT *)));
	for (i = oldsz; --i >= 0; ) {
		for (ce = l->lru_cache[i]; ce != (CACHE_ENT *) NULL; ) {
			nce = ce->c_chain;
			h = ce->c_pgno % newsz;
			ce->c_chain = new[h];
			new[h] = ce;
			ce = nce;
		}
	}

	/* get rid of the old hash table, and update the cache */
	free (l->lru_cache);
	l->lru_cache = new;
	l->lru_csize = newsz;

	return (0);
}
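
/*
 *  Worked example of why the rebuild above is necessary:  in a 4-bucket
 *  table, page 7 lives in bucket 7 % 4 = 3; once the table grows to 5
 *  buckets, HASH() maps it to 7 % 5 = 2.  Without rechaining every entry,
 *  lruhashget() would probe the wrong bucket and miss pages that are still
 *  cached.
 */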

/*
 *  LRURELEASE -- Release a buffer in the LRU cache for reuse
 *
 *	When the user does an lruget() or lrugetnew(), the buffer we return
 *	is locked down, to guarantee that it's not reused while the user
 *	still needs it.  Once a buffer is no longer needed, it should be
 *	released (via this routine) so that we can use it for other pages
 *	if necessary.
 *
 *	Parameters:
 *		lru -- LRU cache
 *		pgno -- page number of buffer to release
 *
 *	Returns:
 *		Zero on success, -1 on failure
 */

int
lrurelease(lru, pgno)
	LRU lru;
	int pgno;
{
	LRUCACHE *l = (LRUCACHE *) lru;
	CACHE_ENT *ce;

	if ((ce = lruhashget(l, pgno)) == (CACHE_ENT *) NULL)
		return (-1);
	ce->c_lruent->l_flags |= F_FREE;
	return (0);
}

/*
 *  LRUFREE -- Free an entire LRU cache
 *
 *	This routine releases an LRU cache.  The cache should not be
 *	used again.
 *
 *	Parameters:
 *		lru -- LRU cache to free
 *
 *	Returns:
 *		None.
 *
 *	Side Effects:
 *		Frees a lot of memory.
 */

void
lrufree(lru)
	LRU lru;
{
	LRUCACHE *l = (LRUCACHE *) lru;
	LRU_ENT *le;
	LRU_ENT *sle;
	CACHE_ENT *ce;
	CACHE_ENT *sce;
	int i;

	/* free the page buffers and the LRU list entries */
	for (le = l->lru_head; le != (LRU_ENT *) NULL; ) {
		free(le->l_buffer);
		sle = le;
		le = le->l_next;
		free(sle);
	}

	/* free the hash table entries and the table itself */
	for (i = 0; i < l->lru_csize; i++) {
		for (ce = l->lru_cache[i]; ce != (CACHE_ENT *) NULL; ) {
			sce = ce;
			ce = ce->c_chain;
			free(sce);
		}
	}
	free (l->lru_cache);

	free (l);
}
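
#ifdef LRUCACHE_EXAMPLE
/*
 *  Example (a minimal usage sketch):  shutting a cache down.  The function
 *  name example_close() is hypothetical.  lrufree() does not flush dirty
 *  buffers or close the file descriptor, so sync first and close the fd
 *  afterward.
 */
int
example_close(lru, fd)
	LRU lru;
	int fd;
{
	if (lrusync(lru) < 0)		/* force dirty pages to disk */
		return (-1);
	lrufree(lru);			/* release the cache's memory */
	return (close(fd));
}
#endif /* LRUCACHE_EXAMPLE */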