/*	$NetBSD: bufcache.c,v 1.7 2000/04/27 00:30:51 jdc Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.7 2000/04/27 00:30:51 jdc Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/vnode.h>

#include <err.h>
#include <kvm.h>
#include <nlist.h>
#include <paths.h>
#include <stdlib.h>
#include <unistd.h>

#include "systat.h"
#include "extern.h"


/*
 * Definitions for the buffer free lists (from sys/kern/vfs_bio.c).
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

#define VCACHE_SIZE	50

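/*
 * Small cache of recently seen vnodes, read from the kernel with KREAD()
 * and recycled oldest-first when the cache is full.
 */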
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

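/*
 * Per-mount-point totals: the number of buffers, their allocated size and
 * the amount of valid data (both in kB), plus a copy of the kernel's
 * struct mount.
 */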
struct ml_entry {
	int ml_count;
	long ml_size;
	long ml_valid;
	struct mount *ml_addr;
	struct mount ml_mount;
	LIST_ENTRY(ml_entry) ml_entries;
};

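/* Kernel symbols looked up via kvm_nlist(). */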
static struct nlist namelist[] = {
#define	X_NBUF		0
	{ "_nbuf" },
#define	X_BUF		1
	{ "_buf" },
#define	X_BUFQUEUES	2
	{ "_bufqueues" },
#define	X_BUFPAGES	3
	{ "_bufpages" },
	{ "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static int nbuf, bufpages, bufkb;
static void *bufaddr;
static struct buf *buf = NULL;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

static void	vc_init __P((void));
static void	ml_init __P((void));
static struct 	vnode *vc_lookup __P((struct vnode *));
static struct 	mount *ml_lookup __P((struct mount *, int, int));


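/*
 * Open the display sub-window: it starts below the first five screen
 * lines and stops one line short of the bottom of the screen.
 */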
WINDOW *
openbufcache()
{

	return (subwin(stdscr, LINES-5-1, 0, 5, 0));
}

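/* Close the display window and discard the accumulated mount list. */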
void
closebufcache(w)
	WINDOW *w;
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

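/* Draw the static heading: buffer/memory totals and the column titles. */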
void
labelbufcache()
{
	mvwprintw(wnd, 0, 0, "There are %d buffers using %d kBytes of memory.",
	    nbuf, bufkb);
	wclrtoeol(wnd);
	wmove(wnd, 1, 0);
	wclrtoeol(wnd);
	mvwaddstr(wnd, 2, 0,
"File System          Bufs used   %   kB in use   %  Bufsize kB   %  Util %");
	wclrtoeol(wnd);
}

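/*
 * Display the per-filesystem statistics collected by fetchbufcache(),
 * followed by a summary line of the totals.
 */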
void
showbufcache()
{
	int tbuf, i, lastrow;
	long tvalid, tsize;
	struct ml_entry *ml;

	tbuf = tvalid = tsize = 0;
	lastrow = 3;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			/* Guard the utilisation figure against a zero size. */
			wprintw(wnd,
			    "    %6d %3d    %8ld %3ld    %8ld %3ld     %3ld",
			    ml->ml_count, (100 * ml->ml_count) / nbuf,
			    ml->ml_valid, (100 * ml->ml_valid) / bufkb,
			    ml->ml_size, (100 * ml->ml_size) / bufkb,
			    ml->ml_size ?
			    (100 * ml->ml_valid) / ml->ml_size : 0L);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += ml->ml_count;
		tvalid += ml->ml_valid;
		tsize += ml->ml_size;
	}

	wclrtobot(wnd);
	/* The mount list may be empty, so guard against tsize == 0. */
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s    %6d %3d    %8ld %3ld    %8ld %3ld     %3ld",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    tvalid, (100 * tvalid) / bufkb,
	    tsize, (100 * tsize) / bufkb,
	    tsize ? (100 * tvalid) / tsize : 0L);
}

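/*
 * Look up the kernel symbols (once) and allocate a local copy of the
 * buffer headers.
 */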
int
initbufcache()
{
	if (namelist[X_NBUF].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
		if (namelist[X_NBUF].n_type == 0) {
			error("namelist on %s failed", _PATH_UNIX);
			return(0);
		}
	}

	NREAD(X_NBUF, &nbuf, sizeof(nbuf));
	NREAD(X_BUFPAGES, &bufpages, sizeof(bufpages));
	bufkb = bufpages * sysconf(_SC_PAGESIZE) / 1024;

	if ((buf = malloc(nbuf * sizeof(struct buf))) == NULL) {
		error("malloc failed");
		die(0);
	}
	NREAD(X_BUF, &bufaddr, sizeof(bufaddr));

	return(1);
}

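/*
 * Walk the kernel's buffer free lists and accumulate, per mount point,
 * how many buffers reference each file system and how much of the cache
 * they occupy; the mount list is then sorted by buffer count.
 */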
void
fetchbufcache()
{
	int i, count;
	struct buf *bp;
	struct vnode *vn;
	struct mount *mt;
	struct ml_entry *ml;

	/* Re-read bufqueues lists and buffer cache headers */
	NREAD(X_BUFQUEUES, bufqueues, sizeof(bufqueues));
	KREAD(bufaddr, buf, sizeof(struct buf) * nbuf);

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();
	for (i = 0; i < BQUEUES; i++) {
		for (bp = bufqueues[i].tqh_first; bp != NULL;
		    bp = bp->b_freelist.tqe_next) {
			/*
			 * Map the kernel address of this buffer header to
			 * its copy in our local buf array.
			 */
			bp = (struct buf *)((u_long)bp + (u_long)buf -
			    (u_long)bufaddr);

			if (bp->b_vp != NULL) {
				vn = vc_lookup(bp->b_vp);
				if (vn == NULL)
					errx(1, "vc_lookup returns NULL!");
				if (vn->v_mount != NULL)
					mt = ml_lookup(vn->v_mount,
					    bp->b_bufsize,
					    bp->b_bcount);
			}
		}
	}

	/*
	 * Simple sort - there aren't that many entries.  Any entry whose
	 * count exceeds its predecessor's is moved to the head of the
	 * list; passes repeat until no moves are made, leaving the list
	 * in descending order of buffer count.
	 */
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);
}

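/* Mark all vnode cache entries unused. */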
static void
vc_init()
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

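/* Release the mount list built by the previous update. */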
static void
ml_init()
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}


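/*
 * Return a cached copy of the vnode at kernel address vaddr, reading it
 * from the kernel and recycling the oldest entry if it is not already
 * cached.
 */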
static struct vnode *
vc_lookup(vaddr)
	struct vnode *vaddr;
{
	struct vnode *ret;
	int i, oldest;

	ret = NULL;
	oldest = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		vcache[i].vc_age++;
		if (vcache[i].vc_addr == NULL)
			break;
		/* The oldest entry is the one with the largest age. */
		if (vcache[i].vc_age > vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			ret = &vcache[i].vc_node;
		}
	}

	/* Found an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Ran off the end of the cache?  Recycle the oldest entry. */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode));
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

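/*
 * Find (or create) the mount list entry for kernel address maddr and add
 * this buffer's allocated and valid byte counts (in kB) to its totals.
 */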
static struct mount *
ml_lookup(maddr, size, valid)
	struct mount *maddr;
	int size, valid;
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size / 1024;
			ml->ml_valid += valid / 1024;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size / 1024;
	ml->ml_valid = valid / 1024;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}
368