/*	$NetBSD: bufcache.c,v 1.30 2020/03/02 09:50:12 mrg Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.30 2020/03/02 09:50:12 mrg Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#define __EXPOSE_MOUNT
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdbool.h>

#include <miscfs/specfs/specdev.h>

#include "systat.h"
#include "extern.h"

#define VCACHE_SIZE	50
#define PAGEINFO_ROWS	5

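/*
 * A cached copy of a kernel vnode, keyed by its kernel address.
 * vc_age counts lookups since the entry was last hit.
 */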
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

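/*
 * Per-mount totals: number of buffers, their allocated size and the
 * amount of valid data they hold, plus a copy of the kernel mount
 * structure so the mount point name can be displayed.
 */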
struct ml_entry {
	u_int ml_count;
	u_long ml_size;
	u_long ml_valid;
	struct mount *ml_addr;
	LIST_ENTRY(ml_entry) ml_entries;
	struct mount ml_mount;
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static uint64_t bufmem;
static u_int nbuf, pgwidth, kbwidth;
static struct uvmexp_sysctl uvmexp;

static void	vc_init(void);
static void	ml_init(void);
static struct vnode *vc_lookup(struct vnode *);
static struct mount *ml_lookup(struct mount *, int, int);
static void	fetchuvmexp(void);


WINDOW *
openbufcache(void)
{

	return (subwin(stdscr, -1, 0, 5, 0));
}

void
closebufcache(WINDOW *w)
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

void
labelbufcache(void)
{
	int i;

	for (i = 0; i <= PAGEINFO_ROWS; i++) {
		wmove(wnd, i, 0);
		wclrtoeol(wnd);
	}
	mvwaddstr(wnd, PAGEINFO_ROWS + 1, 0,
	    "File System Bufs used % kB in use % Bufsize kB % Util %");
	wclrtoeol(wnd);
}

void
showbufcache(void)
{
	int tbuf, i, lastrow;
	double tvalid, tsize;
	struct ml_entry *ml;
	size_t len;
	static int mib[] = { -1, 0 };

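	/* Resolve the "vm.bufmem" sysctl MIB once and cache it in mib[]. */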
	if (mib[0] == -1) {
		len = __arraycount(mib);
		if (sysctlnametomib("vm.bufmem", mib, &len) == -1)
			error("can't get \"vm.bufmem\" mib: %s",
			    strerror(errno));
	}
	len = sizeof(bufmem);
	if (sysctl(mib, 2, &bufmem, &len, NULL, 0) == -1)
		error("can't get \"vm.bufmem\": %s", strerror(errno));

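	/*
	 * Five summary rows: metadata buffers, cached file pages,
	 * executable pages, anonymous pages and free pages, each shown
	 * as a count, kBytes and a percentage of uvmexp.npages.
	 */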
	mvwprintw(wnd, 0, 0,
	    " %*d metadata buffers using %*"PRIu64" kBytes of "
	    "memory (%2.0f%%).",
	    pgwidth, nbuf, kbwidth, bufmem / 1024,
	    ((bufmem * 100.0) + 0.5) / getpagesize() / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 1, 0,
	    " %*" PRIu64 " pages for cached file data using %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.filepages,
	    kbwidth, uvmexp.filepages * getpagesize() / 1024,
	    (uvmexp.filepages * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 2, 0,
	    " %*" PRIu64 " pages for executables using %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.execpages,
	    kbwidth, uvmexp.execpages * getpagesize() / 1024,
	    (uvmexp.execpages * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 3, 0,
	    " %*" PRIu64 " pages for anon (non-file) data %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.anonpages,
	    kbwidth, uvmexp.anonpages * getpagesize() / 1024,
	    (uvmexp.anonpages * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 4, 0,
	    " %*" PRIu64 " free pages %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.free,
	    kbwidth, uvmexp.free * getpagesize() / 1024,
	    (uvmexp.free * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);

	if (nbuf == 0 || bufmem == 0) {
		wclrtobot(wnd);
		return;
	}

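	/*
	 * Walk the mount list: entries that fit in the window are
	 * displayed, but every entry is accumulated into the totals
	 * printed at the end.
	 */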
	tbuf = 0;
	tvalid = tsize = 0;
	lastrow = PAGEINFO_ROWS + 2;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		int cnt = ml->ml_count;
		double v = ml->ml_valid;
		double s = ml->ml_size;

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			wprintw(wnd,
			    " %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
			    cnt, (100 * cnt) / nbuf,
			    (long)(v/1024), 100 * v / bufmem,
			    (long)(s/1024), 100 * s / bufmem,
			    100 * v / s);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += cnt;
		tvalid += v;
		tsize += s;
	}

	wclrtobot(wnd);
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    (long)(tvalid/1024), 100 * tvalid / bufmem,
	    (long)(tsize/1024), 100 * tsize / bufmem,
	    tsize != 0 ? ((100 * tvalid) / tsize) : 0);
}

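/*
 * Size the page-count and kByte columns from the total page count so
 * the summary rows line up regardless of how much memory the machine has.
 */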
int
initbufcache(void)
{
	fetchuvmexp();
	pgwidth = (int)(floor(log10((double)uvmexp.npages)) + 1);
	kbwidth = (int)(floor(log10(uvmexp.npages * getpagesize() / 1024.0)) +
	    1);

	return(1);
}

static void
fetchuvmexp(void)
{
	int mib[2];
	size_t size;

	/* Re-read pages used for vnodes & executables */
	size = sizeof(uvmexp);
	mib[0] = CTL_VM;
	mib[1] = VM_UVMEXP2;
	if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
		error("can't get uvmexp: %s\n", strerror(errno));
		memset(&uvmexp, 0, sizeof(uvmexp));
	}
}

void
fetchbufcache(void)
{
	int count;
	struct buf_sysctl *bp, *buffers;
	struct vnode *vn;
	struct ml_entry *ml;
	int mib[6];
	size_t size;
	int extraslop = 0;

	/* Re-read pages used for vnodes & executables */
	fetchuvmexp();

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();

	/* Get metadata buffers */
	size = 0;
	buffers = NULL;
	mib[0] = CTL_KERN;
	mib[1] = KERN_BUF;
	mib[2] = KERN_BUF_ALL;
	mib[3] = KERN_BUF_ALL;
	mib[4] = (int)sizeof(struct buf_sysctl);
	mib[5] = INT_MAX;	/* we want them all */
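	/*
	 * Size the request first, then fetch.  If the fetch fails
	 * (presumably because the buffer list grew between the two
	 * calls), retry with room for a few hundred extra entries.
	 */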
again:
	if (sysctl(mib, 6, NULL, &size, NULL, 0) < 0) {
		error("can't get buffers size: %s\n", strerror(errno));
		return;
	}
	if (size == 0)
		return;

	size += extraslop * sizeof(struct buf_sysctl);
	buffers = malloc(size);
	if (buffers == NULL) {
		error("can't allocate buffers: %s\n", strerror(errno));
		return;
	}
	if (sysctl(mib, 6, buffers, &size, NULL, 0) < 0) {
		free(buffers);
		if (extraslop < 1000) {
			extraslop += 100;
			goto again;
		}
		error("can't get buffers: %s\n", strerror(errno));
		return;
	}

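	/*
	 * Attribute each buffer to the mount point of its vnode, reading
	 * vnodes through the small vnode cache (vc_lookup) below.
	 */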
	nbuf = size / sizeof(struct buf_sysctl);
	for (bp = buffers; bp < buffers + nbuf; bp++) {
		if (UINT64TOPTR(bp->b_vp) != NULL) {
			struct mount *mp;
			vn = vc_lookup(UINT64TOPTR(bp->b_vp));
			if (vn == NULL)
				break;

			mp = vn->v_mount;
			/*
			 * References to mounted-on vnodes should be
			 * counted towards the mounted filesystem.
			 */
			if (vn->v_type == VBLK && vn->v_specnode != NULL) {
				specnode_t sn;
				specdev_t sd;
				if (!KREAD(vn->v_specnode, &sn, sizeof(sn)))
					continue;
				if (!KREAD(sn.sn_dev, &sd, sizeof(sd)))
					continue;
				if (sd.sd_mountpoint)
					mp = sd.sd_mountpoint;
			}
			if (mp != NULL)
				(void)ml_lookup(mp, bp->b_bufsize,
				    bp->b_bcount);
		}
	}

	/* simple sort - there's not that many entries */
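	/*
	 * Each pass moves an entry with a larger ml_count than its
	 * predecessor to the head of the list; passes repeat until no
	 * such entry remains, leaving the list in non-increasing order
	 * of buffer count.
	 */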
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);

	free(buffers);
}

static void
vc_init(void)
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

static void
ml_init(void)
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}


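/*
 * Return a local copy of the kernel vnode at vaddr, going through a
 * small cache so that buffers sharing a vnode do not cause repeated
 * kernel reads.
 */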
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
	struct vnode *ret;
	size_t i, oldest;

	ret = NULL;
	oldest = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		if (vcache[i].vc_addr == NULL)
			break;
		vcache[i].vc_age++;
		/* Track the least recently used entry (largest age). */
		if (vcache[i].vc_age > vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			ret = &vcache[i].vc_node;
		}
	}

	/* Find an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Go past the end of the cache? */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	if (KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode)) == 0)
		return NULL;
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

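/*
 * Find (or create) the per-mount statistics entry for maddr and add
 * this buffer's size and valid byte count to it.  Buffers with no
 * associated mount are collected under a NULL entry.
 */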
static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size;
			ml->ml_valid += valid;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size;
	ml->ml_valid = valid;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}
