/* $NetBSD: bufcache.c,v 1.26 2013/10/18 20:47:07 christos Exp $ */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.26 2013/10/18 20:47:07 christos Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <err.h>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdbool.h>

#include <miscfs/specfs/specdev.h>

#include "systat.h"
#include "extern.h"

#define VCACHE_SIZE	50
#define PAGEINFO_ROWS	5

struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

struct ml_entry {
	u_int ml_count;
	u_long ml_size;
	u_long ml_valid;
	struct mount *ml_addr;
	LIST_ENTRY(ml_entry) ml_entries;
	struct mount ml_mount;
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static uint64_t bufmem;
static u_int nbuf, pgwidth, kbwidth;
static struct uvmexp_sysctl uvmexp;

static void vc_init(void);
static void ml_init(void);
static struct vnode *vc_lookup(struct vnode *);
static struct mount *ml_lookup(struct mount *, int, int);
static void fetchuvmexp(void);


WINDOW *
openbufcache(void)
{

	return (subwin(stdscr, -1, 0, 5, 0));
}

void
closebufcache(WINDOW *w)
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

void
labelbufcache(void)
{
	int i;

	for (i = 0; i <= PAGEINFO_ROWS; i++) {
		wmove(wnd, i, 0);
		wclrtoeol(wnd);
	}
	mvwaddstr(wnd, PAGEINFO_ROWS + 1, 0, "File System Bufs used"
	    " % kB in use % Bufsize kB % Util %");
	wclrtoeol(wnd);
}
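
/*
 * Redraw the display: the five page-usage summary lines at the top
 * (metadata buffers, cached file data, executables, anonymous data and
 * free pages), followed by one line per mounted file system from the
 * mount list and a totals line.
 */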
void
showbufcache(void)
{
	int tbuf, i, lastrow;
	double tvalid, tsize;
	struct ml_entry *ml;
	size_t len;

	len = sizeof(bufmem);
	if (sysctlbyname("vm.bufmem", &bufmem, &len, NULL, 0))
		error("can't get \"vm.bufmem\": %s", strerror(errno));

	mvwprintw(wnd, 0, 0,
	    " %*d metadata buffers using %*"PRIu64" kBytes of "
	    "memory (%2.0f%%).",
	    pgwidth, nbuf, kbwidth, bufmem / 1024,
	    ((bufmem * 100.0) + 0.5) / getpagesize() / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 1, 0,
	    " %*" PRIu64 " pages for cached file data using %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.filepages,
	    kbwidth, uvmexp.filepages * getpagesize() / 1024,
	    (uvmexp.filepages * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 2, 0,
	    " %*" PRIu64 " pages for executables using %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.execpages,
	    kbwidth, uvmexp.execpages * getpagesize() / 1024,
	    (uvmexp.execpages * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 3, 0,
	    " %*" PRIu64 " pages for anon (non-file) data %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.anonpages,
	    kbwidth, uvmexp.anonpages * getpagesize() / 1024,
	    (uvmexp.anonpages * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);
	mvwprintw(wnd, 4, 0,
	    " %*" PRIu64 " free pages %*"
	    PRIu64 " kBytes of memory (%2.0f%%).",
	    pgwidth, uvmexp.free,
	    kbwidth, uvmexp.free * getpagesize() / 1024,
	    (uvmexp.free * 100 + 0.5) / uvmexp.npages);
	wclrtoeol(wnd);

	if (nbuf == 0 || bufmem == 0) {
		wclrtobot(wnd);
		return;
	}

	tbuf = 0;
	tvalid = tsize = 0;
	lastrow = PAGEINFO_ROWS + 2;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		int cnt = ml->ml_count;
		double v = ml->ml_valid;
		double s = ml->ml_size;

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			wprintw(wnd,
			    " %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
			    cnt, (100 * cnt) / nbuf,
			    (long)(v/1024), 100 * v / bufmem,
			    (long)(s/1024), 100 * s / bufmem,
			    100 * v / s);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += cnt;
		tvalid += v;
		tsize += s;
	}

	wclrtobot(wnd);
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s %6d %3d %8ld %3.0f %8ld %3.0f %3.0f",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    (long)(tvalid/1024), 100 * tvalid / bufmem,
	    (long)(tsize/1024), 100 * tsize / bufmem,
	    tsize != 0 ? ((100 * tvalid) / tsize) : 0);
}
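
/*
 * One-time setup: fetch the initial uvmexp statistics and size the
 * page-count and kilobyte display columns from the total page count.
 */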
int
initbufcache(void)
{
	fetchuvmexp();
	pgwidth = (int)(floor(log10((double)uvmexp.npages)) + 1);
	kbwidth = (int)(floor(log10(uvmexp.npages * getpagesize() / 1024.0)) +
	    1);

	return(1);
}

static void
fetchuvmexp(void)
{
	int mib[2];
	size_t size;

	/* Re-read pages used for vnodes & executables */
	size = sizeof(uvmexp);
	mib[0] = CTL_VM;
	mib[1] = VM_UVMEXP2;
	if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
		error("can't get uvmexp: %s\n", strerror(errno));
		memset(&uvmexp, 0, sizeof(uvmexp));
	}
}

void
fetchbufcache(void)
{
	int count;
	struct buf_sysctl *bp, *buffers;
	struct vnode *vn;
	struct ml_entry *ml;
	int mib[6];
	size_t size;
	int extraslop = 0;

	/* Re-read pages used for vnodes & executables */
	fetchuvmexp();

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();

	/* Get metadata buffers */
	size = 0;
	buffers = NULL;
	mib[0] = CTL_KERN;
	mib[1] = KERN_BUF;
	mib[2] = KERN_BUF_ALL;
	mib[3] = KERN_BUF_ALL;
	mib[4] = (int)sizeof(struct buf_sysctl);
	mib[5] = INT_MAX;	/* we want them all */
again:
	if (sysctl(mib, 6, NULL, &size, NULL, 0) < 0) {
		error("can't get buffers size: %s\n", strerror(errno));
		return;
	}
	if (size == 0)
		return;

	size += extraslop * sizeof(struct buf_sysctl);
	buffers = malloc(size);
	if (buffers == NULL) {
		error("can't allocate buffers: %s\n", strerror(errno));
		return;
	}
	if (sysctl(mib, 6, buffers, &size, NULL, 0) < 0) {
		free(buffers);
		if (extraslop == 0) {
			extraslop = 100;
			goto again;
		}
		error("can't get buffers: %s\n", strerror(errno));
		return;
	}

	nbuf = size / sizeof(struct buf_sysctl);
	for (bp = buffers; bp < buffers + nbuf; bp++) {
		if (UINT64TOPTR(bp->b_vp) != NULL) {
			struct mount *mp;
			vn = vc_lookup(UINT64TOPTR(bp->b_vp));
			if (vn == NULL)
				break;

			mp = vn->v_mount;
			/*
			 * References to mounted-on vnodes should be
			 * counted towards the mounted filesystem.
			 */
			if (vn->v_type == VBLK && vn->v_specnode != NULL) {
				specnode_t sn;
				specdev_t sd;
				if (!KREAD(vn->v_specnode, &sn, sizeof(sn)))
					continue;
				if (!KREAD(sn.sn_dev, &sd, sizeof(sd)))
					continue;
				if (sd.sd_mountpoint)
					mp = sd.sd_mountpoint;
			}
			if (mp != NULL)
				(void)ml_lookup(mp, bp->b_bufsize,
				    bp->b_bcount);
		}
	}

	/* simple sort - there's not that many entries */
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);

	free(buffers);
}

static void
vc_init(void)
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

static void
ml_init(void)
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}
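
/*
 * Look up a kernel vnode address in the small local cache, reading the
 * vnode contents with KREAD() on a miss.  Entries age on every lookup
 * pass and a hit resets an entry's age; when the cache is full an
 * existing slot is recycled for the new vnode.
 */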
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
	struct vnode *ret;
	size_t i, oldest;

	ret = NULL;
	oldest = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		if (vcache[i].vc_addr == NULL)
			break;
		vcache[i].vc_age++;
		if (vcache[i].vc_age < vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			ret = &vcache[i].vc_node;
		}
	}

	/* Find an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Go past the end of the cache? */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	if (KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode)) == 0)
		return NULL;
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size;
			ml->ml_valid += valid;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size;
	ml->ml_valid = valid;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}