/*	$NetBSD: uvm_meter.c,v 1.63 2014/02/26 20:33:53 martin Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_meter.c	8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.63 2014/02/26 20:33:53 martin Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * maxslp: ???? XXXCDC
 */

int maxslp = MAXSLP;	/* patchable ... */
struct loadavg averunnable;

static void uvm_total(struct vmtotal *);

/*
 * sysctl helper routine for the vm.vmmeter node.
 */
static int
sysctl_vm_meter(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct vmtotal vmtotals;

	node = *rnode;
	node.sysctl_data = &vmtotals;
	uvm_total(&vmtotals);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for the vm.uvmexp node.
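 *
 * Copies the kernel's struct uvmexp out to userland, truncating the
 * copy to the caller's buffer size so a smaller (e.g. older) userland
 * structure still receives the leading fields it knows about.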
 */
static int
sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	node = *rnode;
	if (oldlenp)
		node.sysctl_size = min(*oldlenp, node.sysctl_size);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct uvmexp_sysctl u;
	int active, inactive;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	uvm_estimatepageable(&active, &inactive);

	memset(&u, 0, sizeof(u));

	/* Entries here are in order of uvmexp_sysctl, not uvmexp */
	u.pagesize = uvmexp.pagesize;
	u.pagemask = uvmexp.pagemask;
	u.pageshift = uvmexp.pageshift;
	u.npages = uvmexp.npages;
	u.free = uvmexp.free;
	u.active = active;
	u.inactive = inactive;
	u.paging = uvmexp.paging;
	u.wired = uvmexp.wired;
	u.zeropages = uvmexp.zeropages;
	u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
	u.reserve_kernel = uvmexp.reserve_kernel;
	u.freemin = uvmexp.freemin;
	u.freetarg = uvmexp.freetarg;
	u.inactarg = 0; /* unused */
	u.wiredmax = uvmexp.wiredmax;
	u.nswapdev = uvmexp.nswapdev;
	u.swpages = uvmexp.swpages;
	u.swpginuse = uvmexp.swpginuse;
	u.swpgonly = uvmexp.swpgonly;
	u.nswget = uvmexp.nswget;
	for (CPU_INFO_FOREACH(cii, ci)) {
		u.faults += ci->ci_data.cpu_nfault;
		u.traps += ci->ci_data.cpu_ntrap;
		u.intrs += ci->ci_data.cpu_nintr;
		u.swtch += ci->ci_data.cpu_nswtch;
		u.softs += ci->ci_data.cpu_nsoft;
		u.syscalls += ci->ci_data.cpu_nsyscall;
	}
	u.pageins = uvmexp.pageins;
	u.pgswapin = uvmexp.pgswapin;
	u.pgswapout = uvmexp.pgswapout;
	u.forks = uvmexp.forks;
	u.forks_ppwait = uvmexp.forks_ppwait;
	u.forks_sharevm = uvmexp.forks_sharevm;
	u.pga_zerohit = uvmexp.pga_zerohit;
	u.pga_zeromiss = uvmexp.pga_zeromiss;
	u.zeroaborts = uvmexp.zeroaborts;
	u.fltnoram = uvmexp.fltnoram;
	u.fltnoanon = uvmexp.fltnoanon;
	u.fltpgwait = uvmexp.fltpgwait;
	u.fltpgrele = uvmexp.fltpgrele;
	u.fltrelck = uvmexp.fltrelck;
	u.fltrelckok = uvmexp.fltrelckok;
	u.fltanget = uvmexp.fltanget;
	u.fltanretry = uvmexp.fltanretry;
	u.fltamcopy = uvmexp.fltamcopy;
	u.fltnamap = uvmexp.fltnamap;
	u.fltnomap = uvmexp.fltnomap;
	u.fltlget = uvmexp.fltlget;
	u.fltget = uvmexp.fltget;
	u.flt_anon = uvmexp.flt_anon;
	u.flt_acow = uvmexp.flt_acow;
	u.flt_obj = uvmexp.flt_obj;
	u.flt_prcopy = uvmexp.flt_prcopy;
	u.flt_przero = uvmexp.flt_przero;
	u.pdwoke = uvmexp.pdwoke;
	u.pdrevs = uvmexp.pdrevs;
	u.pdfreed = uvmexp.pdfreed;
	u.pdscans = uvmexp.pdscans;
	u.pdanscan = uvmexp.pdanscan;
	u.pdobscan = uvmexp.pdobscan;
	u.pdreact = uvmexp.pdreact;
	u.pdbusy = uvmexp.pdbusy;
	u.pdpageouts = uvmexp.pdpageouts;
	u.pdpending = uvmexp.pdpending;
	u.pddeact = uvmexp.pddeact;
	u.anonpages = uvmexp.anonpages;
	u.filepages = uvmexp.filepages;
	u.execpages = uvmexp.execpages;
	u.colorhit = uvmexp.colorhit;
	u.colormiss = uvmexp.colormiss;
	u.cpuhit = uvmexp.cpuhit;
	u.cpumiss = uvmexp.cpumiss;

	node = *rnode;
	node.sysctl_data = &u;
	node.sysctl_size = sizeof(u);
	if (oldlenp)
		node.sysctl_size = min(*oldlenp, node.sysctl_size);
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for uvm_pctparam.
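 *
 * Reads or writes a percentage value in the range 0..100.  On a write,
 * the parameter's optional pct_check hook is run first; if it accepts
 * the value, uvm_pctparam_set() stores it and recomputes the scaled
 * form used elsewhere in UVM.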
 */
static int
uvm_sysctlpctparam(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;
	struct uvm_pctparam *pct;

	pct = rnode->sysctl_data;
	t = pct->pct_pct;

	node = *rnode;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (t < 0 || t > 100)
		return EINVAL;

	error = uvm_pctparam_check(pct, t);
	if (error) {
		return error;
	}
	uvm_pctparam_set(pct, t);

	return (0);
}

/*
 * uvm_sysctl: sysctl hook into UVM system.
 */
SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "vmmeter",
	    SYSCTL_DESCR("Simple system-wide virtual memory "
		"statistics"),
	    sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
	    CTL_VM, VM_METER, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "loadavg",
	    SYSCTL_DESCR("System load average history"),
	    NULL, 0, &averunnable, sizeof(averunnable),
	    CTL_VM, VM_LOADAVG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "uvmexp",
	    SYSCTL_DESCR("Detailed system-wide virtual memory "
		"statistics"),
	    sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
	    CTL_VM, VM_UVMEXP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "uvmexp2",
	    SYSCTL_DESCR("Detailed system-wide virtual memory "
		"statistics (MI)"),
	    sysctl_vm_uvmexp2, 0, NULL, 0,
	    CTL_VM, VM_UVMEXP2, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
	    SYSCTL_DESCR("Maximum process sleep time before being "
		"swapped"),
	    NULL, 0, &maxslp, 0,
	    CTL_VM, VM_MAXSLP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	    CTLTYPE_INT, "uspace",
	    SYSCTL_DESCR("Number of bytes allocated for a kernel "
		"stack"),
	    NULL, USPACE, NULL, 0,
	    CTL_VM, VM_USPACE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_BOOL, "idlezero",
	    SYSCTL_DESCR("Whether to try to zero pages in the idle loop"),
	    NULL, 0, &vm_page_zero_enable, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	    CTLTYPE_LONG, "minaddress",
	    SYSCTL_DESCR("Minimum user address"),
	    NULL, VM_MIN_ADDRESS, NULL, 0,
	    CTL_VM, VM_MINADDRESS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
	    CTLTYPE_LONG, "maxaddress",
	    SYSCTL_DESCR("Maximum user address"),
	    NULL, VM_MAX_ADDRESS, NULL, 0,
	    CTL_VM, VM_MAXADDRESS, CTL_EOL);

	uvmpdpol_sysctlsetup();
}

/*
 * uvm_total: calculate the current state of the system.
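 *
 * Fills in the given struct vmtotal: walks all LWPs (skipping system
 * processes) to count runnable, sleeping and stopped threads, then
 * derives the memory totals from the global uvmexp counters and
 * uvm_estimatepageable().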
 */
static void
uvm_total(struct vmtotal *totalp)
{
	struct lwp *l;
#if 0
	struct vm_map_entry *entry;
	struct vm_map *map;
	int paging;
#endif
	int active;

	memset(totalp, 0, sizeof *totalp);

	/*
	 * calculate process statistics
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_proc->p_flag & PK_SYSTEM)
			continue;
		switch (l->l_stat) {
		case 0:
			continue;

		case LSSLEEP:
		case LSSTOP:
			if ((l->l_flag & LW_SINTR) == 0) {
				totalp->t_dw++;
			} else if (l->l_slptime < maxslp) {
				totalp->t_sl++;
			}
			if (l->l_slptime >= maxslp)
				continue;
			break;

		case LSRUN:
		case LSONPROC:
		case LSIDL:
			totalp->t_rq++;
			if (l->l_stat == LSIDL)
				continue;
			break;
		}
		/*
		 * note active objects
		 */
#if 0
		/*
		 * XXXCDC: BOGUS!  rethink this.  in the mean time
		 * don't do it.
		 */
		paging = 0;
		map = &l->l_proc->p_vmspace->vm_map;
		vm_map_lock(map);
		for (entry = map->header.next; entry != &map->header;
		    entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.uvm_obj == NULL)
				continue;
			/* XXX how to do this with uvm */
		}
		vm_map_unlock(map);
		if (paging)
			totalp->t_pw++;
#endif
	}
	mutex_exit(proc_lock);

	/*
	 * Calculate object memory usage statistics.
	 */
	uvm_estimatepageable(&active, NULL);
	totalp->t_free = uvmexp.free;
	totalp->t_vm = uvmexp.npages - uvmexp.free + uvmexp.swpginuse;
	totalp->t_avm = active + uvmexp.swpginuse;	/* XXX */
	totalp->t_rm = uvmexp.npages - uvmexp.free;
	totalp->t_arm = active;
	totalp->t_vmshr = 0;		/* XXX */
	totalp->t_avmshr = 0;		/* XXX */
	totalp->t_rmshr = 0;		/* XXX */
	totalp->t_armshr = 0;		/* XXX */
}

void
uvm_pctparam_set(struct uvm_pctparam *pct, int val)
{

	pct->pct_pct = val;
	pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
}

int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

	return pct->pct_pct;
}

int
uvm_pctparam_check(struct uvm_pctparam *pct, int val)
{

	if (pct->pct_check == NULL) {
		return 0;
	}
	return (*pct->pct_check)(pct, val);
}

void
uvm_pctparam_init(struct uvm_pctparam *pct, int val,
    int (*fn)(struct uvm_pctparam *, int))
{

	pct->pct_check = fn;
	uvm_pctparam_set(pct, val);
}

int
uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
    const char *desc)
{

	return sysctl_createv(NULL, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, name, SYSCTL_DESCR(desc),
	    uvm_sysctlpctparam, 0, (void *)pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}
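
/*
 * Illustrative sketch (not part of the original file): a userland
 * consumer of the vm.uvmexp2 node registered above could fetch the
 * machine-independent statistics roughly as follows.  The exact headers
 * providing struct uvmexp_sysctl and VM_UVMEXP2 are an assumption here;
 * on NetBSD they are normally reached via <uvm/uvm_extern.h>.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <uvm/uvm_extern.h>
 *	#include <inttypes.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct uvmexp_sysctl u;
 *		size_t len = sizeof(u);
 *		int mib[2] = { CTL_VM, VM_UVMEXP2 };
 *
 *		// The kernel truncates the copy to min(len, sizeof(u)),
 *		// mirroring sysctl_vm_uvmexp2() above.
 *		if (sysctl(mib, 2, &u, &len, NULL, 0) == -1)
 *			return 1;
 *		printf("%" PRId64 " pages, %" PRId64 " free\n",
 *		    u.npages, u.free);
 *		return 0;
 *	}
 */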