/* $NetBSD: uvm_meter.c,v 1.65 2014/12/01 04:11:14 msaitoh Exp $ */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_meter.c	8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.65 2014/12/01 04:11:14 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * maxslp: ???? XXXCDC
 */

int maxslp = MAXSLP;	/* patchable ... */
struct loadavg averunnable;

static void uvm_total(struct vmtotal *);

/*
 * sysctl helper routine for the vm.vmmeter node.
 */
static int
sysctl_vm_meter(SYSCTLFN_ARGS)
{
        struct sysctlnode node;
        struct vmtotal vmtotals;

        node = *rnode;
        node.sysctl_data = &vmtotals;
        uvm_total(&vmtotals);

        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
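
/*
 * Illustrative sketch, not part of the original file: a userland reader
 * of the vm.vmmeter node filled in by sysctl_vm_meter() above would
 * typically fetch the struct vmtotal snapshot with sysctl(3); the struct
 * itself is assumed to be visible via <sys/vmmeter.h>.
 *
 *      int mib[2] = { CTL_VM, VM_METER };
 *      struct vmtotal vt;
 *      size_t len = sizeof(vt);
 *
 *      if (sysctl(mib, 2, &vt, &len, NULL, 0) == 0)
 *              printf("runnable %d, disk wait %d, free pages %d\n",
 *                  vt.t_rq, vt.t_dw, vt.t_free);
 *
 * Each read recomputes the totals via uvm_total(), so the snapshot is
 * current as of the lookup.
 */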

/*
 * sysctl helper routine for the vm.uvmexp node.
 */
static int
sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
        struct sysctlnode node;

        node = *rnode;
        if (oldlenp)
                node.sysctl_size = min(*oldlenp, node.sysctl_size);

        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
        struct sysctlnode node;
        struct uvmexp_sysctl u;
        int active, inactive;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        uvm_estimatepageable(&active, &inactive);

        memset(&u, 0, sizeof(u));

        /* Entries here are in order of uvmexp_sysctl, not uvmexp */
        u.pagesize = uvmexp.pagesize;
        u.pagemask = uvmexp.pagemask;
        u.pageshift = uvmexp.pageshift;
        u.npages = uvmexp.npages;
        u.free = uvmexp.free;
        u.active = active;
        u.inactive = inactive;
        u.paging = uvmexp.paging;
        u.wired = uvmexp.wired;
        u.zeropages = uvmexp.zeropages;
        u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
        u.reserve_kernel = uvmexp.reserve_kernel;
        u.freemin = uvmexp.freemin;
        u.freetarg = uvmexp.freetarg;
        u.inactarg = 0; /* unused */
        u.wiredmax = uvmexp.wiredmax;
        u.nswapdev = uvmexp.nswapdev;
        u.swpages = uvmexp.swpages;
        u.swpginuse = uvmexp.swpginuse;
        u.swpgonly = uvmexp.swpgonly;
        u.nswget = uvmexp.nswget;
        u.cpuhit = uvmexp.cpuhit;
        u.cpumiss = uvmexp.cpumiss;
        for (CPU_INFO_FOREACH(cii, ci)) {
                u.faults += ci->ci_data.cpu_nfault;
                u.traps += ci->ci_data.cpu_ntrap;
                u.intrs += ci->ci_data.cpu_nintr;
                u.swtch += ci->ci_data.cpu_nswtch;
                u.softs += ci->ci_data.cpu_nsoft;
                u.syscalls += ci->ci_data.cpu_nsyscall;
        }
        u.pageins = uvmexp.pageins;
        u.pgswapin = uvmexp.pgswapin;
        u.pgswapout = uvmexp.pgswapout;
        u.forks = uvmexp.forks;
        u.forks_ppwait = uvmexp.forks_ppwait;
        u.forks_sharevm = uvmexp.forks_sharevm;
        u.pga_zerohit = uvmexp.pga_zerohit;
        u.pga_zeromiss = uvmexp.pga_zeromiss;
        u.zeroaborts = uvmexp.zeroaborts;
        u.fltnoram = uvmexp.fltnoram;
        u.fltnoanon = uvmexp.fltnoanon;
        u.fltpgwait = uvmexp.fltpgwait;
        u.fltpgrele = uvmexp.fltpgrele;
        u.fltrelck = uvmexp.fltrelck;
        u.fltrelckok = uvmexp.fltrelckok;
        u.fltanget = uvmexp.fltanget;
        u.fltanretry = uvmexp.fltanretry;
        u.fltamcopy = uvmexp.fltamcopy;
        u.fltnamap = uvmexp.fltnamap;
        u.fltnomap = uvmexp.fltnomap;
        u.fltlget = uvmexp.fltlget;
        u.fltget = uvmexp.fltget;
        u.flt_anon = uvmexp.flt_anon;
        u.flt_acow = uvmexp.flt_acow;
        u.flt_obj = uvmexp.flt_obj;
        u.flt_prcopy = uvmexp.flt_prcopy;
        u.flt_przero = uvmexp.flt_przero;
        u.pdwoke = uvmexp.pdwoke;
        u.pdrevs = uvmexp.pdrevs;
        u.pdfreed = uvmexp.pdfreed;
        u.pdscans = uvmexp.pdscans;
        u.pdanscan = uvmexp.pdanscan;
        u.pdobscan = uvmexp.pdobscan;
        u.pdreact = uvmexp.pdreact;
        u.pdbusy = uvmexp.pdbusy;
        u.pdpageouts = uvmexp.pdpageouts;
        u.pdpending = uvmexp.pdpending;
        u.pddeact = uvmexp.pddeact;
        u.anonpages = uvmexp.anonpages;
        u.filepages = uvmexp.filepages;
        u.execpages = uvmexp.execpages;
        u.colorhit = uvmexp.colorhit;
        u.colormiss = uvmexp.colormiss;
        u.ncolors = uvmexp.ncolors;

        node = *rnode;
        node.sysctl_data = &u;
        node.sysctl_size = sizeof(u);
        if (oldlenp)
                node.sysctl_size = min(*oldlenp, node.sysctl_size);
        return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}
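
/*
 * Illustrative sketch, not part of the original file: vm.uvmexp exports
 * the kernel's own struct uvmexp verbatim, while vm.uvmexp2 exports the
 * machine-independent struct uvmexp_sysctl built above, whose fields are
 * fixed-width (int64_t on NetBSD).  A userland reader might do:
 *
 *      int mib[2] = { CTL_VM, VM_UVMEXP2 };
 *      struct uvmexp_sysctl u2;
 *      size_t len = sizeof(u2);
 *
 *      if (sysctl(mib, 2, &u2, &len, NULL, 0) == 0)
 *              printf("faults %" PRId64 ", free pages %" PRId64 "\n",
 *                  u2.faults, u2.free);
 *
 * The min(*oldlenp, ...) clamp in both handlers copies out at most the
 * caller's buffer size, so a consumer built against an older, smaller
 * version of the struct still gets a consistent prefix of the data.
 */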

/*
 * sysctl helper routine for uvm_pctparam.
 */
static int
uvm_sysctlpctparam(SYSCTLFN_ARGS)
{
        int t, error;
        struct sysctlnode node;
        struct uvm_pctparam *pct;

        pct = rnode->sysctl_data;
        t = pct->pct_pct;

        node = *rnode;
        node.sysctl_data = &t;
        error = sysctl_lookup(SYSCTLFN_CALL(&node));
        if (error || newp == NULL)
                return error;

        if (t < 0 || t > 100)
                return EINVAL;

        error = uvm_pctparam_check(pct, t);
        if (error) {
                return error;
        }
        uvm_pctparam_set(pct, t);

        return (0);
}

/*
 * uvm_sysctl: sysctl hook into UVM system.
 */
SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
{

        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "vmmeter",
            SYSCTL_DESCR("Simple system-wide virtual memory "
                "statistics"),
            sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
            CTL_VM, VM_METER, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "loadavg",
            SYSCTL_DESCR("System load average history"),
            NULL, 0, &averunnable, sizeof(averunnable),
            CTL_VM, VM_LOADAVG, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "uvmexp",
            SYSCTL_DESCR("Detailed system-wide virtual memory "
                "statistics"),
            sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
            CTL_VM, VM_UVMEXP, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "uvmexp2",
            SYSCTL_DESCR("Detailed system-wide virtual memory "
                "statistics (MI)"),
            sysctl_vm_uvmexp2, 0, NULL, 0,
            CTL_VM, VM_UVMEXP2, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
            SYSCTL_DESCR("Maximum process sleep time before being "
                "swapped"),
            NULL, 0, &maxslp, 0,
            CTL_VM, VM_MAXSLP, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
            CTLTYPE_INT, "uspace",
            SYSCTL_DESCR("Number of bytes allocated for a kernel "
                "stack"),
            NULL, USPACE, NULL, 0,
            CTL_VM, VM_USPACE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_BOOL, "idlezero",
            SYSCTL_DESCR("Whether to try to zero pages in the idle loop"),
            NULL, 0, &vm_page_zero_enable, 0,
            CTL_VM, CTL_CREATE, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
            CTLTYPE_LONG, "minaddress",
            SYSCTL_DESCR("Minimum user address"),
            NULL, VM_MIN_ADDRESS, NULL, 0,
            CTL_VM, VM_MINADDRESS, CTL_EOL);
        sysctl_createv(clog, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
            CTLTYPE_LONG, "maxaddress",
            SYSCTL_DESCR("Maximum user address"),
            NULL, VM_MAX_ADDRESS, NULL, 0,
            CTL_VM, VM_MAXADDRESS, CTL_EOL);

        uvmpdpol_sysctlsetup();
}
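
/*
 * Illustrative sketch, not part of the original file: the vm.loadavg node
 * registered above exports struct loadavg directly, whose ldavg[] entries
 * are fixed-point values scaled by fscale.  A userland reader converts
 * them back to floating point, which is essentially what getloadavg(3)
 * does:
 *
 *      int mib[2] = { CTL_VM, VM_LOADAVG };
 *      struct loadavg la;
 *      size_t len = sizeof(la);
 *
 *      if (sysctl(mib, 2, &la, &len, NULL, 0) == 0)
 *              printf("load averages: %.2f %.2f %.2f\n",
 *                  (double)la.ldavg[0] / la.fscale,
 *                  (double)la.ldavg[1] / la.fscale,
 *                  (double)la.ldavg[2] / la.fscale);
 */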

/*
 * uvm_total: calculate the current state of the system.
 */
static void
uvm_total(struct vmtotal *totalp)
{
        struct lwp *l;
#if 0
        struct vm_map_entry *entry;
        struct vm_map *map;
        int paging;
#endif
        int active;

        memset(totalp, 0, sizeof *totalp);

        /*
         * calculate process statistics
         */
        mutex_enter(proc_lock);
        LIST_FOREACH(l, &alllwp, l_list) {
                if (l->l_proc->p_flag & PK_SYSTEM)
                        continue;
                switch (l->l_stat) {
                case 0:
                        continue;

                case LSSLEEP:
                case LSSTOP:
                        if ((l->l_flag & LW_SINTR) == 0) {
                                totalp->t_dw++;
                        } else if (l->l_slptime < maxslp) {
                                totalp->t_sl++;
                        }
                        if (l->l_slptime >= maxslp)
                                continue;
                        break;

                case LSRUN:
                case LSONPROC:
                case LSIDL:
                        totalp->t_rq++;
                        if (l->l_stat == LSIDL)
                                continue;
                        break;
                }
                /*
                 * note active objects
                 */
#if 0
                /*
                 * XXXCDC: BOGUS! rethink this. in the mean time
                 * don't do it.
                 */
                paging = 0;
                map = &l->l_proc->p_vmspace->vm_map;
                vm_map_lock(map);
                for (entry = map->header.next;
                    entry != &map->header; entry = entry->next) {
                        if (entry->is_a_map || entry->is_sub_map ||
                            entry->object.uvm_obj == NULL)
                                continue;
                        /* XXX how to do this with uvm */
                }
                vm_map_unlock(map);
                if (paging)
                        totalp->t_pw++;
#endif
        }
        mutex_exit(proc_lock);

        /*
         * Calculate object memory usage statistics.
         */
        uvm_estimatepageable(&active, NULL);
        totalp->t_free = uvmexp.free;
        totalp->t_vm = uvmexp.npages - uvmexp.free + uvmexp.swpginuse;
        totalp->t_avm = active + uvmexp.swpginuse;      /* XXX */
        totalp->t_rm = uvmexp.npages - uvmexp.free;
        totalp->t_arm = active;
        totalp->t_vmshr = 0;            /* XXX */
        totalp->t_avmshr = 0;           /* XXX */
        totalp->t_rmshr = 0;            /* XXX */
        totalp->t_armshr = 0;           /* XXX */
}

void
uvm_pctparam_set(struct uvm_pctparam *pct, int val)
{

        pct->pct_pct = val;
        pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
}

int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

        return pct->pct_pct;
}

int
uvm_pctparam_check(struct uvm_pctparam *pct, int val)
{

        if (pct->pct_check == NULL) {
                return 0;
        }
        return (*pct->pct_check)(pct, val);
}

void
uvm_pctparam_init(struct uvm_pctparam *pct, int val,
    int (*fn)(struct uvm_pctparam *, int))
{

        pct->pct_check = fn;
        uvm_pctparam_set(pct, val);
}

int
uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
    const char *desc)
{

        return sysctl_createv(NULL, 0, NULL, NULL,
            CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
            CTLTYPE_INT, name, SYSCTL_DESCR(desc),
            uvm_sysctlpctparam, 0, (void *)pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}
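
/*
 * Illustrative sketch, not part of the original file: a UVM subsystem
 * using the uvm_pctparam helpers above would typically initialize the
 * parameter once and hang a vm.* knob off it.  The names below are
 * hypothetical:
 *
 *      static struct uvm_pctparam example_pct;
 *
 *      static int
 *      example_pct_check(struct uvm_pctparam *pct, int val)
 *      {
 *              return (val > 95) ? EINVAL : 0;
 *      }
 *
 *      uvm_pctparam_init(&example_pct, 30, example_pct_check);
 *      uvm_pctparam_createsysctlnode(&example_pct, "examplepct",
 *          "Hypothetical percentage knob");
 *
 * Writes through the resulting node go via uvm_sysctlpctparam(), which
 * rejects values outside 0..100 and anything the check hook refuses.
 * uvm_pctparam_set() keeps pct_scaled equal to val * UVM_PCTPARAM_SCALE
 * / 100, so hot paths can use the pre-scaled value instead of dividing
 * by 100 on every use.
 */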