/*	$OpenBSD: uvm_meter.c,v 1.39 2019/12/05 15:19:33 mpi Exp $	*/
/*	$NetBSD: uvm_meter.c,v 1.21 2001/07/14 06:36:03 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_meter.c  8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <uvm/uvm.h>
#include <uvm/uvm_ddb.h>

#ifdef UVM_SWAP_ENCRYPT
#include <uvm/uvm_swap.h>
#include <uvm/uvm_swap_encrypt.h>
#endif

/*
 * The time for a process to be blocked before it is considered very
 * swappable.  This is a number of seconds which the system takes as being
 * a non-trivial amount of real time.  You probably shouldn't change this;
 * it is used in subtle ways (fractions and multiples of it are used, e.g.
 * half of a ``long time'', almost a long time, etc.).
 * It is related to human patience and other factors which don't really
 * change over time.
 */
#define	MAXSLP	20

int maxslp = MAXSLP;	/* patchable ... */
struct loadavg averunnable;

/*
 * constants for averages over 1, 5, and 15 minutes when sampling at
 * 5 second intervals.
 */

static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
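
/*
 * Derivation of the constants above, for reference: with one sample every
 * 5 seconds, the decay factor for an N-minute average is exp(-5 / (N * 60)):
 *    1 min:  exp(-5/60)  = exp(-1/12)  ~ 0.92004
 *    5 min:  exp(-5/300) = exp(-1/60)  ~ 0.98347
 *   15 min:  exp(-5/900) = exp(-1/180) ~ 0.99446
 * each pre-multiplied by FSCALE to put it in fixed point.
 */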


static void uvm_loadav(struct loadavg *);
void uvm_total(struct vmtotal *);

/*
 * uvm_meter: calculate load average and wake up the swapper (if needed)
 */
void
uvm_meter(void)
{
	if ((time_second % 5) == 0)
		uvm_loadav(&averunnable);
	if (proc0.p_slptime > (maxslp / 2))
		wakeup(&proc0);
}

/*
 * uvm_loadav: compute a tenex style load average of a quantity on
 * 1, 5, and 15 minute intervals.
 */
static void
uvm_loadav(struct loadavg *avg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int i, nrun;
	struct proc *p;
	int nrun_cpu[MAXCPUS];

	nrun = 0;
	memset(nrun_cpu, 0, sizeof(nrun_cpu));

	LIST_FOREACH(p, &allproc, p_list) {
		switch (p->p_stat) {
		case SSTOP:
		case SSLEEP:
			break;
		case SRUN:
		case SONPROC:
			if (p == p->p_cpu->ci_schedstate.spc_idleproc)
				continue;
		/* FALLTHROUGH */
		case SIDL:
			nrun++;
			if (p->p_cpu)
				nrun_cpu[CPU_INFO_UNIT(p->p_cpu)]++;
		}
	}

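	/*
	 * Exponentially-weighted moving average, in fixed point:
	 * load = load * decay + nrun * (1 - decay), with decay = cexp[i]
	 * and every term scaled by FSCALE; the final shift by FSHIFT
	 * removes the extra scale factor from the products.
	 */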
	for (i = 0; i < 3; i++) {
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
	}

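	/*
	 * The per-CPU load average uses only the 1-minute decay constant
	 * (cexp[0]) and is left untouched for CPUs that had no runnable
	 * processes in this sample.
	 */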
	CPU_INFO_FOREACH(cii, ci) {
		struct schedstate_percpu *spc = &ci->ci_schedstate;

		if (nrun_cpu[CPU_INFO_UNIT(ci)] == 0)
			continue;
		spc->spc_ldavg = (cexp[0] * spc->spc_ldavg +
		    nrun_cpu[CPU_INFO_UNIT(ci)] * FSCALE *
		    (FSCALE - cexp[0])) >> FSHIFT;
	}
}

char malloc_conf[16];	/* exported below as the vm.malloc_conf sysctl string */

/*
 * uvm_sysctl: sysctl hook into UVM system.
 */
int
uvm_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen, struct proc *p)
{
	struct process *pr = p->p_p;
	struct vmtotal vmtotals;
	int rv, t;

	switch (name[0]) {
	case VM_SWAPENCRYPT:
#ifdef UVM_SWAP_ENCRYPT
		return (swap_encrypt_ctl(name + 1, namelen - 1, oldp, oldlenp,
					 newp, newlen, p));
#else
		return (EOPNOTSUPP);
#endif
	default:
		/* all sysctl names at this level are terminal */
		if (namelen != 1)
			return (ENOTDIR);		/* overloaded */
		break;
	}

	switch (name[0]) {
	case VM_LOADAVG:
		return (sysctl_rdstruct(oldp, oldlenp, newp, &averunnable,
		    sizeof(averunnable)));

	case VM_METER:
		uvm_total(&vmtotals);
		return (sysctl_rdstruct(oldp, oldlenp, newp, &vmtotals,
		    sizeof(vmtotals)));

	case VM_UVMEXP:
		return (sysctl_rdstruct(oldp, oldlenp, newp, &uvmexp,
		    sizeof(uvmexp)));

	case VM_NKMEMPAGES:
		return (sysctl_rdint(oldp, oldlenp, newp, nkmempages));

	case VM_PSSTRINGS:
		return (sysctl_rdstruct(oldp, oldlenp, newp, &pr->ps_strings,
		    sizeof(pr->ps_strings)));

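	/*
	 * The anon/vtext/vnode minimums below are specified as percentages;
	 * together they may not exceed 95%.  Each is kept both as the raw
	 * percentage (uvmexp.*pct) and converted to a fraction out of 256
	 * (uvmexp.*min, i.e. pct * 256 / 100).
	 */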
	case VM_ANONMIN:
		t = uvmexp.anonminpct;
		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
		if (rv) {
			return rv;
		}
		if (t + uvmexp.vtextminpct + uvmexp.vnodeminpct > 95 || t < 0) {
			return EINVAL;
		}
		uvmexp.anonminpct = t;
		uvmexp.anonmin = t * 256 / 100;
		return rv;

	case VM_VTEXTMIN:
		t = uvmexp.vtextminpct;
		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
		if (rv) {
			return rv;
		}
		if (uvmexp.anonminpct + t + uvmexp.vnodeminpct > 95 || t < 0) {
			return EINVAL;
		}
		uvmexp.vtextminpct = t;
		uvmexp.vtextmin = t * 256 / 100;
		return rv;

	case VM_VNODEMIN:
		t = uvmexp.vnodeminpct;
		rv = sysctl_int(oldp, oldlenp, newp, newlen, &t);
		if (rv) {
			return rv;
		}
		if (uvmexp.anonminpct + uvmexp.vtextminpct + t > 95 || t < 0) {
			return EINVAL;
		}
		uvmexp.vnodeminpct = t;
		uvmexp.vnodemin = t * 256 / 100;
		return rv;

	case VM_MAXSLP:
		return (sysctl_rdint(oldp, oldlenp, newp, maxslp));

	case VM_USPACE:
		return (sysctl_rdint(oldp, oldlenp, newp, USPACE));

	case VM_MALLOC_CONF:
		return (sysctl_string(oldp, oldlenp, newp, newlen,
		    malloc_conf, sizeof(malloc_conf)));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}
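
/*
 * Example of how the read-only values exported above can be queried from
 * userland; an illustrative sketch only (see sysctl(2)), reading the load
 * averages published as vm.loadavg:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_VM, VM_LOADAVG };
 *		struct loadavg la;
 *		size_t len = sizeof(la);
 *
 *		if (sysctl(mib, 2, &la, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("%.2f %.2f %.2f\n",
 *		    (double)la.ldavg[0] / la.fscale,
 *		    (double)la.ldavg[1] / la.fscale,
 *		    (double)la.ldavg[2] / la.fscale);
 *		return (0);
 *	}
 */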

/*
 * uvm_total: calculate the current state of the system.
 */
void
uvm_total(struct vmtotal *totalp)
{
	struct proc *p;
#if 0
	struct vm_map_entry *	entry;
	struct vm_map *map;
	int paging;
#endif

	memset(totalp, 0, sizeof *totalp);

	/* calculate process statistics */
	LIST_FOREACH(p, &allproc, p_list) {
		switch (p->p_stat) {
		case 0:
			continue;

		case SSLEEP:
		case SSTOP:
			totalp->t_sl++;
			break;
		case SRUN:
		case SONPROC:
			if (p == p->p_cpu->ci_schedstate.spc_idleproc)
				continue;
		/* FALLTHROUGH */
		case SIDL:
			totalp->t_rq++;
			if (p->p_stat == SIDL)
				continue;
			break;
		}
		/*
		 * note active objects
		 */
#if 0
		/*
		 * XXXCDC: BOGUS!  rethink this.  In the meantime,
		 * don't do it.
		 */
		paging = 0;
		map = &p->p_vmspace->vm_map;
		vm_map_lock(map);
		for (entry = map->header.next;
		    entry != &map->header; entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.uvm_obj == NULL)
				continue;
			/* XXX how to do this with uvm */
		}
		vm_map_unlock(map);
		if (paging)
			totalp->t_pw++;
#endif
	}
	/*
	 * Calculate object memory usage statistics.
	 */
	totalp->t_free = uvmexp.free;
	totalp->t_vm = uvmexp.npages - uvmexp.free + uvmexp.swpginuse;
	totalp->t_avm = uvmexp.active + uvmexp.swpginuse;	/* XXX */
	totalp->t_rm = uvmexp.npages - uvmexp.free;
	totalp->t_arm = uvmexp.active;
	totalp->t_vmshr = 0;		/* XXX */
	totalp->t_avmshr = 0;		/* XXX */
	totalp->t_rmshr = 0;		/* XXX */
	totalp->t_armshr = 0;		/* XXX */
}

#ifdef DDB

/*
 * uvmexp_print: ddb hook to print interesting uvm counters
 */
void
uvmexp_print(int (*pr)(const char *, ...))
{

	(*pr)("Current UVM status:\n");
	(*pr)("  pagesize=%d (0x%x), pagemask=0x%x, pageshift=%d\n",
	    uvmexp.pagesize, uvmexp.pagesize, uvmexp.pagemask,
	    uvmexp.pageshift);
	(*pr)("  %d VM pages: %d active, %d inactive, %d wired, %d free (%d zero)\n",
	    uvmexp.npages, uvmexp.active, uvmexp.inactive, uvmexp.wired,
	    uvmexp.free, uvmexp.zeropages);
	(*pr)("  min  %d%% (%d) anon, %d%% (%d) vnode, %d%% (%d) vtext\n",
	    uvmexp.anonminpct, uvmexp.anonmin, uvmexp.vnodeminpct,
	    uvmexp.vnodemin, uvmexp.vtextminpct, uvmexp.vtextmin);
	(*pr)("  freemin=%d, free-target=%d, inactive-target=%d, "
	    "wired-max=%d\n", uvmexp.freemin, uvmexp.freetarg, uvmexp.inactarg,
	    uvmexp.wiredmax);
	(*pr)("  faults=%d, traps=%d, intrs=%d, ctxswitch=%d fpuswitch=%d\n",
	    uvmexp.faults, uvmexp.traps, uvmexp.intrs, uvmexp.swtch,
	    uvmexp.fpswtch);
	(*pr)("  softint=%d, syscalls=%d, kmapent=%d\n",
	    uvmexp.softs, uvmexp.syscalls, uvmexp.kmapent);

	(*pr)("  fault counts:\n");
	(*pr)("    noram=%d, noanon=%d, noamap=%d, pgwait=%d, pgrele=%d\n",
	    uvmexp.fltnoram, uvmexp.fltnoanon, uvmexp.fltnoamap,
	    uvmexp.fltpgwait, uvmexp.fltpgrele);
	(*pr)("    ok relocks(total)=%d(%d), anget(retries)=%d(%d), "
	    "amapcopy=%d\n", uvmexp.fltrelckok, uvmexp.fltrelck,
	    uvmexp.fltanget, uvmexp.fltanretry, uvmexp.fltamcopy);
	(*pr)("    neighbor anon/obj pg=%d/%d, gets(lock/unlock)=%d/%d\n",
	    uvmexp.fltnamap, uvmexp.fltnomap, uvmexp.fltlget, uvmexp.fltget);
	(*pr)("    cases: anon=%d, anoncow=%d, obj=%d, prcopy=%d, przero=%d\n",
	    uvmexp.flt_anon, uvmexp.flt_acow, uvmexp.flt_obj, uvmexp.flt_prcopy,
	    uvmexp.flt_przero);

	(*pr)("  daemon and swap counts:\n");
	(*pr)("    woke=%d, revs=%d, scans=%d, obscans=%d, anscans=%d\n",
	    uvmexp.pdwoke, uvmexp.pdrevs, uvmexp.pdscans, uvmexp.pdobscan,
	    uvmexp.pdanscan);
	(*pr)("    busy=%d, freed=%d, reactivate=%d, deactivate=%d\n",
	    uvmexp.pdbusy, uvmexp.pdfreed, uvmexp.pdreact, uvmexp.pddeact);
	(*pr)("    pageouts=%d, pending=%d, nswget=%d\n", uvmexp.pdpageouts,
	    uvmexp.pdpending, uvmexp.nswget);
	(*pr)("    nswapdev=%d\n",
	    uvmexp.nswapdev);
	(*pr)("    swpages=%d, swpginuse=%d, swpgonly=%d paging=%d\n",
	    uvmexp.swpages, uvmexp.swpginuse, uvmexp.swpgonly, uvmexp.paging);

	(*pr)("  kernel pointers:\n");
	(*pr)("    objs(kern)=%p\n", uvm.kernel_object);
}
#endif
