/*	$NetBSD: uvm_meter.c,v 1.76 2020/03/22 18:32:42 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_meter.c  8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.76 2020/03/22 18:32:42 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * maxslp: ???? XXXCDC
 */

int maxslp = MAXSLP;	/* patchable ... */
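/* System load averages, exported below via the vm.loadavg sysctl node. */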
struct loadavg averunnable;

static void uvm_total(struct vmtotal *);

/*
 * sysctl helper routine for the vm.vmmeter node.
 */
static int
sysctl_vm_meter(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct vmtotal vmtotals;

	node = *rnode;
	node.sysctl_data = &vmtotals;
	uvm_total(&vmtotals);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for the vm.uvmexp node.
 */
static int
sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	uvm_update_uvmexp();

	node = *rnode;
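	/*
	 * Clamp the exported size to the caller's buffer length so that a
	 * userland program built against an older, smaller struct uvmexp
	 * can still copy out the fields it knows about.
	 */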
	if (oldlenp)
		node.sysctl_size = uimin(*oldlenp, node.sysctl_size);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

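/*
 * sysctl helper routine for the vm.uvmexp2 node: export the
 * machine-independent uvmexp_sysctl structure, assembled from uvmexp
 * and the aggregated per-CPU counters.
 */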
static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct uvmexp_sysctl u;
	int active, inactive;

	cpu_count_sync_all();
	uvm_estimatepageable(&active, &inactive);

	memset(&u, 0, sizeof(u));

	/* Entries here are in order of uvmexp_sysctl, not uvmexp */
	u.pagesize = uvmexp.pagesize;
	u.pagemask = uvmexp.pagemask;
	u.pageshift = uvmexp.pageshift;
	u.npages = uvmexp.npages;
	u.free = uvm_availmem();
	u.active = active;
	u.inactive = inactive;
	u.paging = uvmexp.paging;
	u.wired = uvmexp.wired;
	u.zeropages = cpu_count_get(CPU_COUNT_ZEROPAGES);
	u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
	u.reserve_kernel = uvmexp.reserve_kernel;
	u.freemin = uvmexp.freemin;
	u.freetarg = uvmexp.freetarg;
	u.inactarg = 0; /* unused */
	u.wiredmax = uvmexp.wiredmax;
	u.nswapdev = uvmexp.nswapdev;
	u.swpages = uvmexp.swpages;
	u.swpginuse = uvmexp.swpginuse;
	u.swpgonly = uvmexp.swpgonly;
	u.nswget = uvmexp.nswget;
	u.cpuhit = cpu_count_get(CPU_COUNT_CPUHIT);
	u.cpumiss = cpu_count_get(CPU_COUNT_CPUMISS);
	u.faults = cpu_count_get(CPU_COUNT_NFAULT);
	u.traps = cpu_count_get(CPU_COUNT_NTRAP);
	u.intrs = cpu_count_get(CPU_COUNT_NINTR);
	u.swtch = cpu_count_get(CPU_COUNT_NSWTCH);
	u.softs = cpu_count_get(CPU_COUNT_NSOFT);
	u.syscalls = cpu_count_get(CPU_COUNT_NSYSCALL);
	u.pageins = cpu_count_get(CPU_COUNT_PAGEINS);
	u.pgswapin = 0; /* unused */
	u.pgswapout = uvmexp.pgswapout;
	u.forks = cpu_count_get(CPU_COUNT_FORKS);
	u.forks_ppwait = cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
	u.forks_sharevm = cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
	u.pga_zerohit = cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
	u.pga_zeromiss = cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
	u.zeroaborts = uvmexp.zeroaborts;
	u.fltnoram = cpu_count_get(CPU_COUNT_FLTNORAM);
	u.fltnoanon = cpu_count_get(CPU_COUNT_FLTNOANON);
	u.fltpgwait = cpu_count_get(CPU_COUNT_FLTPGWAIT);
	u.fltpgrele = cpu_count_get(CPU_COUNT_FLTPGRELE);
	u.fltrelck = cpu_count_get(CPU_COUNT_FLTRELCK);
	u.fltrelckok = cpu_count_get(CPU_COUNT_FLTRELCKOK);
	u.fltanget = cpu_count_get(CPU_COUNT_FLTANGET);
	u.fltanretry = cpu_count_get(CPU_COUNT_FLTANRETRY);
	u.fltamcopy = cpu_count_get(CPU_COUNT_FLTAMCOPY);
	u.fltnamap = cpu_count_get(CPU_COUNT_FLTNAMAP);
	u.fltnomap = cpu_count_get(CPU_COUNT_FLTNOMAP);
	u.fltlget = cpu_count_get(CPU_COUNT_FLTLGET);
	u.fltget = cpu_count_get(CPU_COUNT_FLTGET);
	u.flt_anon = cpu_count_get(CPU_COUNT_FLT_ANON);
	u.flt_acow = cpu_count_get(CPU_COUNT_FLT_ACOW);
	u.flt_obj = cpu_count_get(CPU_COUNT_FLT_OBJ);
	u.flt_prcopy = cpu_count_get(CPU_COUNT_FLT_PRCOPY);
	u.flt_przero = cpu_count_get(CPU_COUNT_FLT_PRZERO);
	u.pdwoke = uvmexp.pdwoke;
	u.pdrevs = uvmexp.pdrevs;
	u.pdfreed = uvmexp.pdfreed;
	u.pdscans = uvmexp.pdscans;
	u.pdanscan = uvmexp.pdanscan;
	u.pdobscan = uvmexp.pdobscan;
	u.pdreact = uvmexp.pdreact;
	u.pdbusy = uvmexp.pdbusy;
	u.pdpageouts = uvmexp.pdpageouts;
	u.pdpending = uvmexp.pdpending;
	u.pddeact = uvmexp.pddeact;
	u.anonpages = cpu_count_get(CPU_COUNT_ANONPAGES);
	u.filepages = cpu_count_get(CPU_COUNT_FILEPAGES);
	u.execpages = cpu_count_get(CPU_COUNT_EXECPAGES);
	u.colorhit = cpu_count_get(CPU_COUNT_COLORHIT);
	u.colormiss = cpu_count_get(CPU_COUNT_COLORMISS);
	u.ncolors = uvmexp.ncolors;
	u.bootpages = uvmexp.bootpages;
	u.poolpages = pool_totalpages();
	u.countsyncone = cpu_count_get(CPU_COUNT_SYNC_ONE);
	u.countsyncall = cpu_count_get(CPU_COUNT_SYNC_ALL);
	u.anonunknown = cpu_count_get(CPU_COUNT_ANONUNKNOWN);
	u.anonclean = cpu_count_get(CPU_COUNT_ANONCLEAN);
	u.anondirty = cpu_count_get(CPU_COUNT_ANONDIRTY);
	u.fileunknown = cpu_count_get(CPU_COUNT_FILEUNKNOWN);
	u.fileclean = cpu_count_get(CPU_COUNT_FILECLEAN);
	u.filedirty = cpu_count_get(CPU_COUNT_FILEDIRTY);
	u.fltup = cpu_count_get(CPU_COUNT_FLTUP);
	u.fltnoup = cpu_count_get(CPU_COUNT_FLTNOUP);

	node = *rnode;
	node.sysctl_data = &u;
	node.sysctl_size = sizeof(u);
	if (oldlenp)
		node.sysctl_size = uimin(*oldlenp, node.sysctl_size);
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for uvm_pctparam.
 */
static int
uvm_sysctlpctparam(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;
	struct uvm_pctparam *pct;

	pct = rnode->sysctl_data;
	t = pct->pct_pct;

	node = *rnode;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (t < 0 || t > 100)
		return EINVAL;

	error = uvm_pctparam_check(pct, t);
	if (error) {
		return error;
	}
	uvm_pctparam_set(pct, t);

	return (0);
}

/*
 * sysctl_vm_setup: sysctl hook into the UVM system.
 */
SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "vmmeter",
		       SYSCTL_DESCR("Simple system-wide virtual memory "
				    "statistics"),
		       sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
		       CTL_VM, VM_METER, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "loadavg",
		       SYSCTL_DESCR("System load average history"),
		       NULL, 0, &averunnable, sizeof(averunnable),
		       CTL_VM, VM_LOADAVG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "uvmexp",
		       SYSCTL_DESCR("Detailed system-wide virtual memory "
				    "statistics"),
		       sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
		       CTL_VM, VM_UVMEXP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "uvmexp2",
		       SYSCTL_DESCR("Detailed system-wide virtual memory "
				    "statistics (MI)"),
		       sysctl_vm_uvmexp2, 0, NULL, 0,
		       CTL_VM, VM_UVMEXP2, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
		       SYSCTL_DESCR("Maximum process sleep time before being "
				    "swapped"),
		       NULL, 0, &maxslp, 0,
		       CTL_VM, VM_MAXSLP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "uspace",
		       SYSCTL_DESCR("Number of bytes allocated for a kernel "
				    "stack"),
		       NULL, USPACE, NULL, 0,
		       CTL_VM, VM_USPACE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_BOOL, "idlezero",
		       SYSCTL_DESCR("Whether to try to zero pages in the "
				    "idle loop"),
		       NULL, 0, &vm_page_zero_enable, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_LONG, "minaddress",
		       SYSCTL_DESCR("Minimum user address"),
		       NULL, VM_MIN_ADDRESS, NULL, 0,
		       CTL_VM, VM_MINADDRESS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_LONG, "maxaddress",
		       SYSCTL_DESCR("Maximum user address"),
		       NULL, VM_MAX_ADDRESS, NULL, 0,
		       CTL_VM, VM_MAXADDRESS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED,
		       CTLTYPE_INT, "guard_size",
		       SYSCTL_DESCR("Guard size of main thread"),
		       NULL, 0, &user_stack_guard_size, 0,
		       CTL_VM, VM_GUARD_SIZE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "thread_guard_size",
		       SYSCTL_DESCR("Guard size of other threads"),
		       NULL, 0, &user_thread_stack_guard_size, 0,
		       CTL_VM, VM_THREAD_GUARD_SIZE, CTL_EOL);
#ifdef PMAP_DIRECT
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_BOOL, "ubc_direct",
		       SYSCTL_DESCR("Use direct map for UBC I/O"),
		       NULL, 0, &ubc_direct, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
#endif

	uvmpdpol_sysctlsetup();
}

/*
 * uvm_total: calculate the current state of the system.
 */
static void
uvm_total(struct vmtotal *totalp)
{
	struct lwp *l;
#if 0
	struct vm_map_entry *	entry;
	struct vm_map *map;
	int paging;
#endif
	int freepg;
	int active;

	memset(totalp, 0, sizeof *totalp);

	/*
	 * calculate process statistics
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_proc->p_flag & PK_SYSTEM)
			continue;
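		/*
		 * Classify the LWP by state: t_dw counts uninterruptible
		 * waits, t_sl interruptible sleeps (below maxslp), and
		 * t_rq threads that are runnable or on a CPU.
		 */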
		switch (l->l_stat) {
		case 0:
			continue;

		case LSSLEEP:
		case LSSTOP:
			if ((l->l_flag & LW_SINTR) == 0) {
				totalp->t_dw++;
			} else if (l->l_slptime < maxslp) {
				totalp->t_sl++;
			}
			if (l->l_slptime >= maxslp)
				continue;
			break;

		case LSRUN:
		case LSONPROC:
		case LSIDL:
			totalp->t_rq++;
			if (l->l_stat == LSIDL)
				continue;
			break;
		}
		/*
		 * note active objects
		 */
#if 0
		/*
		 * XXXCDC: BOGUS!  rethink this.  in the mean time
		 * don't do it.
		 */
		paging = 0;
		vm_map_lock(map);
		for (map = &p->p_vmspace->vm_map, entry = map->header.next;
		    entry != &map->header; entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.uvm_obj == NULL)
				continue;
			/* XXX how to do this with uvm */
		}
		vm_map_unlock(map);
		if (paging)
			totalp->t_pw++;
#endif
	}
	mutex_exit(proc_lock);

	/*
	 * Calculate object memory usage statistics.
	 */
	freepg = uvm_availmem();
	uvm_estimatepageable(&active, NULL);
	totalp->t_free = freepg;
	totalp->t_vm = uvmexp.npages - freepg + uvmexp.swpginuse;
	totalp->t_avm = active + uvmexp.swpginuse;	/* XXX */
	totalp->t_rm = uvmexp.npages - freepg;
	totalp->t_arm = active;
	totalp->t_vmshr = 0;		/* XXX */
	totalp->t_avmshr = 0;		/* XXX */
	totalp->t_rmshr = 0;		/* XXX */
	totalp->t_armshr = 0;		/* XXX */
}

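/*
 * uvm_pctparam_set: set the percentage value and recompute the
 * pre-scaled value derived from UVM_PCTPARAM_SCALE.
 */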
void
uvm_pctparam_set(struct uvm_pctparam *pct, int val)
{

	pct->pct_pct = val;
	pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
}

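/*
 * uvm_pctparam_get: return the current percentage value.
 */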
int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

	return pct->pct_pct;
}

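/*
 * uvm_pctparam_check: run the parameter's check hook, if any, against
 * a proposed value; return 0 when no hook is registered.
 */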
int
uvm_pctparam_check(struct uvm_pctparam *pct, int val)
{

	if (pct->pct_check == NULL) {
		return 0;
	}
	return (*pct->pct_check)(pct, val);
}

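/*
 * uvm_pctparam_init: register the check hook and establish the
 * initial value.
 */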
void
uvm_pctparam_init(struct uvm_pctparam *pct, int val,
    int (*fn)(struct uvm_pctparam *, int))
{

	pct->pct_check = fn;
	uvm_pctparam_set(pct, val);
}

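/*
 * uvm_pctparam_createsysctlnode: create a read-write integer node under
 * CTL_VM, served by uvm_sysctlpctparam, for the given parameter.
 */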
int
uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
    const char *desc)
{

	return sysctl_createv(NULL, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, name, SYSCTL_DESCR(desc),
	    uvm_sysctlpctparam, 0, (void *)pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}

/*
 * Update uvmexp with aggregate values from the per-CPU counters.
 */
void
uvm_update_uvmexp(void)
{

	cpu_count_sync_all();

	uvmexp.free = (int)uvm_availmem();
	uvmexp.zeropages = (int)cpu_count_get(CPU_COUNT_ZEROPAGES);
	uvmexp.cpuhit = (int)cpu_count_get(CPU_COUNT_CPUHIT);
	uvmexp.cpumiss = (int)cpu_count_get(CPU_COUNT_CPUMISS);
	uvmexp.faults = (int)cpu_count_get(CPU_COUNT_NFAULT);
	uvmexp.traps = (int)cpu_count_get(CPU_COUNT_NTRAP);
	uvmexp.intrs = (int)cpu_count_get(CPU_COUNT_NINTR);
	uvmexp.swtch = (int)cpu_count_get(CPU_COUNT_NSWTCH);
	uvmexp.softs = (int)cpu_count_get(CPU_COUNT_NSOFT);
	uvmexp.syscalls = (int)cpu_count_get(CPU_COUNT_NSYSCALL);
	uvmexp.pageins = (int)cpu_count_get(CPU_COUNT_PAGEINS);
	uvmexp.forks = (int)cpu_count_get(CPU_COUNT_FORKS);
	uvmexp.forks_ppwait = (int)cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
	uvmexp.forks_sharevm = (int)cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
	uvmexp.pga_zerohit = (int)cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
	uvmexp.pga_zeromiss = (int)cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
	uvmexp.fltnoram = (int)cpu_count_get(CPU_COUNT_FLTNORAM);
	uvmexp.fltnoanon = (int)cpu_count_get(CPU_COUNT_FLTNOANON);
	uvmexp.fltpgwait = (int)cpu_count_get(CPU_COUNT_FLTPGWAIT);
	uvmexp.fltpgrele = (int)cpu_count_get(CPU_COUNT_FLTPGRELE);
	uvmexp.fltrelck = (int)cpu_count_get(CPU_COUNT_FLTRELCK);
	uvmexp.fltrelckok = (int)cpu_count_get(CPU_COUNT_FLTRELCKOK);
	uvmexp.fltanget = (int)cpu_count_get(CPU_COUNT_FLTANGET);
	uvmexp.fltanretry = (int)cpu_count_get(CPU_COUNT_FLTANRETRY);
	uvmexp.fltamcopy = (int)cpu_count_get(CPU_COUNT_FLTAMCOPY);
	uvmexp.fltnamap = (int)cpu_count_get(CPU_COUNT_FLTNAMAP);
	uvmexp.fltnomap = (int)cpu_count_get(CPU_COUNT_FLTNOMAP);
	uvmexp.fltlget = (int)cpu_count_get(CPU_COUNT_FLTLGET);
	uvmexp.fltget = (int)cpu_count_get(CPU_COUNT_FLTGET);
	uvmexp.flt_anon = (int)cpu_count_get(CPU_COUNT_FLT_ANON);
	uvmexp.flt_acow = (int)cpu_count_get(CPU_COUNT_FLT_ACOW);
	uvmexp.flt_obj = (int)cpu_count_get(CPU_COUNT_FLT_OBJ);
	uvmexp.flt_prcopy = (int)cpu_count_get(CPU_COUNT_FLT_PRCOPY);
	uvmexp.flt_przero = (int)cpu_count_get(CPU_COUNT_FLT_PRZERO);
	uvmexp.anonpages = (int)cpu_count_get(CPU_COUNT_ANONPAGES);
	uvmexp.filepages = (int)cpu_count_get(CPU_COUNT_FILEPAGES);
	uvmexp.execpages = (int)cpu_count_get(CPU_COUNT_EXECPAGES);
	uvmexp.colorhit = (int)cpu_count_get(CPU_COUNT_COLORHIT);
	uvmexp.colormiss = (int)cpu_count_get(CPU_COUNT_COLORMISS);
}
500