/*	$NetBSD: uvm_meter.c,v 1.74 2020/01/15 17:55:45 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vm_meter.c  8.4 (Berkeley) 1/4/94
 * from: Id: uvm_meter.c,v 1.1.2.1 1997/08/14 19:10:35 chuck Exp
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_meter.c,v 1.74 2020/01/15 17:55:45 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * maxslp: maximum time (in seconds) a process may sleep before it is
 * considered for swapout; exported below as the vm.maxslp sysctl.
 */

int maxslp = MAXSLP;	/* patchable ... */
struct loadavg averunnable;

static void uvm_total(struct vmtotal *);

/*
 * sysctl helper routine for the vm.vmmeter node.
 */
static int
sysctl_vm_meter(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct vmtotal vmtotals;

	node = *rnode;
	node.sysctl_data = &vmtotals;
	uvm_total(&vmtotals);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for the vm.uvmexp node.
 */
static int
sysctl_vm_uvmexp(SYSCTLFN_ARGS)
{
	struct sysctlnode node;

	uvm_update_uvmexp();

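	/*
	 * Copy out no more than the caller asked for; presumably this
	 * keeps older userland binaries built against a shorter
	 * struct uvmexp working on a newer kernel.
	 */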
	node = *rnode;
	if (oldlenp)
		node.sysctl_size = uimin(*oldlenp, node.sysctl_size);

	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

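/*
 * sysctl helper routine for the vm.uvmexp2 node: fill a struct
 * uvmexp_sysctl (the machine-independent, 64-bit view) from the global
 * uvmexp and the aggregated per-CPU counters.
 */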
static int
sysctl_vm_uvmexp2(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct uvmexp_sysctl u;
	int active, inactive;

	cpu_count_sync_all();
	uvm_estimatepageable(&active, &inactive);

	memset(&u, 0, sizeof(u));

	/* Entries here are in order of uvmexp_sysctl, not uvmexp */
	u.pagesize = uvmexp.pagesize;
	u.pagemask = uvmexp.pagemask;
	u.pageshift = uvmexp.pageshift;
	u.npages = uvmexp.npages;
	u.free = uvm_availmem();
	u.active = active;
	u.inactive = inactive;
	u.paging = uvmexp.paging;
	u.wired = uvmexp.wired;
	u.zeropages = cpu_count_get(CPU_COUNT_ZEROPAGES);
	u.reserve_pagedaemon = uvmexp.reserve_pagedaemon;
	u.reserve_kernel = uvmexp.reserve_kernel;
	u.freemin = uvmexp.freemin;
	u.freetarg = uvmexp.freetarg;
	u.inactarg = 0; /* unused */
	u.wiredmax = uvmexp.wiredmax;
	u.nswapdev = uvmexp.nswapdev;
	u.swpages = uvmexp.swpages;
	u.swpginuse = uvmexp.swpginuse;
	u.swpgonly = uvmexp.swpgonly;
	u.nswget = uvmexp.nswget;
	u.cpuhit = cpu_count_get(CPU_COUNT_CPUHIT);
	u.cpumiss = cpu_count_get(CPU_COUNT_CPUMISS);
	u.faults = cpu_count_get(CPU_COUNT_NFAULT);
	u.traps = cpu_count_get(CPU_COUNT_NTRAP);
	u.intrs = cpu_count_get(CPU_COUNT_NINTR);
	u.swtch = cpu_count_get(CPU_COUNT_NSWTCH);
	u.softs = cpu_count_get(CPU_COUNT_NSOFT);
	u.syscalls = cpu_count_get(CPU_COUNT_NSYSCALL);
	u.pageins = cpu_count_get(CPU_COUNT_PAGEINS);
	u.pgswapin = 0; /* unused */
	u.pgswapout = uvmexp.pgswapout;
	u.forks = cpu_count_get(CPU_COUNT_FORKS);
	u.forks_ppwait = cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
	u.forks_sharevm = cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
	u.pga_zerohit = cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
	u.pga_zeromiss = cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
	u.zeroaborts = uvmexp.zeroaborts;
	u.fltnoram = cpu_count_get(CPU_COUNT_FLTNORAM);
	u.fltnoanon = cpu_count_get(CPU_COUNT_FLTNOANON);
	u.fltpgwait = cpu_count_get(CPU_COUNT_FLTPGWAIT);
	u.fltpgrele = cpu_count_get(CPU_COUNT_FLTPGRELE);
	u.fltrelck = cpu_count_get(CPU_COUNT_FLTRELCK);
	u.fltrelckok = cpu_count_get(CPU_COUNT_FLTRELCKOK);
	u.fltanget = cpu_count_get(CPU_COUNT_FLTANGET);
	u.fltanretry = cpu_count_get(CPU_COUNT_FLTANRETRY);
	u.fltamcopy = cpu_count_get(CPU_COUNT_FLTAMCOPY);
	u.fltnamap = cpu_count_get(CPU_COUNT_FLTNAMAP);
	u.fltnomap = cpu_count_get(CPU_COUNT_FLTNOMAP);
	u.fltlget = cpu_count_get(CPU_COUNT_FLTLGET);
	u.fltget = cpu_count_get(CPU_COUNT_FLTGET);
	u.flt_anon = cpu_count_get(CPU_COUNT_FLT_ANON);
	u.flt_acow = cpu_count_get(CPU_COUNT_FLT_ACOW);
	u.flt_obj = cpu_count_get(CPU_COUNT_FLT_OBJ);
	u.flt_prcopy = cpu_count_get(CPU_COUNT_FLT_PRCOPY);
	u.flt_przero = cpu_count_get(CPU_COUNT_FLT_PRZERO);
	u.pdwoke = uvmexp.pdwoke;
	u.pdrevs = uvmexp.pdrevs;
	u.pdfreed = uvmexp.pdfreed;
	u.pdscans = uvmexp.pdscans;
	u.pdanscan = uvmexp.pdanscan;
	u.pdobscan = uvmexp.pdobscan;
	u.pdreact = uvmexp.pdreact;
	u.pdbusy = uvmexp.pdbusy;
	u.pdpageouts = uvmexp.pdpageouts;
	u.pdpending = uvmexp.pdpending;
	u.pddeact = uvmexp.pddeact;
	u.anonpages = cpu_count_get(CPU_COUNT_ANONPAGES);
	u.filepages = cpu_count_get(CPU_COUNT_FILEPAGES);
	u.execpages = cpu_count_get(CPU_COUNT_EXECPAGES);
	u.colorhit = cpu_count_get(CPU_COUNT_COLORHIT);
	u.colormiss = cpu_count_get(CPU_COUNT_COLORMISS);
	u.ncolors = uvmexp.ncolors;
	u.bootpages = uvmexp.bootpages;
	u.poolpages = pool_totalpages();
	u.countsyncone = cpu_count_get(CPU_COUNT_SYNC_ONE);
	u.countsyncall = cpu_count_get(CPU_COUNT_SYNC_ALL);
	u.anonunknown = (int)cpu_count_get(CPU_COUNT_ANONUNKNOWN);
	u.anonclean = (int)cpu_count_get(CPU_COUNT_ANONCLEAN);
	u.anondirty = (int)cpu_count_get(CPU_COUNT_ANONDIRTY);
	u.fileunknown = (int)cpu_count_get(CPU_COUNT_FILEUNKNOWN);
	u.fileclean = (int)cpu_count_get(CPU_COUNT_FILECLEAN);
	u.filedirty = (int)cpu_count_get(CPU_COUNT_FILEDIRTY);

	node = *rnode;
	node.sysctl_data = &u;
	node.sysctl_size = sizeof(u);
	if (oldlenp)
		node.sysctl_size = uimin(*oldlenp, node.sysctl_size);
	return (sysctl_lookup(SYSCTLFN_CALL(&node)));
}

/*
 * sysctl helper routine for uvm_pctparam.
 */
static int
uvm_sysctlpctparam(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;
	struct uvm_pctparam *pct;

	pct = rnode->sysctl_data;
	t = pct->pct_pct;

	node = *rnode;
	node.sysctl_data = &t;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (t < 0 || t > 100)
		return EINVAL;

	error = uvm_pctparam_check(pct, t);
	if (error) {
		return error;
	}
	uvm_pctparam_set(pct, t);

	return (0);
}

/*
 * sysctl_vm_setup: create the CTL_VM sysctl subtree for UVM.
 */
SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "vmmeter",
		       SYSCTL_DESCR("Simple system-wide virtual memory "
				    "statistics"),
		       sysctl_vm_meter, 0, NULL, sizeof(struct vmtotal),
		       CTL_VM, VM_METER, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "loadavg",
		       SYSCTL_DESCR("System load average history"),
		       NULL, 0, &averunnable, sizeof(averunnable),
		       CTL_VM, VM_LOADAVG, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "uvmexp",
		       SYSCTL_DESCR("Detailed system-wide virtual memory "
				    "statistics"),
		       sysctl_vm_uvmexp, 0, &uvmexp, sizeof(uvmexp),
		       CTL_VM, VM_UVMEXP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "uvmexp2",
		       SYSCTL_DESCR("Detailed system-wide virtual memory "
				    "statistics (MI)"),
		       sysctl_vm_uvmexp2, 0, NULL, 0,
		       CTL_VM, VM_UVMEXP2, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT, CTLTYPE_INT, "maxslp",
		       SYSCTL_DESCR("Maximum process sleep time before being "
				    "swapped"),
		       NULL, 0, &maxslp, 0,
		       CTL_VM, VM_MAXSLP, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "uspace",
		       SYSCTL_DESCR("Number of bytes allocated for a kernel "
				    "stack"),
		       NULL, USPACE, NULL, 0,
		       CTL_VM, VM_USPACE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_BOOL, "idlezero",
		       SYSCTL_DESCR("Whether to try to zero pages in the idle loop"),
		       NULL, 0, &vm_page_zero_enable, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_LONG, "minaddress",
		       SYSCTL_DESCR("Minimum user address"),
		       NULL, VM_MIN_ADDRESS, NULL, 0,
		       CTL_VM, VM_MINADDRESS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_LONG, "maxaddress",
		       SYSCTL_DESCR("Maximum user address"),
		       NULL, VM_MAX_ADDRESS, NULL, 0,
		       CTL_VM, VM_MAXADDRESS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED,
		       CTLTYPE_INT, "guard_size",
		       SYSCTL_DESCR("Guard size of main thread"),
		       NULL, 0, &user_stack_guard_size, 0,
		       CTL_VM, VM_GUARD_SIZE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_UNSIGNED|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "thread_guard_size",
		       SYSCTL_DESCR("Guard size of other threads"),
		       NULL, 0, &user_thread_stack_guard_size, 0,
		       CTL_VM, VM_THREAD_GUARD_SIZE, CTL_EOL);
#ifdef PMAP_DIRECT
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_BOOL, "ubc_direct",
		       SYSCTL_DESCR("Use direct map for UBC I/O"),
		       NULL, 0, &ubc_direct, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
#endif

	uvmpdpol_sysctlsetup();
}

/*
 * uvm_total: calculate the current state of the system.
 */
static void
uvm_total(struct vmtotal *totalp)
{
	struct lwp *l;
#if 0
	struct vm_map_entry *	entry;
	struct vm_map *map;
	int paging;
#endif
	int freepg;
	int active;

	memset(totalp, 0, sizeof *totalp);

	/*
	 * calculate process statistics
	 */
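	/*
	 * Classify each LWP: t_dw counts uninterruptible sleeps, t_sl
	 * interruptible sleeps shorter than maxslp, and t_rq runnable
	 * threads (stopped LWPs are counted like sleepers; idle LWPs
	 * and long sleepers are otherwise skipped).
	 */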
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_proc->p_flag & PK_SYSTEM)
			continue;
		switch (l->l_stat) {
		case 0:
			continue;

		case LSSLEEP:
		case LSSTOP:
			if ((l->l_flag & LW_SINTR) == 0) {
				totalp->t_dw++;
			} else if (l->l_slptime < maxslp) {
				totalp->t_sl++;
			}
			if (l->l_slptime >= maxslp)
				continue;
			break;

		case LSRUN:
		case LSONPROC:
		case LSIDL:
			totalp->t_rq++;
			if (l->l_stat == LSIDL)
				continue;
			break;
		}
		/*
		 * note active objects
		 */
#if 0
		/*
		 * XXXCDC: BOGUS!  rethink this.  in the meantime
		 * don't do it.
		 */
		paging = 0;
		map = &l->l_proc->p_vmspace->vm_map;
		vm_map_lock(map);
		for (entry = map->header.next;
		    entry != &map->header; entry = entry->next) {
			if (entry->is_a_map || entry->is_sub_map ||
			    entry->object.uvm_obj == NULL)
				continue;
			/* XXX how to do this with uvm */
		}
		vm_map_unlock(map);
		if (paging)
			totalp->t_pw++;
#endif
	}
	mutex_exit(proc_lock);

	/*
	 * Calculate object memory usage statistics.
	 */
	freepg = uvm_availmem();
	uvm_estimatepageable(&active, NULL);
	totalp->t_free = freepg;
	totalp->t_vm = uvmexp.npages - freepg + uvmexp.swpginuse;
	totalp->t_avm = active + uvmexp.swpginuse;	/* XXX */
	totalp->t_rm = uvmexp.npages - freepg;
	totalp->t_arm = active;
	totalp->t_vmshr = 0;		/* XXX */
	totalp->t_avmshr = 0;		/* XXX */
	totalp->t_rmshr = 0;		/* XXX */
	totalp->t_armshr = 0;		/* XXX */
}

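/*
 * uvm_pctparam: a percentage tunable.  The value is kept both as the
 * raw percentage (pct_pct) and pre-scaled against UVM_PCTPARAM_SCALE
 * (pct_scaled), presumably so callers can apply the percentage without
 * dividing by 100 each time; an optional pct_check hook validates new
 * values coming in from sysctl.
 */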
void
uvm_pctparam_set(struct uvm_pctparam *pct, int val)
{

	pct->pct_pct = val;
	pct->pct_scaled = val * UVM_PCTPARAM_SCALE / 100;
}

int
uvm_pctparam_get(struct uvm_pctparam *pct)
{

	return pct->pct_pct;
}

int
uvm_pctparam_check(struct uvm_pctparam *pct, int val)
{

	if (pct->pct_check == NULL) {
		return 0;
	}
	return (*pct->pct_check)(pct, val);
}

void
uvm_pctparam_init(struct uvm_pctparam *pct, int val,
    int (*fn)(struct uvm_pctparam *, int))
{

	pct->pct_check = fn;
	uvm_pctparam_set(pct, val);
}

int
uvm_pctparam_createsysctlnode(struct uvm_pctparam *pct, const char *name,
    const char *desc)
{

	return sysctl_createv(NULL, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, name, SYSCTL_DESCR(desc),
	    uvm_sysctlpctparam, 0, (void *)pct, 0, CTL_VM, CTL_CREATE, CTL_EOL);
}

/*
 * Update uvmexp with aggregate values from the per-CPU counters.
 */
void
uvm_update_uvmexp(void)
{

	cpu_count_sync_all();

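	/*
	 * The legacy struct uvmexp still uses plain int fields, so the
	 * 64-bit per-CPU counts are truncated to int below.
	 */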
	uvmexp.free = (int)uvm_availmem();
	uvmexp.zeropages = (int)cpu_count_get(CPU_COUNT_ZEROPAGES);
	uvmexp.cpuhit = (int)cpu_count_get(CPU_COUNT_CPUHIT);
	uvmexp.cpumiss = (int)cpu_count_get(CPU_COUNT_CPUMISS);
	uvmexp.faults = (int)cpu_count_get(CPU_COUNT_NFAULT);
	uvmexp.traps = (int)cpu_count_get(CPU_COUNT_NTRAP);
	uvmexp.intrs = (int)cpu_count_get(CPU_COUNT_NINTR);
	uvmexp.swtch = (int)cpu_count_get(CPU_COUNT_NSWTCH);
	uvmexp.softs = (int)cpu_count_get(CPU_COUNT_NSOFT);
	uvmexp.syscalls = (int)cpu_count_get(CPU_COUNT_NSYSCALL);
	uvmexp.pageins = (int)cpu_count_get(CPU_COUNT_PAGEINS);
	uvmexp.forks = (int)cpu_count_get(CPU_COUNT_FORKS);
	uvmexp.forks_ppwait = (int)cpu_count_get(CPU_COUNT_FORKS_PPWAIT);
	uvmexp.forks_sharevm = (int)cpu_count_get(CPU_COUNT_FORKS_SHAREVM);
	uvmexp.pga_zerohit = (int)cpu_count_get(CPU_COUNT_PGA_ZEROHIT);
	uvmexp.pga_zeromiss = (int)cpu_count_get(CPU_COUNT_PGA_ZEROMISS);
	uvmexp.fltnoram = (int)cpu_count_get(CPU_COUNT_FLTNORAM);
	uvmexp.fltnoanon = (int)cpu_count_get(CPU_COUNT_FLTNOANON);
	uvmexp.fltpgwait = (int)cpu_count_get(CPU_COUNT_FLTPGWAIT);
	uvmexp.fltpgrele = (int)cpu_count_get(CPU_COUNT_FLTPGRELE);
	uvmexp.fltrelck = (int)cpu_count_get(CPU_COUNT_FLTRELCK);
	uvmexp.fltrelckok = (int)cpu_count_get(CPU_COUNT_FLTRELCKOK);
	uvmexp.fltanget = (int)cpu_count_get(CPU_COUNT_FLTANGET);
	uvmexp.fltanretry = (int)cpu_count_get(CPU_COUNT_FLTANRETRY);
	uvmexp.fltamcopy = (int)cpu_count_get(CPU_COUNT_FLTAMCOPY);
	uvmexp.fltnamap = (int)cpu_count_get(CPU_COUNT_FLTNAMAP);
	uvmexp.fltnomap = (int)cpu_count_get(CPU_COUNT_FLTNOMAP);
	uvmexp.fltlget = (int)cpu_count_get(CPU_COUNT_FLTLGET);
	uvmexp.fltget = (int)cpu_count_get(CPU_COUNT_FLTGET);
	uvmexp.flt_anon = (int)cpu_count_get(CPU_COUNT_FLT_ANON);
	uvmexp.flt_acow = (int)cpu_count_get(CPU_COUNT_FLT_ACOW);
	uvmexp.flt_obj = (int)cpu_count_get(CPU_COUNT_FLT_OBJ);
	uvmexp.flt_prcopy = (int)cpu_count_get(CPU_COUNT_FLT_PRCOPY);
	uvmexp.flt_przero = (int)cpu_count_get(CPU_COUNT_FLT_PRZERO);
	uvmexp.anonpages = (int)cpu_count_get(CPU_COUNT_ANONPAGES);
	uvmexp.filepages = (int)cpu_count_get(CPU_COUNT_FILEPAGES);
	uvmexp.execpages = (int)cpu_count_get(CPU_COUNT_EXECPAGES);
	uvmexp.colorhit = (int)cpu_count_get(CPU_COUNT_COLORHIT);
	uvmexp.colormiss = (int)cpu_count_get(CPU_COUNT_COLORMISS);
}
498