/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <nlist.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "memstat.h"
#include "memstat_internal.h"

static int memstat_malloc_zone_count;
static int memstat_malloc_zone_sizes[32];

static int memstat_malloc_zone_init(void);
static int memstat_malloc_zone_init_kvm(kvm_t *kvm);

static struct nlist namelist[] = {
#define	X_KMEMSTATISTICS	0
	{ .n_name = "_kmemstatistics" },
#define	X_KMEMZONES		1
	{ .n_name = "_kmemzones" },
#define	X_NUMZONES		2
	{ .n_name = "_numzones" },
#define	X_VM_MALLOC_ZONE_COUNT	3
	{ .n_name = "_vm_malloc_zone_count" },
#define	X_MP_MAXCPUS		4
	{ .n_name = "_mp_maxcpus" },
	{ .n_name = "" },
};

/*
 * Extract malloc(9) statistics from the running kernel, and store all memory
 * type information in the passed list.  For each type, check the list for an
 * existing entry with the right name/allocator -- if present, update that
 * entry.  Otherwise, add a new entry.  On error, the entire list will be
 * cleared, as entries will be in an inconsistent state.
 *
 * To reduce the level of work for a list that starts empty, we keep around a
 * hint as to whether it was empty when we began, so we can avoid searching
 * the list for entries to update.  Updates are O(n^2) due to searching for
 * each entry before adding it.
 */
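/*
 * For reference, a minimal consumer of this interface might look roughly
 * like the sketch below (illustrative only, not part of the library; it
 * assumes the public libmemstat(3) accessors and err(3)):
 *
 *	struct memory_type_list *mtlp;
 *	struct memory_type *mtp;
 *
 *	mtlp = memstat_mtl_alloc();
 *	if (mtlp == NULL)
 *		err(1, "memstat_mtl_alloc");
 *	if (memstat_sysctl_malloc(mtlp, 0) < 0)
 *		errx(1, "memstat_sysctl_malloc: error %d",
 *		    memstat_mtl_geterror(mtlp));
 *	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
 *	    mtp = memstat_mtl_next(mtp))
 *		printf("%s: %ju bytes in %ju allocations\n",
 *		    memstat_get_name(mtp),
 *		    (uintmax_t)memstat_get_bytes(mtp),
 *		    (uintmax_t)memstat_get_count(mtp));
 *	memstat_mtl_free(mtlp);
 */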
int
memstat_sysctl_malloc(struct memory_type_list *list, int flags)
{
	struct malloc_type_stream_header *mtshp;
	struct malloc_type_header *mthp;
	struct malloc_type_stats *mtsp;
	struct memory_type *mtp;
	int count, hint_dontsearch, i, j, maxcpus;
	char *buffer, *p;
	size_t size;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	/*
	 * Query the number of CPUs and the number of malloc types so that
	 * we can guess an initial buffer size.  We loop until we succeed or
	 * really fail.  Note that the value of maxcpus we query using sysctl
	 * is not the version we use when processing the real data -- that is
	 * read from the header.
	 */
retry:
	size = sizeof(maxcpus);
	if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}
	if (size != sizeof(maxcpus)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	size = sizeof(count);
	if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}
	if (size != sizeof(count)) {
		list->mtl_error = MEMSTAT_ERROR_DATAERROR;
		return (-1);
	}

	if (memstat_malloc_zone_init() == -1) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		return (-1);
	}

	size = sizeof(*mthp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
	    maxcpus);

	buffer = malloc(size);
	if (buffer == NULL) {
		list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
		return (-1);
	}

	if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
		/*
		 * XXXRW: ENOMEM is an ambiguous return, we should bound the
		 * number of loops, perhaps.
		 */
		if (errno == ENOMEM) {
			free(buffer);
			goto retry;
		}
		if (errno == EACCES || errno == EPERM)
			list->mtl_error = MEMSTAT_ERROR_PERMISSION;
		else
			list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
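	/*
	 * One way to address the XXXRW note above would be to bound the
	 * number of ENOMEM retries explicitly; a rough sketch only, not the
	 * current behaviour, with an arbitrary limit of 10 attempts:
	 *
	 *	if (errno == ENOMEM && ++retries < 10) {
	 *		free(buffer);
	 *		goto retry;
	 *	}
	 *
	 * where "retries" would be a local int initialized to 0 before the
	 * retry label.
	 */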

	if (size == 0) {
		free(buffer);
		return (0);
	}

	if (size < sizeof(*mtshp)) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}
	p = buffer;
	mtshp = (struct malloc_type_stream_header *)p;
	p += sizeof(*mtshp);

	if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
		list->mtl_error = MEMSTAT_ERROR_VERSION;
		free(buffer);
		return (-1);
	}

	/*
	 * For the remainder of this function, we are quite trusting about
	 * the layout of structures and sizes, since we've determined we have
	 * a matching version and acceptable CPU count.
	 */
	maxcpus = mtshp->mtsh_maxcpus;
	count = mtshp->mtsh_count;
	for (i = 0; i < count; i++) {
		mthp = (struct malloc_type_header *)p;
		p += sizeof(*mthp);

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
			    mthp->mth_name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    mthp->mth_name, maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			free(buffer);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * Reset the statistics on the current node.
		 */
		_memstat_mt_reset_stats(mtp, maxcpus);

		for (j = 0; j < maxcpus; j++) {
			mtsp = (struct malloc_type_stats *)p;
			p += sizeof(*mtsp);

			/*
			 * Summarize raw statistics across CPUs into coalesced
			 * statistics.
			 */
			mtp->mt_memalloced += mtsp->mts_memalloced;
			mtp->mt_memfreed += mtsp->mts_memfreed;
			mtp->mt_numallocs += mtsp->mts_numallocs;
			mtp->mt_numfrees += mtsp->mts_numfrees;
			mtp->mt_sizemask |= mtsp->mts_size;

			/*
			 * Copies of per-CPU statistics.
			 */
			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mtsp->mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mtsp->mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mtsp->mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mtsp->mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mtsp->mts_size;
		}

		/*
		 * Derived cross-CPU statistics.
		 */
		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	free(buffer);

	return (0);
}

static int
kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
	    size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

static int
kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
{
	ssize_t ret;
	int i;

	for (i = 0; i < buflen; i++) {
		ret = kvm_read(kvm, __DECONST(unsigned long, kvm_pointer) +
		    i, &(buffer[i]), sizeof(char));
		if (ret < 0)
			return (MEMSTAT_ERROR_KVM);
		if ((size_t)ret != sizeof(char))
			return (MEMSTAT_ERROR_KVM_SHORTREAD);
		if (buffer[i] == '\0')
			return (0);
	}
	/* Truncate. */
	buffer[i-1] = '\0';
	return (0);
}

static int
kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
    size_t offset)
{
	ssize_t ret;

	ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

static int
kread_zpcpu(kvm_t *kvm, u_long base, void *buf, size_t size, int cpu)
{
	ssize_t ret;

	ret = kvm_read_zpcpu(kvm, base, buf, size, cpu);
	if (ret < 0)
		return (MEMSTAT_ERROR_KVM);
	if ((size_t)ret != size)
		return (MEMSTAT_ERROR_KVM_SHORTREAD);
	return (0);
}

int
memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
{
	struct memory_type *mtp;
	void *kmemstatistics;
	int hint_dontsearch, j, mp_maxcpus, mp_ncpus, ret;
	char name[MEMTYPE_MAXNAME];
	struct malloc_type_stats mts;
	struct malloc_type_internal *mtip;
	struct malloc_type type, *typep;
	kvm_t *kvm;

	kvm = (kvm_t *)kvm_handle;

	hint_dontsearch = LIST_EMPTY(&list->mtl_list);

	if (kvm_nlist(kvm, namelist) != 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM;
		return (-1);
	}

	if (namelist[X_KMEMSTATISTICS].n_type == 0 ||
	    namelist[X_KMEMSTATISTICS].n_value == 0) {
		list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
		return (-1);
	}

	ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus,
	    sizeof(mp_maxcpus), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	ret = kread_symbol(kvm, X_KMEMSTATISTICS, &kmemstatistics,
	    sizeof(kmemstatistics), 0);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	ret = memstat_malloc_zone_init_kvm(kvm);
	if (ret != 0) {
		list->mtl_error = ret;
		return (-1);
	}

	mp_ncpus = kvm_getncpus(kvm);

	for (typep = kmemstatistics; typep != NULL; typep = type.ks_next) {
		ret = kread(kvm, typep, &type, sizeof(type), 0);
		if (ret != 0) {
			_memstat_mtl_empty(list);
			list->mtl_error = ret;
			return (-1);
		}
		ret = kread_string(kvm, (void *)type.ks_shortdesc, name,
		    MEMTYPE_MAXNAME);
		if (ret != 0) {
			_memstat_mtl_empty(list);
			list->mtl_error = ret;
			return (-1);
		}
		if (type.ks_version != M_VERSION) {
			warnx("type %s with unsupported version %lu; skipped",
			    name, type.ks_version);
			continue;
		}

		/*
		 * Since our compile-time value for MAXCPU may differ from the
		 * kernel's, we populate our own array.
		 */
		mtip = &type.ks_mti;

		if (hint_dontsearch == 0) {
			mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
		} else
			mtp = NULL;
		if (mtp == NULL)
			mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
			    name, mp_maxcpus);
		if (mtp == NULL) {
			_memstat_mtl_empty(list);
			list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
			return (-1);
		}

		/*
		 * This logic is replicated from kern_malloc.c, and should
		 * be kept in sync.
		 */
		_memstat_mt_reset_stats(mtp, mp_maxcpus);
		for (j = 0; j < mp_ncpus; j++) {
			ret = kread_zpcpu(kvm, (u_long)mtip->mti_stats, &mts,
			    sizeof(mts), j);
			if (ret != 0) {
				_memstat_mtl_empty(list);
				list->mtl_error = ret;
				return (-1);
			}
			mtp->mt_memalloced += mts.mts_memalloced;
			mtp->mt_memfreed += mts.mts_memfreed;
			mtp->mt_numallocs += mts.mts_numallocs;
			mtp->mt_numfrees += mts.mts_numfrees;
			mtp->mt_sizemask |= mts.mts_size;

			mtp->mt_percpu_alloc[j].mtp_memalloced =
			    mts.mts_memalloced;
			mtp->mt_percpu_alloc[j].mtp_memfreed =
			    mts.mts_memfreed;
			mtp->mt_percpu_alloc[j].mtp_numallocs =
			    mts.mts_numallocs;
			mtp->mt_percpu_alloc[j].mtp_numfrees =
			    mts.mts_numfrees;
			mtp->mt_percpu_alloc[j].mtp_sizemask =
			    mts.mts_size;
		}
		for (; j < mp_maxcpus; j++) {
			bzero(&mtp->mt_percpu_alloc[j],
			    sizeof(mtp->mt_percpu_alloc[0]));
		}

		mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
		mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
	}

	return (0);
}
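/*
 * A rough sketch of kvm-based usage of the function above (illustrative
 * only; it assumes the standard kvm(3) and libmemstat(3) interfaces, and
 * passes NULL paths to kvm_openfiles() to select the running system):
 *
 *	kvm_t *kvm;
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct memory_type_list *mtlp;
 *
 *	kvm = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *	if (kvm == NULL)
 *		errx(1, "kvm_openfiles: %s", errbuf);
 *	mtlp = memstat_mtl_alloc();
 *	if (mtlp == NULL)
 *		err(1, "memstat_mtl_alloc");
 *	if (memstat_kvm_malloc(mtlp, kvm) < 0)
 *		errx(1, "memstat_kvm_malloc: error %d",
 *		    memstat_mtl_geterror(mtlp));
 *	... iterate the list as with memstat_sysctl_malloc() ...
 *	memstat_mtl_free(mtlp);
 *	kvm_close(kvm);
 */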

static int
memstat_malloc_zone_init(void)
{
	size_t size;

	size = sizeof(memstat_malloc_zone_count);
	if (sysctlbyname("vm.malloc.zone_count", &memstat_malloc_zone_count,
	    &size, NULL, 0) < 0) {
		return (-1);
	}

	if (memstat_malloc_zone_count > (int)nitems(memstat_malloc_zone_sizes)) {
		return (-1);
	}

	size = sizeof(memstat_malloc_zone_sizes);
	if (sysctlbyname("vm.malloc.zone_sizes", &memstat_malloc_zone_sizes,
	    &size, NULL, 0) < 0) {
		return (-1);
	}

	return (0);
}

/*
 * Copied from kern_malloc.c
 *
 * kz_zone is an array sized at compilation time, the size is exported in
 * "numzones".  Below we need to iterate kz_size.
 */
struct memstat_kmemzone {
	int kz_size;
	const char *kz_name;
	void *kz_zone[1];
};

static int
memstat_malloc_zone_init_kvm(kvm_t *kvm)
{
	struct memstat_kmemzone *kmemzones, *kz;
	int numzones, objsize, allocsize, ret;
	int i;

	ret = kread_symbol(kvm, X_VM_MALLOC_ZONE_COUNT,
	    &memstat_malloc_zone_count, sizeof(memstat_malloc_zone_count), 0);
	if (ret != 0) {
		return (ret);
	}

	ret = kread_symbol(kvm, X_NUMZONES, &numzones, sizeof(numzones), 0);
	if (ret != 0) {
		return (ret);
	}

	objsize = __offsetof(struct memstat_kmemzone, kz_zone) +
	    sizeof(void *) * numzones;

	allocsize = objsize * memstat_malloc_zone_count;
	kmemzones = malloc(allocsize);
	if (kmemzones == NULL) {
		return (MEMSTAT_ERROR_NOMEMORY);
	}
	ret = kread_symbol(kvm, X_KMEMZONES, kmemzones, allocsize, 0);
	if (ret != 0) {
		free(kmemzones);
		return (ret);
	}

	kz = kmemzones;
	for (i = 0; i < (int)nitems(memstat_malloc_zone_sizes); i++) {
		memstat_malloc_zone_sizes[i] = kz->kz_size;
		kz = (struct memstat_kmemzone *)((char *)kz + objsize);
	}

	free(kmemzones);
	return (0);
}

size_t
memstat_malloc_zone_get_count(void)
{

	return (memstat_malloc_zone_count);
}

size_t
memstat_malloc_zone_get_size(size_t n)
{

	if (n >= nitems(memstat_malloc_zone_sizes)) {
		return (-1);
	}

	return (memstat_malloc_zone_sizes[n]);
}

int
memstat_malloc_zone_used(const struct memory_type *mtp, size_t n)
{

	if (memstat_get_sizemask(mtp) & (1 << n))
		return (1);

	return (0);
}
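/*
 * Illustrative only: a consumer might combine the zone accessors above with
 * a type's size mask to report which malloc(9) size zones a given type has
 * drawn from ("mtp" is assumed to come from memstat_mtl_first()/_next()):
 *
 *	size_t i, zones;
 *
 *	zones = memstat_malloc_zone_get_count();
 *	for (i = 0; i < zones; i++)
 *		if (memstat_malloc_zone_used(mtp, i))
 *			printf("%s used the %zu-byte zone\n",
 *			    memstat_get_name(mtp),
 *			    memstat_malloc_zone_get_size(i));
 */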