/*	$OpenBSD: t8.2,v 1.2 2024/07/10 09:20:33 krw Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <uvm/uvm_extern.h>

static struct vm_map_intrsafe kmem_map_store;
struct vm_map *kmem_map = NULL;

#ifdef NKMEMCLUSTERS
#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
#endif

/*
 * Default number of pages in kmem_map.  We attempt to calculate this
 * at run-time, but allow it to be either patched or set in the kernel
 * config file.
 */
#ifndef NKMEMPAGES
#define	NKMEMPAGES	0
#endif
int	nkmempages = NKMEMPAGES;
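/*
 * A value of 0 here means "not yet decided": kmeminit_nkmempages()
 * below replaces it at boot with an estimate based on physmem.
 */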
/*
 * Defaults for lower- and upper-bounds for the kmem_map page count.
 * Can be overridden by kernel config options.
 */
#ifndef	NKMEMPAGES_MIN
#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
#endif

struct kmembuckets bucket[MINBUCKET + 16];
struct kmemstats kmemstats[M_LAST];
struct kmemusage *kmemusage;
char *kmembase, *kmemlimit;
char buckstring[16 * sizeof("123456,")];
int buckstring_init = 0;
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
char *memname[] = INITKMEMNAMES;
char *memall = NULL;
extern struct lock sysctl_kmemlock;
#endif

#ifdef DIAGNOSTIC
/*
 * This structure provides a set of masks to catch unaligned frees.
 */
const long addrmask[] = { 0,
	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
};

/*
 * The WEIRD_ADDR is used as known text to copy into free objects so
 * that modifications after frees can be detected.
 */
#define WEIRD_ADDR	((unsigned) 0xdeadbeef)
#define MAX_COPY	32

/*
 * Normally the freelist structure is used only to hold the list pointer
 * for free objects.  However, when running with diagnostics, the first
 * 8 bytes of the structure is unused except for diagnostic information,
 * and the free list pointer is at offset 8 in the structure.  Since the
 * first 8 bytes is the portion of the structure most often modified, this
 * helps to detect memory reuse problems and avoid free list corruption.
 */
struct freelist {
	int32_t	spare0;
	int16_t	type;
	int16_t	spare1;
	caddr_t	next;
};
#else /* !DIAGNOSTIC */
struct freelist {
	caddr_t	next;
};
#endif /* DIAGNOSTIC */
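/*
 * Sketch of the diagnostic scheme: free() fills the first MAX_COPY
 * bytes of a chunk (or the whole chunk, if smaller) with WEIRD_ADDR
 * and records the malloc type in "type"; malloc() later verifies the
 * pattern is intact before reusing the chunk, and a spare0 still
 * equal to WEIRD_ADDR is the quick hint that an item being freed may
 * already be on the free list.
 */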
/*
 * Allocate a block of memory
 */
void *
malloc(size, type, flags)
	unsigned long size;
	int type, flags;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long indx, npg, allocsize;
	int s;
	caddr_t va, cp, savedlist;
#ifdef DIAGNOSTIC
	int32_t *end, *lp;
	int copysize;
	char *savedtype;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];

	if (((unsigned long)type) >= M_LAST)
		panic("malloc - bogus type");
#endif

#ifdef MALLOC_DEBUG
	if (debug_malloc(size, type, flags, (void **)&va))
		return ((void *) va);
#endif
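	/*
	 * Pick the power-of-two bucket the request falls into; e.g.
	 * with the usual MINBUCKET of 4, a 100-byte request maps to
	 * the 128-byte bucket (BUCKETINDX(100) == 7, 1 << 7 == 128).
	 */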
	indx = BUCKETINDX(size);
	kbp = &bucket[indx];
	s = splvm();
#ifdef KMEMSTATS
	while (ksp->ks_memuse >= ksp->ks_limit) {
		if (flags & M_NOWAIT) {
			splx(s);
			return ((void *) NULL);
		}
		if (ksp->ks_limblocks < 65535)
			ksp->ks_limblocks++;
		tsleep((caddr_t)ksp, PSWP+2, memname[type], 0);
	}
	ksp->ks_size |= 1 << indx;
#endif
#ifdef DIAGNOSTIC
	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
#endif
	if (kbp->kb_next == NULL) {
		kbp->kb_last = NULL;
		if (size > MAXALLOCSAVE)
			allocsize = round_page(size);
		else
			allocsize = 1 << indx;
		npg = btoc(allocsize);
		va = (caddr_t) uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
		    (vsize_t)ctob(npg),
		    (flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0);
		if (va == NULL) {
			/*
			 * Kmem_malloc() can return NULL, even if it can
			 * wait, if there is no map space available, because
			 * it can't fix that problem.  Neither can we,
			 * right now.  (We should release pages which
			 * are completely free and which are in buckets
			 * with too many free elements.)
			 */
			if ((flags & M_NOWAIT) == 0)
				panic("malloc: out of space in kmem_map");
			splx(s);
			return ((void *) NULL);
		}
#ifdef KMEMSTATS
		kbp->kb_total += kbp->kb_elmpercl;
#endif
		kup = btokup(va);
		kup->ku_indx = indx;
		if (allocsize > MAXALLOCSAVE) {
			if (npg > 65535)
				panic("malloc: allocation too large");
			kup->ku_pagecnt = npg;
#ifdef KMEMSTATS
			ksp->ks_memuse += allocsize;
#endif
			goto out;
		}
#ifdef KMEMSTATS
		kup->ku_freecnt = kbp->kb_elmpercl;
		kbp->kb_totalfree += kbp->kb_elmpercl;
#endif
		/*
		 * Just in case we blocked while allocating memory,
		 * and someone else also allocated memory for this
		 * bucket, don't assume the list is still empty.
		 */
		savedlist = kbp->kb_next;
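		/*
		 * Carve the new pages into allocsize-byte chunks,
		 * linking them back-to-front: the bucket head is the
		 * highest-addressed chunk, each chunk points at the
		 * one below it, and the lowest chunk is spliced onto
		 * whatever was already on the list.
		 */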
		kbp->kb_next = cp = va + (npg * PAGE_SIZE) - allocsize;
		for (;;) {
			freep = (struct freelist *)cp;
#ifdef DIAGNOSTIC
			/*
			 * Copy in known text to detect modification
			 * after freeing.
			 */
			end = (int32_t *)&cp[copysize];
			for (lp = (int32_t *)cp; lp < end; lp++)
				*lp = WEIRD_ADDR;
			freep->type = M_FREE;
#endif /* DIAGNOSTIC */
			if (cp <= va)
				break;
			cp -= allocsize;
			freep->next = cp;
		}
		freep->next = savedlist;
		if (kbp->kb_last == NULL)
			kbp->kb_last = (caddr_t)freep;
	}
	va = kbp->kb_next;
	kbp->kb_next = ((struct freelist *)va)->next;
#ifdef DIAGNOSTIC
	freep = (struct freelist *)va;
	savedtype = (unsigned)freep->type < M_LAST ?
	    memname[freep->type] : "???";
	if (kbp->kb_next) {
		int rv;
		vaddr_t addr = (vaddr_t)kbp->kb_next;

		vm_map_lock(kmem_map);
		rv = uvm_map_checkprot(kmem_map, addr,
		    addr + sizeof(struct freelist), VM_PROT_WRITE);
		vm_map_unlock(kmem_map);

		if (!rv) {
			printf("%s %d of object %p size 0x%lx %s %s (invalid addr %p)\n",
			    "Data modified on freelist: word",
			    (int32_t *)&kbp->kb_next - (int32_t *)kbp, va, size,
			    "previous type", savedtype, kbp->kb_next);
			kbp->kb_next = NULL;
		}
	}

	/* Fill the fields that we've used with WEIRD_ADDR */
#if BYTE_ORDER == BIG_ENDIAN
	freep->type = WEIRD_ADDR >> 16;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
	freep->type = (short)WEIRD_ADDR;
#endif
	end = (int32_t *)&freep->next +
	    (sizeof(freep->next) / sizeof(int32_t));
	for (lp = (int32_t *)&freep->next; lp < end; lp++)
		*lp = WEIRD_ADDR;

	/* and check that the data hasn't been modified. */
	end = (int32_t *)&va[copysize];
	for (lp = (int32_t *)va; lp < end; lp++) {
		if (*lp == WEIRD_ADDR)
			continue;
		printf("%s %d of object %p size 0x%lx %s %s (0x%x != 0x%x)\n",
		    "Data modified on freelist: word", lp - (int32_t *)va,
		    va, size, "previous type", savedtype, *lp, WEIRD_ADDR);
		break;
	}

	freep->spare0 = 0;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup = btokup(va);
	if (kup->ku_indx != indx)
		panic("malloc: wrong bucket");
	if (kup->ku_freecnt == 0)
		panic("malloc: lost data");
	kup->ku_freecnt--;
	kbp->kb_totalfree--;
	ksp->ks_memuse += 1 << indx;
out:
	kbp->kb_calls++;
	ksp->ks_inuse++;
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;
#else
out:
#endif
	splx(s);
	return ((void *) va);
}
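/*
 * Typical usage, as a sketch: callers name a malloc type and either
 * wait or accept failure, e.g.
 *
 *	p = malloc(sizeof(struct foo), M_TEMP, M_NOWAIT);
 *	if (p == NULL)
 *		return (ENOMEM);
 *	...
 *	free(p, M_TEMP);
 *
 * With M_WAITOK the call sleeps rather than returning NULL (though
 * it can still panic if kmem_map itself is exhausted, as above).
 */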
/*
 * Free a block of memory allocated by malloc.
 */
void
free(addr, type)
	void *addr;
	int type;
{
	register struct kmembuckets *kbp;
	register struct kmemusage *kup;
	register struct freelist *freep;
	long size;
	int s;
#ifdef DIAGNOSTIC
	caddr_t cp;
	int32_t *end, *lp;
	long alloc, copysize;
#endif
#ifdef KMEMSTATS
	register struct kmemstats *ksp = &kmemstats[type];
#endif

#ifdef MALLOC_DEBUG
	if (debug_free(addr, type))
		return;
#endif

#ifdef DIAGNOSTIC
	if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
		panic("free: non-malloced addr %p type %s", addr,
		    memname[type]);
#endif

	kup = btokup(addr);
	size = 1 << kup->ku_indx;
	kbp = &bucket[kup->ku_indx];
	s = splvm();
#ifdef DIAGNOSTIC
	/*
	 * Check for returns of data that do not point to the
	 * beginning of the allocation.
	 */
	if (size > PAGE_SIZE)
		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
	else
		alloc = addrmask[kup->ku_indx];
	if (((u_long)addr & alloc) != 0)
		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
		    addr, size, memname[type], alloc);
#endif /* DIAGNOSTIC */
	if (size > MAXALLOCSAVE) {
		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt));
#ifdef KMEMSTATS
		size = kup->ku_pagecnt << PGSHIFT;
		ksp->ks_memuse -= size;
		kup->ku_indx = 0;
		kup->ku_pagecnt = 0;
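		/*
		 * Wake sleepers in malloc() only when this free moved
		 * ks_memuse from at-or-above ks_limit to below it.
		 */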
		if (ksp->ks_memuse + size >= ksp->ks_limit &&
		    ksp->ks_memuse < ksp->ks_limit)
			wakeup((caddr_t)ksp);
		ksp->ks_inuse--;
		kbp->kb_total -= 1;
#endif
		splx(s);
		return;
	}
	freep = (struct freelist *)addr;
#ifdef DIAGNOSTIC
	/*
	 * Check for multiple frees. Use a quick check to see if
	 * it looks free before laboriously searching the freelist.
	 */
	if (freep->spare0 == WEIRD_ADDR) {
		for (cp = kbp->kb_next; cp;
		    cp = ((struct freelist *)cp)->next) {
			if (addr != cp)
				continue;
			printf("multiply freed item %p\n", addr);
			panic("free: duplicated free");
		}
	}
	/*
	 * Copy in known text to detect modification after freeing
	 * and to make it look free.  Also, save the type being freed
	 * so we can list likely culprit if modification is detected
	 * when the object is reallocated.
	 */
	copysize = size < MAX_COPY ? size : MAX_COPY;
	end = (int32_t *)&((caddr_t)addr)[copysize];
	for (lp = (int32_t *)addr; lp < end; lp++)
		*lp = WEIRD_ADDR;
	freep->type = type;
#endif /* DIAGNOSTIC */
#ifdef KMEMSTATS
	kup->ku_freecnt++;
	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
		if (kup->ku_freecnt > kbp->kb_elmpercl)
			panic("free: multiple frees");
		else if (kbp->kb_totalfree > kbp->kb_highwat)
			kbp->kb_couldfree++;
	}
	kbp->kb_totalfree++;
	ksp->ks_memuse -= size;
	if (ksp->ks_memuse + size >= ksp->ks_limit &&
	    ksp->ks_memuse < ksp->ks_limit)
		wakeup((caddr_t)ksp);
	ksp->ks_inuse--;
#endif
	if (kbp->kb_next == NULL)
		kbp->kb_next = addr;
	else
		((struct freelist *)kbp->kb_last)->next = addr;
	freep->next = NULL;
	kbp->kb_last = addr;
	splx(s);
}

/*
 * Compute the number of pages that kmem_map will map, that is,
 * the size of the kernel malloc arena.
 */
void
kmeminit_nkmempages()
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before, or
		 * by patching or kernel config options), bail out now.
		 */
		return;
	}

	/*
	 * We use the following (simple) formula:
	 *
	 *	- Starting point is physical memory / 4.
	 *
	 *	- Clamp it down to NKMEMPAGES_MAX.
	 *
	 *	- Round it up to NKMEMPAGES_MIN.
	 */
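	/*
	 * Worked example (assuming 4 KB pages): a 512 MB machine has
	 * physmem == 131072 pages, so the starting point is 32768
	 * pages, i.e. a 128 MB kmem_map, subject to the clamps below.
	 */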
	npages = physmem / 4;

	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * Initialize the kernel memory allocator
 */
void
kmeminit()
{
	vaddr_t base, limit;
#ifdef KMEMSTATS
	long indx;
#endif

#ifdef DIAGNOSTIC
	if (sizeof(struct freelist) > (1 << MINBUCKET))
		panic("kmeminit: minbucket too small/struct freelist too big");
#endif

	/*
	 * Compute the number of kmem_map pages, if we have not
	 * done so already.
	 */
	kmeminit_nkmempages();
	base = vm_map_min(kernel_map);
	kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
	    (vsize_t)(nkmempages * PAGE_SIZE), VM_MAP_INTRSAFE, FALSE,
	    &kmem_map_store.vmi_map);
	kmembase = (char *)base;
	kmemlimit = (char *)limit;
	kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
	    (vsize_t)(nkmempages * sizeof(struct kmemusage)));
#ifdef KMEMSTATS
	for (indx = 0; indx < MINBUCKET + 16; indx++) {
		if (1 << indx >= PAGE_SIZE)
			bucket[indx].kb_elmpercl = 1;
		else
			bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
		bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
	}
	for (indx = 0; indx < M_LAST; indx++)
		kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
#endif
#ifdef MALLOC_DEBUG
	debug_malloc_init();
#endif
}

/*
 * Return kernel malloc statistics information.
 */
int
sysctl_malloc(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	struct kmembuckets kb;
	int i, siz;

	if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
	    name[0] != KERN_MALLOC_KMEMNAMES)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case KERN_MALLOC_BUCKETS:
		/* Initialize the first time */
		if (buckstring_init == 0) {
			buckstring_init = 1;
			bzero(buckstring, sizeof(buckstring));
			for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
				snprintf(buckstring + siz,
				    sizeof buckstring - siz,
				    "%d,", (u_int)(1<<i));
				siz += strlen(buckstring + siz);
			}
			/* Remove trailing comma */
			if (siz)
				buckstring[siz - 1] = '\0';
		}
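		/*
		 * The result is a comma-separated list of the bucket
		 * sizes, e.g. "16,32,64,...,524288" when MINBUCKET is 4.
		 */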
		return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));

	case KERN_MALLOC_BUCKET:
		bcopy(&bucket[BUCKETINDX(name[1])], &kb, sizeof(kb));
		kb.kb_next = kb.kb_last = 0;
		return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
	case KERN_MALLOC_KMEMSTATS:
#ifdef KMEMSTATS
		if ((name[1] < 0) || (name[1] >= M_LAST))
			return (EINVAL);
		return (sysctl_rdstruct(oldp, oldlenp, newp,
		    &kmemstats[name[1]], sizeof(struct kmemstats)));
#else
		return (EOPNOTSUPP);
#endif
	case KERN_MALLOC_KMEMNAMES:
#if defined(KMEMSTATS) || defined(DIAGNOSTIC)
		if (memall == NULL) {
			int totlen;

			i = lockmgr(&sysctl_kmemlock, LK_EXCLUSIVE, NULL, p);
			if (i)
				return (i);

			/* Figure out how large a buffer we need */
			for (totlen = 0, i = 0; i < M_LAST; i++) {
				if (memname[i])
					totlen += strlen(memname[i]);
				totlen++;
			}
			memall = malloc(totlen + M_LAST, M_SYSCTL, M_WAITOK);
			bzero(memall, totlen + M_LAST);
			for (siz = 0, i = 0; i < M_LAST; i++) {
				snprintf(memall + siz,
				    totlen + M_LAST - siz,
				    "%s,", memname[i] ? memname[i] : "");
				siz += strlen(memall + siz);
			}
			/* Remove trailing comma */
			if (siz)
				memall[siz - 1] = '\0';

			/* Now, convert all spaces to underscores */
			for (i = 0; i < totlen; i++)
				if (memall[i] == ' ')
					memall[i] = '_';
			lockmgr(&sysctl_kmemlock, LK_RELEASE, NULL, p);
		}
		return (sysctl_rdstring(oldp, oldlenp, newp, memall));
#else
		return (EOPNOTSUPP);
#endif
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Round up a size to how much malloc would actually allocate.
 */
size_t
malloc_roundup(size_t sz)
{
	if (sz > MAXALLOCSAVE)
		return round_page(sz);

	return (1 << BUCKETINDX(sz));
}
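/*
 * E.g. with the power-of-two buckets above, malloc_roundup(100)
 * returns 128; anything above MAXALLOCSAVE rounds up to a whole
 * number of pages.
 */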