/*
 * KERN_SLABALLOC.C	- Kernel SLAB memory allocator
 *
 * Copyright (c) 2003,2004,2010-2019 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * This module implements a slab allocator drop-in replacement for the
 * kernel malloc().
 *
 * A slab allocator reserves a ZONE for each chunk size, then lays the
 * chunks out in an array within the zone.  Allocation and deallocation
 * is nearly instantaneous, and fragmentation/overhead losses are limited
 * to a fixed worst-case amount.
 *
 * The downside of this slab implementation is in the chunk size
 * multiplied by the number of zones.  ~80 zones * 128K = 10MB of VM per cpu.
 * In a kernel implementation all this memory will be physical so
 * the zone size is adjusted downward on machines with less physical
 * memory.  The upside is that overhead is bounded... this is the *worst*
 * case overhead.
 *
 * Slab management is done on a per-cpu basis and no locking or mutexes
 * are required, only a critical section.  When one cpu frees memory
 * belonging to another cpu's slab manager an asynchronous IPI message
 * will be queued to execute the operation.   In addition, both the
 * high level slab allocator and the low level zone allocator optimize
 * M_ZERO requests, and the slab allocator does not have to pre-initialize
 * the linked list of chunks.
 *
 * XXX Balancing is needed between cpus.  Balance will be handled through
 * asynchronous IPIs primarily by reassigning the z_Cpu ownership of chunks.
 *
 * XXX If we have to allocate a new zone and M_USE_RESERVE is set, use of
 * the new zone should be restricted to M_USE_RESERVE requests only.
 *
 *	Alloc Size	Chunking        Number of zones
 *	0-127		8		16
 *	128-255		16		8
 *	256-511		32		8
 *	512-1023	64		8
 *	1024-2047	128		8
 *	2048-4095	256		8
 *	4096-8191	512		8
 *	8192-16383	1024		8
 *	16384-32767	2048		8
 *	(if PAGE_SIZE is 4K the maximum zone allocation is 16383)
 *
 *	Allocations >= ZoneLimit go directly to kmem.
 *	(n * PAGE_SIZE, n > 2) allocations go directly to kmem.
 *
 * Alignment properties:
 * - All power-of-2 sized allocations are power-of-2 aligned.
 * - Allocations with M_POWEROF2 are power-of-2 aligned on the nearest
 *   power-of-2 round up of 'size'.
 * - Non-power-of-2 sized allocations are zone chunk size aligned (see the
 *   above table 'Chunking' column).
 *
 *			API REQUIREMENTS AND SIDE EFFECTS
 *
 *    To operate as a drop-in replacement to the FreeBSD-4.x malloc() we
 *    have remained compatible with the following API requirements:
 *
 *    + malloc(0) is allowed and returns non-NULL (ahc driver)
 *    + ability to allocate arbitrarily large chunks of memory
 */
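/*
 * Worked example (illustrative only, derived from the chunking table
 * above and assuming the default zone configuration): a request is
 * rounded up to its zone's chunk size, so the worst-case per-allocation
 * overhead is bounded by the chunking granularity:
 *
 *	kmalloc(100, M_TEMP, M_WAITOK)	-> 104-byte chunk  (8-byte chunking)
 *	kmalloc(1000, M_TEMP, M_WAITOK)	-> 1024-byte chunk (64-byte chunking)
 *	kmalloc(3000, M_TEMP, M_WAITOK)	-> 3072-byte chunk (256-byte chunking)
 *
 * 1024 is a power of 2, so per the alignment properties that allocation
 * is also 1024-byte aligned.
 */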

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/slaballoc.h>
#include <sys/mbuf.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/cpu.h>

#include <sys/thread2.h>
#include <vm/vm_page2.h>

#if (__VM_CACHELINE_SIZE == 32)
#define CAN_CACHEALIGN(sz)	((sz) >= 256)
#elif (__VM_CACHELINE_SIZE == 64)
#define CAN_CACHEALIGN(sz)	((sz) >= 512)
#elif (__VM_CACHELINE_SIZE == 128)
#define CAN_CACHEALIGN(sz)	((sz) >= 1024)
#else
#error "unsupported cacheline size"
#endif
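/*
 * Example (illustrative): on a machine with a 64-byte cache line,
 * CAN_CACHEALIGN(256) is false, so an M_CACHEALIGN request for 256 bytes
 * falls back to M_POWEROF2 in _kmalloc(), while CAN_CACHEALIGN(512) is
 * true and that request can be cache-line aligned as-is.
 */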

#define btokup(z)	(&pmap_kvtom((vm_offset_t)(z))->ku_pagecnt)

#define MEMORY_STRING	"ptr=%p type=%p size=%lu flags=%04x"
#define MEMORY_ARGS	void *ptr, void *type, unsigned long size, int flags

#if !defined(KTR_MEMORY)
#define KTR_MEMORY	KTR_ALL
#endif
KTR_INFO_MASTER(memory);
KTR_INFO(KTR_MEMORY, memory, malloc_beg, 0, "malloc begin");
KTR_INFO(KTR_MEMORY, memory, malloc_end, 1, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_zero, 2, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz, 3, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_ovsz_delayed, 4, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_chunk, 5, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_request, 6, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_beg, 7, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_rem_end, 8, MEMORY_STRING, MEMORY_ARGS);
KTR_INFO(KTR_MEMORY, memory, free_beg, 9, "free begin");
KTR_INFO(KTR_MEMORY, memory, free_end, 10, "free end");

#define logmemory(name, ptr, type, size, flags)				\
	KTR_LOG(memory_ ## name, ptr, type, size, flags)
#define logmemory_quick(name)						\
	KTR_LOG(memory_ ## name)

/*
 * Fixed globals (not per-cpu)
 */
__read_frequently static int ZoneSize;
__read_frequently static int ZoneLimit;
__read_frequently static int ZonePageCount;
__read_frequently static uintptr_t ZoneMask;
__read_frequently struct malloc_type *kmemstatistics;	/* exported to vmstat */

#if defined(INVARIANTS)
static void chunk_mark_allocated(SLZone *z, void *chunk);
static void chunk_mark_free(SLZone *z, void *chunk);
#else
#define chunk_mark_allocated(z, chunk)
#define chunk_mark_free(z, chunk)
#endif

/*
 * Misc constants.  Note that allocations that are exact multiples of
 * PAGE_SIZE, or exceed the zone limit, fall through to the kmem module.
 */
#define ZONE_RELS_THRESH	32		/* threshold number of zones */

#ifdef INVARIANTS
/*
 * The WEIRD_ADDR is used as known text to copy into free objects to
 * try to create deterministic failure cases if the data is accessed after
 * free.
 */
#define WEIRD_ADDR      0xdeadc0de
#endif
#define ZERO_LENGTH_PTR	((void *)-8)

/*
 * Misc global malloc buckets
 */

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
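/*
 * Usage sketch (illustrative; M_MYDRIVER, len and buf are hypothetical,
 * not defined in this file): a subsystem declares its own bucket with
 * MALLOC_DEFINE() and allocates against it so tools such as vmstat -m
 * can attribute the memory:
 *
 *	MALLOC_DEFINE(M_MYDRIVER, "mydriver", "mydriver buffers");
 *	...
 *	buf = kmalloc(len, M_MYDRIVER, M_WAITOK | M_ZERO);
 *	...
 *	kfree(buf, M_MYDRIVER);
 */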

/*
 * Initialize the slab memory allocator.  We have to choose a zone size based
 * on available physical memory.  We choose a zone size which is approximately
 * 1/1024th of our memory, so if we have 128MB of ram we have a zone size of
 * 128K.  The zone size is limited to the bounds set in slaballoc.h
 * (typically 32K min, 128K max).
 */
static void kmeminit(void *dummy);
static void kmemfinishinit(void *dummy);

char *ZeroPage;

SYSINIT(kmem1, SI_BOOT1_ALLOCATOR, SI_ORDER_FIRST, kmeminit, NULL);
SYSINIT(kmem2, SI_BOOT2_POST_SMP, SI_ORDER_FIRST, kmemfinishinit, NULL);

#ifdef INVARIANTS
/*
 * If enabled any memory allocated without M_ZERO is initialized to -1.
 */
__read_frequently static int  use_malloc_pattern;
SYSCTL_INT(_debug, OID_AUTO, use_malloc_pattern, CTLFLAG_RW,
	   &use_malloc_pattern, 0,
	   "Initialize memory to -1 if M_ZERO not specified");

__read_frequently static int32_t weirdary[16];
__read_frequently static int  use_weird_array;
SYSCTL_INT(_debug, OID_AUTO, use_weird_array, CTLFLAG_RW,
	   &use_weird_array, 0,
	   "Initialize memory to weird values on kfree()");
#endif
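/*
 * Example (illustrative): on an INVARIANTS kernel these knobs can be
 * flipped at runtime from userland, e.g.:
 *
 *	sysctl debug.use_malloc_pattern=1
 *	sysctl debug.use_weird_array=1
 */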

__read_frequently static int ZoneRelsThresh = ZONE_RELS_THRESH;
SYSCTL_INT(_kern, OID_AUTO, zone_cache, CTLFLAG_RW, &ZoneRelsThresh, 0, "");
__read_frequently static int kzone_pollfreq = 1;
SYSCTL_INT(_kern, OID_AUTO, kzone_pollfreq, CTLFLAG_RW, &kzone_pollfreq, 0, "");

static struct spinlock kmemstat_spin =
			SPINLOCK_INITIALIZER(&kmemstat_spin, "malinit");
static struct malloc_type *kmemstat_poll;

/*
 * Returns the kernel memory size limit for the purposes of initializing
 * various subsystem caches.  The smaller of available memory and the KVM
 * memory space is returned.
 *
 * The size in megabytes is returned.
 */
size_t
kmem_lim_size(void)
{
    size_t limsize;

    limsize = (size_t)vmstats.v_page_count * PAGE_SIZE;
    if (limsize > KvaSize)
	limsize = KvaSize;
    return (limsize / (1024 * 1024));
}
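/*
 * Example (illustrative): on a machine with roughly 8G of ram and a
 * larger KVM space, kmem_lim_size() returns about 8192 (megabytes);
 * callers scale it back up, e.g.
 *
 *	limsize = kmem_lim_size() * (1024 * 1024);
 */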

static void
kmeminit(void *dummy)
{
    size_t limsize;
    int usesize;
#ifdef INVARIANTS
    int i;
#endif

    limsize = kmem_lim_size();
    usesize = (int)(limsize * 1024);	/* convert to KB */

    /*
     * If the machine has a large KVM space and more than 8G of ram,
     * double the zone release threshold to reduce SMP invalidations.
     * If more than 16G of ram, do it again.
     *
     * The BIOS eats a little ram so add some slop.  We want 8G worth of
     * memory sticks to trigger the first adjustment.
     */
    if (ZoneRelsThresh == ZONE_RELS_THRESH) {
	    if (limsize >= 7 * 1024)
		    ZoneRelsThresh *= 2;
	    if (limsize >= 15 * 1024)
		    ZoneRelsThresh *= 2;
	    if (limsize >= 31 * 1024)
		    ZoneRelsThresh *= 2;
	    if (limsize >= 63 * 1024)
		    ZoneRelsThresh *= 2;
	    if (limsize >= 127 * 1024)
		    ZoneRelsThresh *= 2;
    }

    /*
     * Calculate the zone size.  This typically calculates to
     * ZALLOC_MAX_ZONE_SIZE
     */
    ZoneSize = ZALLOC_MIN_ZONE_SIZE;
    while (ZoneSize < ZALLOC_MAX_ZONE_SIZE && (ZoneSize << 1) < usesize)
	ZoneSize <<= 1;
    ZoneLimit = ZoneSize / 4;
    if (ZoneLimit > ZALLOC_ZONE_LIMIT)
	ZoneLimit = ZALLOC_ZONE_LIMIT;
    ZoneMask = ~(uintptr_t)(ZoneSize - 1);
    ZonePageCount = ZoneSize / PAGE_SIZE;

#ifdef INVARIANTS
    for (i = 0; i < NELEM(weirdary); ++i)
	weirdary[i] = WEIRD_ADDR;
#endif

    ZeroPage = kmem_slab_alloc(PAGE_SIZE, PAGE_SIZE, M_WAITOK|M_ZERO);

    if (bootverbose)
	kprintf("Slab ZoneSize set to %dKB\n", ZoneSize / 1024);
}

/*
 * Once we know how many cpus are configured reduce ZoneRelsThresh
 * based on multiples of 32 cpu threads.
 */
static void
kmemfinishinit(void *dummy)
{
	if (ncpus > 32)
		ZoneRelsThresh = ZoneRelsThresh * 32 / ncpus;
}

/*
 * (low level) Initialize slab-related elements in the globaldata structure.
 *
 * Occurs after kmeminit().
 */
void
slab_gdinit(globaldata_t gd)
{
	SLGlobalData *slgd;
	int i;

	slgd = &gd->gd_slab;
	for (i = 0; i < NZONES; ++i)
		TAILQ_INIT(&slgd->ZoneAry[i]);
	TAILQ_INIT(&slgd->FreeZones);
	TAILQ_INIT(&slgd->FreeOvZones);
}

/*
 * Initialize a malloc type tracking structure.
 */
void
malloc_init(void *data)
{
    struct malloc_type *type = data;
    struct kmalloc_use *use;
    size_t limsize;
    int n;

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (type->ks_limit != 0)
	return;

    if (vmstats.v_page_count == 0)
	panic("malloc_init not allowed before vm init");

    limsize = kmem_lim_size() * (1024 * 1024);
    type->ks_limit = limsize / 10;
    if (type->ks_flags & KSF_OBJSIZE)
	    malloc_mgt_init(type, &type->ks_mgt, type->ks_objsize);

    if (ncpus == 1)
	use = &type->ks_use0;
    else
	use = kmalloc(ncpus * sizeof(*use), M_TEMP, M_WAITOK | M_ZERO);
    if (type->ks_flags & KSF_OBJSIZE) {
	for (n = 0; n < ncpus; ++n)
	    malloc_mgt_init(type, &use[n].mgt, type->ks_objsize);
    }

    spin_lock(&kmemstat_spin);
    type->ks_next = kmemstatistics;
    type->ks_use = use;
    kmemstatistics = type;
    spin_unlock(&kmemstat_spin);
}

void
malloc_uninit(void *data)
{
    struct malloc_type *type = data;
    struct malloc_type *t;
    int i;
#ifdef INVARIANTS
    long ttl;
#endif

    if (type->ks_magic != M_MAGIC)
	panic("malloc type lacks magic");

    if (vmstats.v_page_count == 0)
	panic("malloc_uninit not allowed before vm init");

    if (type->ks_limit == 0)
	panic("malloc_uninit on uninitialized type");

    /* Make sure that all pending kfree()s are finished. */
    lwkt_synchronize_ipiqs("muninit");

    /*
     * Remove from the kmemstatistics list, blocking if the removal races
     * the kmalloc poller.
     *
     * Advance kmemstat_poll if necessary.
     */
    spin_lock(&kmemstat_spin);
    while (type->ks_flags & KSF_POLLING)
	ssleep(type, &kmemstat_spin, 0, "kmuninit", 0);

    if (kmemstat_poll == type)
	kmemstat_poll = type->ks_next;

    if (kmemstatistics == type) {
	kmemstatistics = type->ks_next;
    } else {
	for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
	    if (t->ks_next == type) {
		t->ks_next = type->ks_next;
		break;
	    }
	}
    }
    type->ks_next = NULL;
    type->ks_limit = 0;
    spin_unlock(&kmemstat_spin);

    /*
     * memuse is only correct in aggregation.  Due to memory being allocated
     * on one cpu and freed on another, individual array entries may be
     * negative or positive (canceling each other out).
     */
#ifdef INVARIANTS
    ttl = 0;
#endif
    for (i = 0; i < ncpus; ++i) {
#ifdef INVARIANTS
	ttl += type->ks_use[i].memuse;
#endif
	if (type->ks_flags & KSF_OBJSIZE)
	    malloc_mgt_uninit(type, &type->ks_use[i].mgt);
    }
    if (type->ks_flags & KSF_OBJSIZE)
	malloc_mgt_uninit(type, &type->ks_mgt);
#ifdef INVARIANTS
    if (ttl) {
	kprintf("malloc_uninit: %ld bytes of '%s' still allocated on cpu %d\n",
	    ttl, type->ks_shortdesc, i);
    }
#endif

    if (type->ks_use != &type->ks_use0) {
	kfree(type->ks_use, M_TEMP);
	type->ks_use = NULL;
    }
}

/*
 * Slowly polls all kmalloc zones for cleanup
 */
static void
kmalloc_poller_thread(void)
{
    struct malloc_type *type;

    for (;;) {
	/*
	 * Very slow poll by default, adjustable with sysctl
	 */
	int sticks;

	sticks = kzone_pollfreq;
	cpu_ccfence();
	if (sticks > 0)
		sticks = hz / sticks + 1;	/* approximate */
	else
		sticks = hz;			/* safety */
	tsleep((caddr_t)&sticks, 0, "kmslp", sticks);

	/*
	 * [re]poll one zone each period.
	 */
	spin_lock(&kmemstat_spin);
	type = kmemstat_poll;

	if (type == NULL)
		type = kmemstatistics;
	if (type) {
		atomic_set_int(&type->ks_flags, KSF_POLLING);
		spin_unlock(&kmemstat_spin);
		if (malloc_mgt_poll(type)) {
			spin_lock(&kmemstat_spin);
			kmemstat_poll = type->ks_next;
		} else {
			spin_lock(&kmemstat_spin);
		}
		atomic_clear_int(&type->ks_flags, KSF_POLLING);
		wakeup(type);
	} else {
		kmemstat_poll = NULL;
	}
	spin_unlock(&kmemstat_spin);
    }
}

static struct thread *kmalloc_poller_td;
static struct kproc_desc kmalloc_poller_kp = {
        "kmalloc_poller",
	kmalloc_poller_thread,
	&kmalloc_poller_td
};
SYSINIT(kmalloc_polller, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST,
	kproc_start, &kmalloc_poller_kp);

/*
 * Reinitialize all installed malloc regions after ncpus has been
 * determined.  type->ks_use is initially set to &type->ks_use0,
 * this function will dynamically allocate it as appropriate for ncpus.
 */
void
malloc_reinit_ncpus(void)
{
    struct malloc_type *t;
    struct kmalloc_use *use;
    int n;

    /*
     * If only one cpu we can leave ks_use set to ks_use0
     */
    if (ncpus <= 1)
	return;

    /*
     * Expand ks_use for all kmalloc blocks
     */
    for (t = kmemstatistics; t; t = t->ks_next) {
	KKASSERT(t->ks_use == &t->ks_use0);
	t->ks_use = kmalloc(sizeof(*use) * ncpus, M_TEMP, M_WAITOK|M_ZERO);
	t->ks_use[0] = t->ks_use0;
	if (t->ks_flags & KSF_OBJSIZE) {
	    malloc_mgt_relocate(&t->ks_use0.mgt, &t->ks_use[0].mgt);
	    for (n = 1; n < ncpus; ++n)
		malloc_mgt_init(t, &t->ks_use[n].mgt, t->ks_objsize);
	}
    }
}

/*
 * Increase the kmalloc pool limit for the specified pool.  No changes
 * are made if the pool would shrink.
 */
void
kmalloc_raise_limit(struct malloc_type *type, size_t bytes)
{
    KKASSERT(type->ks_limit != 0);
    if (bytes == 0)
	bytes = KvaSize;
    if (type->ks_limit < bytes)
	type->ks_limit = bytes;
}
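/*
 * Usage sketch (illustrative): a subsystem expecting heavy use of a pool
 * can raise its limit at initialization time:
 *
 *	kmalloc_raise_limit(M_TEMP, 512 * 1024 * 1024);
 *	kmalloc_raise_limit(M_TEMP, 0);	  (0 raises the limit to KvaSize)
 */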

void
kmalloc_set_unlimited(struct malloc_type *type)
{
    type->ks_limit = kmem_lim_size() * (1024 * 1024);
}

/*
 * Dynamically create a malloc pool.  This function is a NOP if *typep is
 * already non-NULL.
 */
void
kmalloc_create(struct malloc_type **typep, const char *descr)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		malloc_init(type);
		*typep = type;
	}
}

void
_kmalloc_create_obj(struct malloc_type **typep, const char *descr,
		    size_t objsize)
{
	struct malloc_type *type;

	if (*typep == NULL) {
		type = kmalloc(sizeof(*type), M_TEMP, M_WAITOK | M_ZERO);
		type->ks_magic = M_MAGIC;
		type->ks_shortdesc = descr;
		type->ks_flags = KSF_OBJSIZE;
		type->ks_objsize = __VM_CACHELINE_ALIGN(objsize);
		malloc_init(type);
		*typep = type;
	}
}

/*
 * Destroy a dynamically created malloc pool.  This function is a NOP if
 * the pool has already been destroyed.
 *
 * WARNING! For kmalloc_obj's, the exis state for related slabs is ignored,
 *	    only call once all references are 100% known to be gone.
 */
void
kmalloc_destroy(struct malloc_type **typep)
{
	if (*typep != NULL) {
		malloc_uninit(*typep);
		kfree(*typep, M_TEMP);
		*typep = NULL;
	}
}
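/*
 * Usage sketch (illustrative; M_MYDEV and p are hypothetical): dynamic
 * pools suit types whose lifetime is tied to a module or instance:
 *
 *	static struct malloc_type *M_MYDEV;
 *
 *	kmalloc_create(&M_MYDEV, "mydev data");
 *	p = kmalloc(sizeof(*p), M_MYDEV, M_WAITOK | M_ZERO);
 *	...
 *	kfree(p, M_MYDEV);
 *	kmalloc_destroy(&M_MYDEV);
 */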

/*
 * Calculate the zone index for the allocation request size and set the
 * allocation request size to that particular zone's chunk size.
 */
static __inline int
zoneindex(unsigned long *bytes, unsigned long *align)
{
    unsigned int n = (unsigned int)*bytes;	/* unsigned for shift opt */

    if (n < 128) {
	*bytes = n = (n + 7) & ~7;
	*align = 8;
	return(n / 8 - 1);		/* 8 byte chunks, 16 zones */
    }
    if (n < 256) {
	*bytes = n = (n + 15) & ~15;
	*align = 16;
	return(n / 16 + 7);
    }
    if (n < 8192) {
	if (n < 512) {
	    *bytes = n = (n + 31) & ~31;
	    *align = 32;
	    return(n / 32 + 15);
	}
	if (n < 1024) {
	    *bytes = n = (n + 63) & ~63;
	    *align = 64;
	    return(n / 64 + 23);
	}
	if (n < 2048) {
	    *bytes = n = (n + 127) & ~127;
	    *align = 128;
	    return(n / 128 + 31);
	}
	if (n < 4096) {
	    *bytes = n = (n + 255) & ~255;
	    *align = 256;
	    return(n / 256 + 39);
	}
	*bytes = n = (n + 511) & ~511;
	*align = 512;
	return(n / 512 + 47);
    }
#if ZALLOC_ZONE_LIMIT > 8192
    if (n < 16384) {
	*bytes = n = (n + 1023) & ~1023;
	*align = 1024;
	return(n / 1024 + 55);
    }
#endif
#if ZALLOC_ZONE_LIMIT > 16384
    if (n < 32768) {
	*bytes = n = (n + 2047) & ~2047;
	*align = 2048;
	return(n / 2048 + 63);
    }
#endif
    panic("Unexpected byte count %d", n);
    return(0);
}

static __inline void
clean_zone_rchunks(SLZone *z)
{
    SLChunk *bchunk;

    while ((bchunk = z->z_RChunks) != NULL) {
	cpu_ccfence();
	if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, NULL)) {
	    *z->z_LChunksp = bchunk;
	    while (bchunk) {
		chunk_mark_free(z, bchunk);
		z->z_LChunksp = &bchunk->c_Next;
		bchunk = bchunk->c_Next;
		++z->z_NFree;
	    }
	    break;
	}
	/* retry */
    }
}

/*
 * If the zone becomes totally free and is not the only zone listed for a
 * chunk size we move it to the FreeZones list.  We always leave at least
 * one zone per chunk size listed, even if it is freeable.
 *
 * Do not move the zone if there is an IPI in flight (z_RCount != 0),
 * otherwise MP races can result in our free_remote code accessing a
 * destroyed zone.  The remote end interlocks z_RCount with z_RChunks
 * so one has to test both z_NFree and z_RCount.
 *
 * Since this code can be called from an IPI callback, do *NOT* try to mess
 * with kernel_map here.  Hysteresis will be performed at kmalloc() time.
 */
static __inline SLZone *
check_zone_free(SLGlobalData *slgd, SLZone *z)
{
    SLZone *znext;

    znext = TAILQ_NEXT(z, z_Entry);
    if (z->z_NFree == z->z_NMax && z->z_RCount == 0 &&
	(TAILQ_FIRST(&slgd->ZoneAry[z->z_ZoneIndex]) != z || znext)) {
	int *kup;

	TAILQ_REMOVE(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);

	z->z_Magic = -1;
	TAILQ_INSERT_HEAD(&slgd->FreeZones, z, z_Entry);
	++slgd->NFreeZones;
	kup = btokup(z);
	*kup = 0;
    }
    return znext;
}

#ifdef SLAB_DEBUG
/*
 * Used to debug memory corruption issues.  Record up to (typically 32)
 * allocation sources for this zone (for a particular chunk size).
 */

static void
slab_record_source(SLZone *z, const char *file, int line)
{
    int i;
    int b = line & (SLAB_DEBUG_ENTRIES - 1);

    i = b;
    do {
	if (z->z_Sources[i].file == file && z->z_Sources[i].line == line)
		return;
	if (z->z_Sources[i].file == NULL)
		break;
	i = (i + 1) & (SLAB_DEBUG_ENTRIES - 1);
    } while (i != b);
    z->z_Sources[i].file = file;
    z->z_Sources[i].line = line;
}

#endif

static __inline unsigned long
powerof2_size(unsigned long size)
{
	int i;

	if (size == 0 || powerof2(size))
		return size;

	i = flsl(size);
	return (1UL << i);
}
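/*
 * Example (illustrative): powerof2_size(100) returns 128, powerof2_size(128)
 * returns 128 unchanged, and powerof2_size(0) returns 0.
 */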

/*
 * kmalloc()	(SLAB ALLOCATOR)
 *
 *	Allocate memory via the slab allocator.  If the request is too large,
 *	or if it is page-aligned beyond a certain size, we fall back to the
 *	KMEM subsystem.  A SLAB tracking descriptor must be specified, use
 *	&SlabMisc if you don't care.
 *
 *	M_RNOWAIT	- don't block.
 *	M_NULLOK	- return NULL instead of blocking.
 *	M_ZERO		- zero the returned memory.
 *	M_USE_RESERVE	- allow greater drawdown of the free list
 *	M_USE_INTERRUPT_RESERVE - allow the freelist to be exhausted
 *	M_POWEROF2	- round up size to the nearest power of 2
 *
 * MPSAFE
 */

/* don't let kmalloc macro mess up function declaration */
#undef kmalloc

#ifdef SLAB_DEBUG
void *
_kmalloc_debug(unsigned long size, struct malloc_type *type, int flags,
	      const char *file, int line)
#else
void *
_kmalloc(unsigned long size, struct malloc_type *type, int flags)
#endif
817a108bf71SMatthew Dillon {
818a108bf71SMatthew Dillon     SLZone *z;
819a108bf71SMatthew Dillon     SLChunk *chunk;
820a108bf71SMatthew Dillon     SLGlobalData *slgd;
821bba6a44dSMatthew Dillon     struct globaldata *gd;
82281dedbc2SSepherosa Ziehau     unsigned long align;
823a108bf71SMatthew Dillon     int zi;
824d2182dc1SMatthew Dillon #ifdef INVARIANTS
825d2182dc1SMatthew Dillon     int i;
826d2182dc1SMatthew Dillon #endif
827a108bf71SMatthew Dillon 
828b68ad50cSMatthew Dillon     logmemory_quick(malloc_beg);
829bba6a44dSMatthew Dillon     gd = mycpu;
830bba6a44dSMatthew Dillon     slgd = &gd->gd_slab;
831a108bf71SMatthew Dillon 
832a108bf71SMatthew Dillon     /*
833a108bf71SMatthew Dillon      * XXX silly to have this in the critical path.
834a108bf71SMatthew Dillon      */
8353ab3ae18SMatthew Dillon     KKASSERT(type->ks_limit != 0);
836fc7075bbSSepherosa Ziehau     ++type->ks_use[gd->gd_cpuid].calls;
837a108bf71SMatthew Dillon 
83862938642SMatthew Dillon     /*
83962938642SMatthew Dillon      * Flagged for cache-alignment
84062938642SMatthew Dillon      */
84162938642SMatthew Dillon     if (flags & M_CACHEALIGN) {
84262938642SMatthew Dillon 	if (size < __VM_CACHELINE_SIZE)
84362938642SMatthew Dillon 		size = __VM_CACHELINE_SIZE;
84462938642SMatthew Dillon 	else if (!CAN_CACHEALIGN(size))
84562938642SMatthew Dillon 		flags |= M_POWEROF2;
84662938642SMatthew Dillon     }
84762938642SMatthew Dillon 
84862938642SMatthew Dillon     /*
84962938642SMatthew Dillon      * Flagged to force nearest power-of-2 (higher or same)
85062938642SMatthew Dillon      */
8511e57f867SSepherosa Ziehau     if (flags & M_POWEROF2)
8521e57f867SSepherosa Ziehau 	size = powerof2_size(size);
8531e57f867SSepherosa Ziehau 
854a108bf71SMatthew Dillon     /*
85538e34349SMatthew Dillon      * Handle the case where the limit is reached.  Panic if we can't return
85638e34349SMatthew Dillon      * NULL.  The original malloc code looped, but this tended to
857a108bf71SMatthew Dillon      * simply deadlock the computer.
85838e34349SMatthew Dillon      *
85938e34349SMatthew Dillon      * ks_loosememuse is an up-only limit that is NOT MP-synchronized, used
86038e34349SMatthew Dillon      * to determine if a more complete limit check should be done.  The
8618edfbc5eSMatthew Dillon      * actual memory use is tracked via ks_use[cpu].memuse.
862a108bf71SMatthew Dillon      */
863bba6a44dSMatthew Dillon     while (type->ks_loosememuse >= type->ks_limit) {
864bba6a44dSMatthew Dillon 	int i;
865bba6a44dSMatthew Dillon 	long ttl;
866bba6a44dSMatthew Dillon 
867bba6a44dSMatthew Dillon 	for (i = ttl = 0; i < ncpus; ++i)
8688edfbc5eSMatthew Dillon 	    ttl += type->ks_use[i].memuse;
86938e34349SMatthew Dillon 	type->ks_loosememuse = ttl;	/* not MP synchronized */
87028135cc2SMatthew Dillon 	if ((ssize_t)ttl < 0)		/* deal with occassional race */
87128135cc2SMatthew Dillon 		ttl = 0;
872bba6a44dSMatthew Dillon 	if (ttl >= type->ks_limit) {
873f2b5daf9SMatthew Dillon 	    if (flags & M_NULLOK) {
8745fee07e6SMatthew Dillon 		logmemory(malloc_end, NULL, type, size, flags);
875a108bf71SMatthew Dillon 		return(NULL);
876f2b5daf9SMatthew Dillon 	    }
877a108bf71SMatthew Dillon 	    panic("%s: malloc limit exceeded", type->ks_shortdesc);
878a108bf71SMatthew Dillon 	}
879bba6a44dSMatthew Dillon     }
880a108bf71SMatthew Dillon 
881a108bf71SMatthew Dillon     /*
882a108bf71SMatthew Dillon      * Handle the degenerate size == 0 case.  Yes, this does happen.
883a108bf71SMatthew Dillon      * Return a special pointer.  This is to maintain compatibility with
884a108bf71SMatthew Dillon      * the original malloc implementation.  Certain devices, such as the
885a108bf71SMatthew Dillon      * adaptec driver, not only allocate 0 bytes, they check for NULL and
886a108bf71SMatthew Dillon      * also realloc() later on.  Joy.
887a108bf71SMatthew Dillon      */
888f2b5daf9SMatthew Dillon     if (size == 0) {
8895fee07e6SMatthew Dillon 	logmemory(malloc_end, ZERO_LENGTH_PTR, type, size, flags);
890a108bf71SMatthew Dillon 	return(ZERO_LENGTH_PTR);
891f2b5daf9SMatthew Dillon     }
892a108bf71SMatthew Dillon 
893a108bf71SMatthew Dillon     /*
894a7cf0021SMatthew Dillon      * Handle hysteresis from prior frees here in malloc().  We cannot
895a7cf0021SMatthew Dillon      * safely manipulate the kernel_map in free() due to free() possibly
896a7cf0021SMatthew Dillon      * being called via an IPI message or from sensitive interrupt code.
8975fee07e6SMatthew Dillon      *
8985fee07e6SMatthew Dillon      * NOTE: ku_pagecnt must be cleared before we free the slab or we
8995fee07e6SMatthew Dillon      *	     might race another cpu allocating the kva and setting
9005fee07e6SMatthew Dillon      *	     ku_pagecnt.
901a7cf0021SMatthew Dillon      */
902ad94a851SMatthew Dillon     while (slgd->NFreeZones > ZoneRelsThresh && (flags & M_RNOWAIT) == 0) {
903a7cf0021SMatthew Dillon 	crit_enter();
904ad94a851SMatthew Dillon 	if (slgd->NFreeZones > ZoneRelsThresh) {	/* crit sect race */
905722871d3SMatthew Dillon 	    int *kup;
9065fee07e6SMatthew Dillon 
907c1b91053SMatthew Dillon 	    z = TAILQ_LAST(&slgd->FreeZones, SLZoneList);
908c1b91053SMatthew Dillon 	    KKASSERT(z != NULL);
909c1b91053SMatthew Dillon 	    TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
910a7cf0021SMatthew Dillon 	    --slgd->NFreeZones;
9115fee07e6SMatthew Dillon 	    kup = btokup(z);
912722871d3SMatthew Dillon 	    *kup = 0;
913a7cf0021SMatthew Dillon 	    kmem_slab_free(z, ZoneSize);	/* may block */
914a7cf0021SMatthew Dillon 	}
915a7cf0021SMatthew Dillon 	crit_exit();
916a7cf0021SMatthew Dillon     }
9175fee07e6SMatthew Dillon 
91846a3f46dSMatthew Dillon     /*
9195fee07e6SMatthew Dillon      * XXX handle oversized frees that were queued from kfree().
92046a3f46dSMatthew Dillon      */
921c1b91053SMatthew Dillon     while (TAILQ_FIRST(&slgd->FreeOvZones) && (flags & M_RNOWAIT) == 0) {
92246a3f46dSMatthew Dillon 	crit_enter();
923c1b91053SMatthew Dillon 	if ((z = TAILQ_LAST(&slgd->FreeOvZones, SLZoneList)) != NULL) {
9247e83df33SMatthew Dillon 	    vm_size_t tsize;
9257e83df33SMatthew Dillon 
92646a3f46dSMatthew Dillon 	    KKASSERT(z->z_Magic == ZALLOC_OVSZ_MAGIC);
927c1b91053SMatthew Dillon 	    TAILQ_REMOVE(&slgd->FreeOvZones, z, z_Entry);
9287e83df33SMatthew Dillon 	    tsize = z->z_ChunkSize;
9297e83df33SMatthew Dillon 	    kmem_slab_free(z, tsize);	/* may block */
93046a3f46dSMatthew Dillon 	}
93146a3f46dSMatthew Dillon 	crit_exit();
93246a3f46dSMatthew Dillon     }
933a7cf0021SMatthew Dillon 
934a7cf0021SMatthew Dillon     /*
935a108bf71SMatthew Dillon      * Handle large allocations directly.  There should not be very many of
936a108bf71SMatthew Dillon      * these so performance is not a big issue.
937a108bf71SMatthew Dillon      *
938b543eeedSMatthew Dillon      * The backend allocator is pretty nasty on a SMP system.   Use the
939b543eeedSMatthew Dillon      * slab allocator for one and two page-sized chunks even though we lose
940b543eeedSMatthew Dillon      * some efficiency.  XXX maybe fix mmio and the elf loader instead.
941a108bf71SMatthew Dillon      */
942b543eeedSMatthew Dillon     if (size >= ZoneLimit || ((size & PAGE_MASK) == 0 && size > PAGE_SIZE*2)) {
943722871d3SMatthew Dillon 	int *kup;
944a108bf71SMatthew Dillon 
945a108bf71SMatthew Dillon 	size = round_page(size);
946a108bf71SMatthew Dillon 	chunk = kmem_slab_alloc(size, PAGE_SIZE, flags);
947f2b5daf9SMatthew Dillon 	if (chunk == NULL) {
9485fee07e6SMatthew Dillon 	    logmemory(malloc_end, NULL, type, size, flags);
949a108bf71SMatthew Dillon 	    return(NULL);
950f2b5daf9SMatthew Dillon 	}
951a108bf71SMatthew Dillon 	flags &= ~M_ZERO;	/* result already zero'd if M_ZERO was set */
9528f1d5415SMatthew Dillon 	flags |= M_PASSIVE_ZERO;
953a108bf71SMatthew Dillon 	kup = btokup(chunk);
954722871d3SMatthew Dillon 	*kup = size / PAGE_SIZE;
955a108bf71SMatthew Dillon 	crit_enter();
956a108bf71SMatthew Dillon 	goto done;
957a108bf71SMatthew Dillon     }
958a108bf71SMatthew Dillon 
959a108bf71SMatthew Dillon     /*
960a108bf71SMatthew Dillon      * Attempt to allocate out of an existing zone.  First try the free list,
961a108bf71SMatthew Dillon      * then allocate out of unallocated space.  If we find a good zone move
962a108bf71SMatthew Dillon      * it to the head of the list so later allocations find it quickly
963a108bf71SMatthew Dillon      * (we might have thousands of zones in the list).
964a108bf71SMatthew Dillon      *
965a108bf71SMatthew Dillon      * Note: zoneindex() will panic of size is too large.
966a108bf71SMatthew Dillon      */
96781dedbc2SSepherosa Ziehau     zi = zoneindex(&size, &align);
968a108bf71SMatthew Dillon     KKASSERT(zi < NZONES);
969a108bf71SMatthew Dillon     crit_enter();
970a108bf71SMatthew Dillon 
971c1b91053SMatthew Dillon     if ((z = TAILQ_LAST(&slgd->ZoneAry[zi], SLZoneList)) != NULL) {
972a108bf71SMatthew Dillon 	/*
9735fee07e6SMatthew Dillon 	 * Locate a chunk - we have to have at least one.  If this is the
9745fee07e6SMatthew Dillon 	 * last chunk go ahead and do the work to retrieve chunks freed
9755fee07e6SMatthew Dillon 	 * from remote cpus, and if the zone is still empty move it off
9765fee07e6SMatthew Dillon 	 * the ZoneAry.
977a108bf71SMatthew Dillon 	 */
9785fee07e6SMatthew Dillon 	if (--z->z_NFree <= 0) {
9795fee07e6SMatthew Dillon 	    KKASSERT(z->z_NFree == 0);
9805fee07e6SMatthew Dillon 
9815fee07e6SMatthew Dillon 	    /*
9825fee07e6SMatthew Dillon 	     * WARNING! This code competes with other cpus.  It is ok
9835fee07e6SMatthew Dillon 	     * for us to not drain RChunks here but we might as well, and
9845fee07e6SMatthew Dillon 	     * it is ok if more accumulate after we're done.
9855fee07e6SMatthew Dillon 	     *
9865fee07e6SMatthew Dillon 	     * Set RSignal before pulling rchunks off, indicating that we
9875fee07e6SMatthew Dillon 	     * will be moving ourselves off of the ZoneAry.  Remote ends will
9885fee07e6SMatthew Dillon 	     * read RSignal before putting rchunks on thus interlocking
9895fee07e6SMatthew Dillon 	     * their IPI signaling.
9905fee07e6SMatthew Dillon 	     */
9915fee07e6SMatthew Dillon 	    if (z->z_RChunks == NULL)
9925fee07e6SMatthew Dillon 		atomic_swap_int(&z->z_RSignal, 1);
9935fee07e6SMatthew Dillon 
994c2f95d8aSMatthew Dillon 	    clean_zone_rchunks(z);
995c2f95d8aSMatthew Dillon 
9965fee07e6SMatthew Dillon 	    /*
9975fee07e6SMatthew Dillon 	     * Remove from the zone list if no free chunks remain.
9985fee07e6SMatthew Dillon 	     * Clear RSignal
9995fee07e6SMatthew Dillon 	     */
10005fee07e6SMatthew Dillon 	    if (z->z_NFree == 0) {
1001c1b91053SMatthew Dillon 		TAILQ_REMOVE(&slgd->ZoneAry[zi], z, z_Entry);
10025fee07e6SMatthew Dillon 	    } else {
10035fee07e6SMatthew Dillon 		z->z_RSignal = 0;
10045fee07e6SMatthew Dillon 	    }
1005a108bf71SMatthew Dillon 	}
1006a108bf71SMatthew Dillon 
1007a108bf71SMatthew Dillon 	/*
10085fee07e6SMatthew Dillon 	 * Fast path, we have chunks available in z_LChunks.
1009a108bf71SMatthew Dillon 	 */
10105fee07e6SMatthew Dillon 	chunk = z->z_LChunks;
10115fee07e6SMatthew Dillon 	if (chunk) {
101210cc6608SMatthew Dillon 		chunk_mark_allocated(z, chunk);
10135fee07e6SMatthew Dillon 		z->z_LChunks = chunk->c_Next;
10145fee07e6SMatthew Dillon 		if (z->z_LChunks == NULL)
10155fee07e6SMatthew Dillon 			z->z_LChunksp = &z->z_LChunks;
1016bbb201fdSMatthew Dillon #ifdef SLAB_DEBUG
1017bbb201fdSMatthew Dillon 		slab_record_source(z, file, line);
1018bbb201fdSMatthew Dillon #endif
1019a108bf71SMatthew Dillon 		goto done;
1020a108bf71SMatthew Dillon 	}
1021a108bf71SMatthew Dillon 
1022a108bf71SMatthew Dillon 	/*
10235fee07e6SMatthew Dillon 	 * No chunks are available in LChunks; the free chunk MUST be
10245fee07e6SMatthew Dillon 	 * in the never-before-used memory area, controlled by UIndex.
10255fee07e6SMatthew Dillon 	 *
10265fee07e6SMatthew Dillon 	 * The consequences are very serious if our zone got corrupted so
10275fee07e6SMatthew Dillon 	 * we use an explicit panic rather than a KASSERT.
1028a108bf71SMatthew Dillon 	 */
10291c5ca4f3SMatthew Dillon 	if (z->z_UIndex + 1 != z->z_NMax)
10305fee07e6SMatthew Dillon 	    ++z->z_UIndex;
10311c5ca4f3SMatthew Dillon 	else
10321c5ca4f3SMatthew Dillon 	    z->z_UIndex = 0;
10335fee07e6SMatthew Dillon 
10341c5ca4f3SMatthew Dillon 	if (z->z_UIndex == z->z_UEndIndex)
10351c5ca4f3SMatthew Dillon 	    panic("slaballoc: corrupted zone");
10365fee07e6SMatthew Dillon 
10371c5ca4f3SMatthew Dillon 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
10388f1d5415SMatthew Dillon 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1039a108bf71SMatthew Dillon 	    flags &= ~M_ZERO;
10408f1d5415SMatthew Dillon 	    flags |= M_PASSIVE_ZERO;
10418f1d5415SMatthew Dillon 	}
104210cc6608SMatthew Dillon 	chunk_mark_allocated(z, chunk);
1043bbb201fdSMatthew Dillon #ifdef SLAB_DEBUG
1044bbb201fdSMatthew Dillon 	slab_record_source(z, file, line);
1045bbb201fdSMatthew Dillon #endif
1046a108bf71SMatthew Dillon 	goto done;
1047a108bf71SMatthew Dillon     }
1048a108bf71SMatthew Dillon 
1049a108bf71SMatthew Dillon     /*
1050a108bf71SMatthew Dillon      * If all zones are exhausted we need to allocate a new zone for this
1051a108bf71SMatthew Dillon      * index.  Use M_ZERO to take advantage of pre-zero'd pages.  Also see
10526ab8e1daSMatthew Dillon      * the UAlloc use above in regard to M_ZERO.  Note that when we are reusing
10536ab8e1daSMatthew Dillon      * a zone from the FreeZones list UAlloc'd data will not be zero'd, and
10546ab8e1daSMatthew Dillon      * we do not pre-zero it because we do not want to mess up the L1 cache.
1055a108bf71SMatthew Dillon      *
1056a108bf71SMatthew Dillon      * At least one subsystem, the tty code (see CROUND) expects power-of-2
1057a108bf71SMatthew Dillon      * allocations to be power-of-2 aligned.  We maintain compatibility by
1058a108bf71SMatthew Dillon      * adjusting the base offset below.
1059a108bf71SMatthew Dillon      */
1060a108bf71SMatthew Dillon     {
1061a108bf71SMatthew Dillon 	int off;
1062722871d3SMatthew Dillon 	int *kup;
1063a108bf71SMatthew Dillon 
1064c1b91053SMatthew Dillon 	if ((z = TAILQ_FIRST(&slgd->FreeZones)) != NULL) {
1065c1b91053SMatthew Dillon 	    TAILQ_REMOVE(&slgd->FreeZones, z, z_Entry);
1066a108bf71SMatthew Dillon 	    --slgd->NFreeZones;
1067a108bf71SMatthew Dillon 	    bzero(z, sizeof(SLZone));
10686ab8e1daSMatthew Dillon 	    z->z_Flags |= SLZF_UNOTZEROD;
1069a108bf71SMatthew Dillon 	} else {
1070a108bf71SMatthew Dillon 	    z = kmem_slab_alloc(ZoneSize, ZoneSize, flags|M_ZERO);
1071a108bf71SMatthew Dillon 	    if (z == NULL)
1072a108bf71SMatthew Dillon 		goto fail;
1073a108bf71SMatthew Dillon 	}
1074a108bf71SMatthew Dillon 
1075a108bf71SMatthew Dillon 	/*
107610cc6608SMatthew Dillon 	 * How big is the base structure?
107710cc6608SMatthew Dillon 	 */
107810cc6608SMatthew Dillon #if defined(INVARIANTS)
107910cc6608SMatthew Dillon 	/*
108010cc6608SMatthew Dillon 	 * Make room for z_Bitmap.  An exact calculation is somewhat more
108110cc6608SMatthew Dillon 	 * complicated, so settle for a conservative over-estimate instead.
108210cc6608SMatthew Dillon 	 */
108310cc6608SMatthew Dillon 	off = offsetof(SLZone, z_Bitmap[(ZoneSize / size + 31) / 32]);
108410cc6608SMatthew Dillon 	bzero(z->z_Bitmap, (ZoneSize / size + 31) / 8);
108510cc6608SMatthew Dillon #else
108610cc6608SMatthew Dillon 	off = sizeof(SLZone);
108710cc6608SMatthew Dillon #endif
108810cc6608SMatthew Dillon 
108910cc6608SMatthew Dillon 	/*
1090a108bf71SMatthew Dillon 	 * Guarantee power-of-2 alignment for power-of-2-sized chunks.
1091e0fe4b4eSSepherosa Ziehau 	 * Otherwise properly align the data according to the chunk size.
1092a108bf71SMatthew Dillon 	 */
1093298ff6e5SSepherosa Ziehau 	if (powerof2(size))
10944aae2d75SSepherosa Ziehau 	    align = size;
1095965b839fSSascha Wildner 	off = roundup2(off, align);
10964aae2d75SSepherosa Ziehau 
1097a108bf71SMatthew Dillon 	z->z_Magic = ZALLOC_SLAB_MAGIC;
1098a108bf71SMatthew Dillon 	z->z_ZoneIndex = zi;
1099a108bf71SMatthew Dillon 	z->z_NMax = (ZoneSize - off) / size;
1100a108bf71SMatthew Dillon 	z->z_NFree = z->z_NMax - 1;
11011c5ca4f3SMatthew Dillon 	z->z_BasePtr = (char *)z + off;
11021c5ca4f3SMatthew Dillon 	z->z_UIndex = z->z_UEndIndex = slgd->JunkIndex % z->z_NMax;
1103a108bf71SMatthew Dillon 	z->z_ChunkSize = size;
11042db3b277SMatthew Dillon 	z->z_CpuGd = gd;
1105bba6a44dSMatthew Dillon 	z->z_Cpu = gd->gd_cpuid;
11065fee07e6SMatthew Dillon 	z->z_LChunksp = &z->z_LChunks;
1107bbb201fdSMatthew Dillon #ifdef SLAB_DEBUG
1108bbb201fdSMatthew Dillon 	bcopy(z->z_Sources, z->z_AltSources, sizeof(z->z_Sources));
1109bbb201fdSMatthew Dillon 	bzero(z->z_Sources, sizeof(z->z_Sources));
1110bbb201fdSMatthew Dillon #endif
11111c5ca4f3SMatthew Dillon 	chunk = (SLChunk *)(z->z_BasePtr + z->z_UIndex * size);
1112c1b91053SMatthew Dillon 	TAILQ_INSERT_HEAD(&slgd->ZoneAry[zi], z, z_Entry);
11138f1d5415SMatthew Dillon 	if ((z->z_Flags & SLZF_UNOTZEROD) == 0) {
1114a108bf71SMatthew Dillon 	    flags &= ~M_ZERO;	/* already zero'd */
11158f1d5415SMatthew Dillon 	    flags |= M_PASSIVE_ZERO;
11168f1d5415SMatthew Dillon 	}
11175fee07e6SMatthew Dillon 	kup = btokup(z);
1118722871d3SMatthew Dillon 	*kup = -(z->z_Cpu + 1);	/* -1 to -(N+1) */
111910cc6608SMatthew Dillon 	chunk_mark_allocated(z, chunk);
1120bbb201fdSMatthew Dillon #ifdef SLAB_DEBUG
1121bbb201fdSMatthew Dillon 	slab_record_source(z, file, line);
1122bbb201fdSMatthew Dillon #endif
11231c5ca4f3SMatthew Dillon 
11241c5ca4f3SMatthew Dillon 	/*
11251c5ca4f3SMatthew Dillon 	 * Slide the base index for initial allocations out of the next
11261c5ca4f3SMatthew Dillon 	 * zone we create so we do not over-weight the lower part of the
11271c5ca4f3SMatthew Dillon 	 * cpu memory caches.
11281c5ca4f3SMatthew Dillon 	 */
11291c5ca4f3SMatthew Dillon 	slgd->JunkIndex = (slgd->JunkIndex + ZALLOC_SLAB_SLIDE)
11301c5ca4f3SMatthew Dillon 				& (ZALLOC_MAX_ZONE_SIZE - 1);
1131a108bf71SMatthew Dillon     }
11325fee07e6SMatthew Dillon 
1133a108bf71SMatthew Dillon done:
11348edfbc5eSMatthew Dillon     ++type->ks_use[gd->gd_cpuid].inuse;
11358edfbc5eSMatthew Dillon     type->ks_use[gd->gd_cpuid].memuse += size;
113607de42e8SSepherosa Ziehau     type->ks_use[gd->gd_cpuid].loosememuse += size;
113707de42e8SSepherosa Ziehau     if (type->ks_use[gd->gd_cpuid].loosememuse >= ZoneSize) {
113807de42e8SSepherosa Ziehau 	/* not MP synchronized */
113907de42e8SSepherosa Ziehau 	type->ks_loosememuse += type->ks_use[gd->gd_cpuid].loosememuse;
114007de42e8SSepherosa Ziehau 	type->ks_use[gd->gd_cpuid].loosememuse = 0;
114107de42e8SSepherosa Ziehau     }
1142a108bf71SMatthew Dillon     crit_exit();
11435fee07e6SMatthew Dillon 
1144a108bf71SMatthew Dillon     if (flags & M_ZERO)
1145a108bf71SMatthew Dillon 	bzero(chunk, size);
1146bba6a44dSMatthew Dillon #ifdef INVARIANTS
1147d2182dc1SMatthew Dillon     else if ((flags & (M_ZERO|M_PASSIVE_ZERO)) == 0) {
1148d2182dc1SMatthew Dillon 	if (use_malloc_pattern) {
1149d2182dc1SMatthew Dillon 	    for (i = 0; i < size; i += sizeof(int)) {
1150d2182dc1SMatthew Dillon 		*(int *)((char *)chunk + i) = -1;
1151d2182dc1SMatthew Dillon 	    }
1152d2182dc1SMatthew Dillon 	}
1153bba6a44dSMatthew Dillon 	chunk->c_Next = (void *)-1; /* avoid accidental double-free check */
1154d2182dc1SMatthew Dillon     }
1155bba6a44dSMatthew Dillon #endif
11565fee07e6SMatthew Dillon     logmemory(malloc_end, chunk, type, size, flags);
1157a108bf71SMatthew Dillon     return(chunk);
1158a108bf71SMatthew Dillon fail:
1159a108bf71SMatthew Dillon     crit_exit();
11605fee07e6SMatthew Dillon     logmemory(malloc_end, NULL, type, size, flags);
1161a108bf71SMatthew Dillon     return(NULL);
1162a108bf71SMatthew Dillon }
1163a108bf71SMatthew Dillon 
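/*
 * Illustrative sketch, not part of the original file: the per-page "kup"
 * value used throughout this allocator (via btokup()) encodes two cases.
 * A positive value is the page count of an oversized allocation; a
 * negative value is -(cpu + 1) for a page backing a per-cpu zone.
 * kup_decode() below is a hypothetical helper written only to make the
 * encoding explicit.
 */
#if 0	/* example only */
static int
kup_decode(int kup, unsigned long *ovsz_bytes, int *owner_cpu)
{
    if (kup > 0) {
	*ovsz_bytes = (unsigned long)kup << PAGE_SHIFT;
	return 1;			/* oversized allocation */
    }
    if (kup < 0) {
	*owner_cpu = -kup - 1;		/* -1 to -(N+1) <-> cpu 0..N */
	return 0;			/* zone-owned page */
    }
    return -1;				/* untracked / freed page */
}
#endif
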
116438e34349SMatthew Dillon /*
116538e34349SMatthew Dillon  * kernel realloc.  (SLAB ALLOCATOR) (MP SAFE)
116638e34349SMatthew Dillon  *
116738e34349SMatthew Dillon  * Generally speaking this routine is not called very often and we do
116838e34349SMatthew Dillon  * not attempt to optimize it beyond reusing the same pointer if the
116938e34349SMatthew Dillon  * new size fits within the chunking of the old pointer's zone.
117038e34349SMatthew Dillon  */
1171bbb201fdSMatthew Dillon #ifdef SLAB_DEBUG
1172bbb201fdSMatthew Dillon void *
1173bbb201fdSMatthew Dillon krealloc_debug(void *ptr, unsigned long size,
1174bbb201fdSMatthew Dillon 	       struct malloc_type *type, int flags,
1175bbb201fdSMatthew Dillon 	       const char *file, int line)
1176bbb201fdSMatthew Dillon #else
1177a108bf71SMatthew Dillon void *
11788aca2bd4SMatthew Dillon krealloc(void *ptr, unsigned long size, struct malloc_type *type, int flags)
1179bbb201fdSMatthew Dillon #endif
1180a108bf71SMatthew Dillon {
1181722871d3SMatthew Dillon     unsigned long osize;
118281dedbc2SSepherosa Ziehau     unsigned long align;
1183a108bf71SMatthew Dillon     SLZone *z;
1184a108bf71SMatthew Dillon     void *nptr;
1185722871d3SMatthew Dillon     int *kup;
1186a108bf71SMatthew Dillon 
1187eb7f3e3cSMatthew Dillon     KKASSERT((flags & M_ZERO) == 0);	/* not supported */
1188eb7f3e3cSMatthew Dillon 
1189a108bf71SMatthew Dillon     if (ptr == NULL || ptr == ZERO_LENGTH_PTR)
119088da688cSMatthew Dillon 	return(_kmalloc_debug(size, type, flags, file, line));
1191a108bf71SMatthew Dillon     if (size == 0) {
1192efda3bd0SMatthew Dillon 	kfree(ptr, type);
1193a108bf71SMatthew Dillon 	return(NULL);
1194a108bf71SMatthew Dillon     }
1195a108bf71SMatthew Dillon 
1196a108bf71SMatthew Dillon     /*
1197a108bf71SMatthew Dillon      * Handle oversized allocations.  XXX we really should require that a
1198a108bf71SMatthew Dillon      * size be passed to free() instead of this nonsense.
1199a108bf71SMatthew Dillon      */
1200a108bf71SMatthew Dillon     kup = btokup(ptr);
1201722871d3SMatthew Dillon     if (*kup > 0) {
1202722871d3SMatthew Dillon 	osize = *kup << PAGE_SHIFT;
1203a108bf71SMatthew Dillon 	if (osize == round_page(size))
1204a108bf71SMatthew Dillon 	    return(ptr);
120588da688cSMatthew Dillon 	if ((nptr = _kmalloc_debug(size, type, flags, file, line)) == NULL)
1206a108bf71SMatthew Dillon 	    return(NULL);
1207a108bf71SMatthew Dillon 	bcopy(ptr, nptr, min(size, osize));
1208efda3bd0SMatthew Dillon 	kfree(ptr, type);
1209a108bf71SMatthew Dillon 	return(nptr);
1210a108bf71SMatthew Dillon     }
1211a108bf71SMatthew Dillon 
1212a108bf71SMatthew Dillon     /*
1213a108bf71SMatthew Dillon      * Get the original allocation's zone.  If the new request winds up
1214a108bf71SMatthew Dillon      * using the same chunk size we do not have to do anything.
1215a108bf71SMatthew Dillon      */
12165fee07e6SMatthew Dillon     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
12175fee07e6SMatthew Dillon     kup = btokup(z);
1218722871d3SMatthew Dillon     KKASSERT(*kup < 0);
1219a108bf71SMatthew Dillon     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1220a108bf71SMatthew Dillon 
1221a108bf71SMatthew Dillon     /*
1222a108bf71SMatthew Dillon      * Allocate memory for the new request size.  Note that zoneindex has
1223a108bf71SMatthew Dillon      * already adjusted the request size to the appropriate chunk size, which
1224a108bf71SMatthew Dillon      * should optimize our bcopy().  Then copy and return the new pointer.
12251ea6580dSMatthew Dillon      *
12261ea6580dSMatthew Dillon      * Resizing a non-power-of-2 allocation to a power-of-2 size does not
12271ea6580dSMatthew Dillon      * necessarily align the result.
12281ea6580dSMatthew Dillon      *
12291ea6580dSMatthew Dillon      * We can only zoneindex (to align size to the chunk size) if the new
12301ea6580dSMatthew Dillon      * size is not too large.
1231a108bf71SMatthew Dillon      */
12321ea6580dSMatthew Dillon     if (size < ZoneLimit) {
123381dedbc2SSepherosa Ziehau 	zoneindex(&size, &align);
12341ea6580dSMatthew Dillon 	if (z->z_ChunkSize == size)
12351ea6580dSMatthew Dillon 	    return(ptr);
12361ea6580dSMatthew Dillon     }
123788da688cSMatthew Dillon     if ((nptr = _kmalloc_debug(size, type, flags, file, line)) == NULL)
1238a108bf71SMatthew Dillon 	return(NULL);
1239a108bf71SMatthew Dillon     bcopy(ptr, nptr, min(size, z->z_ChunkSize));
1240efda3bd0SMatthew Dillon     kfree(ptr, type);
1241a108bf71SMatthew Dillon     return(nptr);
1242a108bf71SMatthew Dillon }
1243a108bf71SMatthew Dillon 
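/*
 * Hypothetical usage sketch (not from this file) of the krealloc()
 * behavior described above: if the new request rounds to the same chunk
 * size the original pointer comes back untouched, otherwise a new chunk
 * is allocated and the data copied.  M_TEMP stands in for whatever
 * malloc type the caller uses.
 */
#if 0	/* example only */
	void *buf;

	buf = kmalloc(100, M_TEMP, M_WAITOK);
	/* may return buf unchanged if 120 rounds to the same chunk size */
	buf = krealloc(buf, 120, M_TEMP, M_WAITOK);
	/* a much larger size forces kmalloc + bcopy + kfree of the old */
	buf = krealloc(buf, 4096, M_TEMP, M_WAITOK);
	kfree(buf, M_TEMP);
#endif
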
1244*44298395SMatthew Dillon size_t
1245*44298395SMatthew Dillon kmalloc_usable_size(const void *ptr)
1246*44298395SMatthew Dillon {
1247*44298395SMatthew Dillon     unsigned long size;
1248*44298395SMatthew Dillon     SLZone *z;
1249*44298395SMatthew Dillon     int *kup;
1250*44298395SMatthew Dillon 
1251*44298395SMatthew Dillon     if (ptr == NULL)
1252*44298395SMatthew Dillon 	return 0;
1253*44298395SMatthew Dillon     if (ptr == ZERO_LENGTH_PTR)
1254*44298395SMatthew Dillon 	return 0;
1255*44298395SMatthew Dillon 
1256*44298395SMatthew Dillon     /*
1257*44298395SMatthew Dillon      * Check to see if the pointer belongs to an oversized segment.
1258*44298395SMatthew Dillon      */
1259*44298395SMatthew Dillon     kup = btokup(ptr);
1260*44298395SMatthew Dillon     if (*kup > 0) {
1261*44298395SMatthew Dillon 	size = *kup << PAGE_SHIFT;
1262*44298395SMatthew Dillon 	return size;
1263*44298395SMatthew Dillon     }
1264*44298395SMatthew Dillon 
1265*44298395SMatthew Dillon     /*
1266*44298395SMatthew Dillon      * Zone case.  Figure out the zone based on the fact that it is
1267*44298395SMatthew Dillon      * ZoneSize aligned.
1268*44298395SMatthew Dillon      */
1269*44298395SMatthew Dillon     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
1270*44298395SMatthew Dillon     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1271*44298395SMatthew Dillon 
1272*44298395SMatthew Dillon     return (z->z_ChunkSize);
1273*44298395SMatthew Dillon }
1274*44298395SMatthew Dillon 
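/*
 * Hypothetical caller sketch: kmalloc_usable_size() reports the full
 * chunk (or rounded page) size backing a pointer, which is always at
 * least the size originally requested, so the whole reported range is
 * safe for the caller to use.
 */
#if 0	/* example only */
	char *p = kmalloc(100, M_TEMP, M_WAITOK);
	size_t n = kmalloc_usable_size(p);	/* chunk size, >= 100 */

	bzero(p, n);		/* the entire chunk belongs to the caller */
	kfree(p, M_TEMP);
#endif
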
127538e34349SMatthew Dillon /*
127645d2b1d8SMatthew Dillon  * Return the kmalloc limit for this type, in bytes.
127745d2b1d8SMatthew Dillon  */
127845d2b1d8SMatthew Dillon long
127945d2b1d8SMatthew Dillon kmalloc_limit(struct malloc_type *type)
128045d2b1d8SMatthew Dillon {
12813ab3ae18SMatthew Dillon     KKASSERT(type->ks_limit != 0);
128245d2b1d8SMatthew Dillon     return(type->ks_limit);
128345d2b1d8SMatthew Dillon }
128445d2b1d8SMatthew Dillon 
128545d2b1d8SMatthew Dillon /*
128638e34349SMatthew Dillon  * Allocate a copy of the specified string.
128738e34349SMatthew Dillon  *
128838e34349SMatthew Dillon  * (MP SAFE) (MAY BLOCK)
128938e34349SMatthew Dillon  */
1290bbb201fdSMatthew Dillon #ifdef SLAB_DEBUG
1291bbb201fdSMatthew Dillon char *
1292bbb201fdSMatthew Dillon kstrdup_debug(const char *str, struct malloc_type *type,
1293bbb201fdSMatthew Dillon 	      const char *file, int line)
1294bbb201fdSMatthew Dillon #else
12951ac06773SMatthew Dillon char *
129659302080SMatthew Dillon kstrdup(const char *str, struct malloc_type *type)
1297bbb201fdSMatthew Dillon #endif
12981ac06773SMatthew Dillon {
12991ac06773SMatthew Dillon     int zlen;	/* length inclusive of terminating NUL */
13001ac06773SMatthew Dillon     char *nstr;
13011ac06773SMatthew Dillon 
13021ac06773SMatthew Dillon     if (str == NULL)
13031ac06773SMatthew Dillon 	return(NULL);
13041ac06773SMatthew Dillon     zlen = strlen(str) + 1;
130588da688cSMatthew Dillon     nstr = _kmalloc_debug(zlen, type, M_WAITOK, file, line);
13061ac06773SMatthew Dillon     bcopy(str, nstr, zlen);
13071ac06773SMatthew Dillon     return(nstr);
13081ac06773SMatthew Dillon }
13091ac06773SMatthew Dillon 
1310718ec3d1STomohiro Kusumi #ifdef SLAB_DEBUG
1311718ec3d1STomohiro Kusumi char *
1312718ec3d1STomohiro Kusumi kstrndup_debug(const char *str, size_t maxlen, struct malloc_type *type,
1313718ec3d1STomohiro Kusumi 	      const char *file, int line)
1314718ec3d1STomohiro Kusumi #else
1315718ec3d1STomohiro Kusumi char *
1316718ec3d1STomohiro Kusumi kstrndup(const char *str, size_t maxlen, struct malloc_type *type)
1317718ec3d1STomohiro Kusumi #endif
1318718ec3d1STomohiro Kusumi {
1319718ec3d1STomohiro Kusumi     int zlen;	/* length inclusive of terminating NUL */
1320718ec3d1STomohiro Kusumi     char *nstr;
1321718ec3d1STomohiro Kusumi 
1322718ec3d1STomohiro Kusumi     if (str == NULL)
1323718ec3d1STomohiro Kusumi 	return(NULL);
1324718ec3d1STomohiro Kusumi     zlen = strnlen(str, maxlen) + 1;
132588da688cSMatthew Dillon     nstr = _kmalloc_debug(zlen, type, M_WAITOK, file, line);
1326718ec3d1STomohiro Kusumi     bcopy(str, nstr, zlen);
1327718ec3d1STomohiro Kusumi     nstr[zlen - 1] = '\0';
1328718ec3d1STomohiro Kusumi     return(nstr);
1329718ec3d1STomohiro Kusumi }
1330718ec3d1STomohiro Kusumi 
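/*
 * Hypothetical usage sketch: kstrndup() copies at most maxlen
 * characters and always NUL-terminates, so the result is well-formed
 * even when the source string is longer than maxlen.
 */
#if 0	/* example only */
	char *s = kstrndup("hello, world", 5, M_TEMP);	/* yields "hello" */

	kfree(s, M_TEMP);
#endif
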
1331a108bf71SMatthew Dillon /*
13325fee07e6SMatthew Dillon  * Notify our cpu that a remote cpu has freed some chunks in a zone that
1333df9daea8SMatthew Dillon  * we own.  RCount will be bumped so the memory should be good, but validate
1334df9daea8SMatthew Dillon  * that it really is.
1335a108bf71SMatthew Dillon  */
1336bad949c8SSepherosa Ziehau static void
13375fee07e6SMatthew Dillon kfree_remote(void *ptr)
1338a108bf71SMatthew Dillon {
13395fee07e6SMatthew Dillon     SLGlobalData *slgd;
13405fee07e6SMatthew Dillon     SLZone *z;
13415fee07e6SMatthew Dillon     int nfree;
1342722871d3SMatthew Dillon     int *kup;
13435fee07e6SMatthew Dillon 
13445fee07e6SMatthew Dillon     slgd = &mycpu->gd_slab;
13455fee07e6SMatthew Dillon     z = ptr;
13465fee07e6SMatthew Dillon     kup = btokup(z);
1347df9daea8SMatthew Dillon     KKASSERT(*kup == -((int)mycpuid + 1));
1348df9daea8SMatthew Dillon     KKASSERT(z->z_RCount > 0);
1349df9daea8SMatthew Dillon     atomic_subtract_int(&z->z_RCount, 1);
13505fee07e6SMatthew Dillon 
13515bf48697SAggelos Economopoulos     logmemory(free_rem_beg, z, NULL, 0L, 0);
13525fee07e6SMatthew Dillon     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
13535fee07e6SMatthew Dillon     KKASSERT(z->z_Cpu  == mycpu->gd_cpuid);
13545fee07e6SMatthew Dillon     nfree = z->z_NFree;
13555fee07e6SMatthew Dillon 
13565fee07e6SMatthew Dillon     /*
13575fee07e6SMatthew Dillon      * Indicate that we will no longer be off of the ZoneAry by
13585fee07e6SMatthew Dillon      * clearing RSignal.
13595fee07e6SMatthew Dillon      */
13605fee07e6SMatthew Dillon     if (z->z_RChunks)
13615fee07e6SMatthew Dillon 	z->z_RSignal = 0;
13625fee07e6SMatthew Dillon 
13635fee07e6SMatthew Dillon     /*
13645fee07e6SMatthew Dillon      * Atomically extract the bchunks list and then process it back
13655fee07e6SMatthew Dillon      * into the lchunks list.  We want to append our bchunks to the
13665fee07e6SMatthew Dillon      * lchunks list and not prepend since we likely do not have
13675fee07e6SMatthew Dillon      * cache mastership of the related data (not that it helps since
13685fee07e6SMatthew Dillon      * we are using c_Next).
13695fee07e6SMatthew Dillon      */
1370c2f95d8aSMatthew Dillon     clean_zone_rchunks(z);
13715fee07e6SMatthew Dillon     if (z->z_NFree && nfree == 0) {
1372c1b91053SMatthew Dillon 	TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
13735fee07e6SMatthew Dillon     }
13745fee07e6SMatthew Dillon 
1375be05c555SSepherosa Ziehau     check_zone_free(slgd, z);
1376c2f95d8aSMatthew Dillon     logmemory(free_rem_end, z, NULL, 0L, 0);
13775fee07e6SMatthew Dillon }
1378a108bf71SMatthew Dillon 
137938e34349SMatthew Dillon /*
13805b287bbaSMatthew Dillon  * free (SLAB ALLOCATOR)
138138e34349SMatthew Dillon  *
138207de42e8SSepherosa Ziehau  * Free a memory block previously allocated by malloc.
138307de42e8SSepherosa Ziehau  *
138407de42e8SSepherosa Ziehau  * Note: We do not attempt to update ks_loosememuse as MP races could
138507de42e8SSepherosa Ziehau  * prevent us from checking memory limits in malloc.   YYY we may
138607de42e8SSepherosa Ziehau  * consider updating ks_cpu.loosememuse.
13875b287bbaSMatthew Dillon  *
13885b287bbaSMatthew Dillon  * MPSAFE
138938e34349SMatthew Dillon  */
1390a108bf71SMatthew Dillon void
1391e9dbfea1SMatthew Dillon _kfree(void *ptr, struct malloc_type *type)
1392a108bf71SMatthew Dillon {
1393a108bf71SMatthew Dillon     SLZone *z;
1394a108bf71SMatthew Dillon     SLChunk *chunk;
1395a108bf71SMatthew Dillon     SLGlobalData *slgd;
1396bba6a44dSMatthew Dillon     struct globaldata *gd;
1397722871d3SMatthew Dillon     int *kup;
13985fee07e6SMatthew Dillon     unsigned long size;
1399d8100bdcSSascha Wildner     SLChunk *bchunk;
14005fee07e6SMatthew Dillon     int rsignal;
1401a108bf71SMatthew Dillon 
1402b68ad50cSMatthew Dillon     logmemory_quick(free_beg);
1403bba6a44dSMatthew Dillon     gd = mycpu;
1404bba6a44dSMatthew Dillon     slgd = &gd->gd_slab;
1405a108bf71SMatthew Dillon 
1406d39911d9SJoerg Sonnenberger     if (ptr == NULL)
1407d39911d9SJoerg Sonnenberger 	panic("trying to free NULL pointer");
1408d39911d9SJoerg Sonnenberger 
1409a108bf71SMatthew Dillon     /*
1410a108bf71SMatthew Dillon      * Handle special 0-byte allocations
1411a108bf71SMatthew Dillon      */
1412f2b5daf9SMatthew Dillon     if (ptr == ZERO_LENGTH_PTR) {
14135bf48697SAggelos Economopoulos 	logmemory(free_zero, ptr, type, -1UL, 0);
1414b68ad50cSMatthew Dillon 	logmemory_quick(free_end);
1415a108bf71SMatthew Dillon 	return;
1416f2b5daf9SMatthew Dillon     }
1417a108bf71SMatthew Dillon 
1418a108bf71SMatthew Dillon     /*
14195fee07e6SMatthew Dillon      * Panic on bad malloc type
14205fee07e6SMatthew Dillon      */
14215fee07e6SMatthew Dillon     if (type->ks_magic != M_MAGIC)
14225fee07e6SMatthew Dillon 	panic("free: malloc type lacks magic");
14235fee07e6SMatthew Dillon 
14245fee07e6SMatthew Dillon     /*
1425a108bf71SMatthew Dillon      * Handle oversized allocations.  XXX we really should require that a
1426a108bf71SMatthew Dillon      * size be passed to free() instead of this nonsense.
1427bba6a44dSMatthew Dillon      *
1428bba6a44dSMatthew Dillon      * This code is never called via an ipi.
1429a108bf71SMatthew Dillon      */
1430a108bf71SMatthew Dillon     kup = btokup(ptr);
1431722871d3SMatthew Dillon     if (*kup > 0) {
1432722871d3SMatthew Dillon 	size = *kup << PAGE_SHIFT;
1433722871d3SMatthew Dillon 	*kup = 0;
1434a108bf71SMatthew Dillon #ifdef INVARIANTS
1435f21dfc74SMatthew Dillon 	if (use_weird_array) {
1436a108bf71SMatthew Dillon 		KKASSERT(sizeof(weirdary) <= size);
1437a108bf71SMatthew Dillon 		bcopy(weirdary, ptr, sizeof(weirdary));
1438f21dfc74SMatthew Dillon 	}
1439a108bf71SMatthew Dillon #endif
1440bba6a44dSMatthew Dillon 	/*
1441fc183e1fSMatthew Dillon 	 * NOTE: For oversized allocations we do not record the
1442fc183e1fSMatthew Dillon 	 *	     originating cpu.  It gets freed on the cpu calling
1443fc183e1fSMatthew Dillon 	 *	     kfree().  The statistics are in aggregate.
144481f5fc99SMatthew Dillon 	 *
144581f5fc99SMatthew Dillon 	 * note: XXX we have still inherited the interrupts-can't-block
144681f5fc99SMatthew Dillon 	 * assumption.  An interrupt thread does not bump
144781f5fc99SMatthew Dillon 	 * gd_intr_nesting_level so check TDF_INTTHREAD.  This is
144881f5fc99SMatthew Dillon 	 * primarily until we can fix softupdate's assumptions about free().
1449bba6a44dSMatthew Dillon 	 */
145046a3f46dSMatthew Dillon 	crit_enter();
14518edfbc5eSMatthew Dillon 	--type->ks_use[gd->gd_cpuid].inuse;
14528edfbc5eSMatthew Dillon 	type->ks_use[gd->gd_cpuid].memuse -= size;
1453fc183e1fSMatthew Dillon 	if (mycpu->gd_intr_nesting_level ||
14543175d638SSepherosa Ziehau 	    (gd->gd_curthread->td_flags & TDF_INTTHREAD)) {
1455f2b5daf9SMatthew Dillon 	    logmemory(free_ovsz_delayed, ptr, type, size, 0);
145646a3f46dSMatthew Dillon 	    z = (SLZone *)ptr;
145746a3f46dSMatthew Dillon 	    z->z_Magic = ZALLOC_OVSZ_MAGIC;
145846a3f46dSMatthew Dillon 	    z->z_ChunkSize = size;
1459243dbb26SMatthew Dillon 
1460c1b91053SMatthew Dillon 	    TAILQ_INSERT_HEAD(&slgd->FreeOvZones, z, z_Entry);
146146a3f46dSMatthew Dillon 	    crit_exit();
146246a3f46dSMatthew Dillon 	} else {
1463bba6a44dSMatthew Dillon 	    crit_exit();
1464f2b5daf9SMatthew Dillon 	    logmemory(free_ovsz, ptr, type, size, 0);
1465a108bf71SMatthew Dillon 	    kmem_slab_free(ptr, size);	/* may block */
146646a3f46dSMatthew Dillon 	}
1467b68ad50cSMatthew Dillon 	logmemory_quick(free_end);
1468a108bf71SMatthew Dillon 	return;
1469a108bf71SMatthew Dillon     }
1470a108bf71SMatthew Dillon 
1471a108bf71SMatthew Dillon     /*
1472a108bf71SMatthew Dillon      * Zone case.  Figure out the zone based on the fact that it is
1473a108bf71SMatthew Dillon      * ZoneSize aligned.
1474a108bf71SMatthew Dillon      */
14755fee07e6SMatthew Dillon     z = (SLZone *)((uintptr_t)ptr & ZoneMask);
14765fee07e6SMatthew Dillon     kup = btokup(z);
1477722871d3SMatthew Dillon     KKASSERT(*kup < 0);
1478a108bf71SMatthew Dillon     KKASSERT(z->z_Magic == ZALLOC_SLAB_MAGIC);
1479a108bf71SMatthew Dillon 
1480a108bf71SMatthew Dillon     /*
14815fee07e6SMatthew Dillon      * If we do not own the zone then use atomic ops to free to the
14825fee07e6SMatthew Dillon      * remote cpu linked list and notify the target zone using a
14835fee07e6SMatthew Dillon      * passive message.
14845fee07e6SMatthew Dillon      *
14855fee07e6SMatthew Dillon      * The target zone cannot be deallocated while we own a chunk of it,
14865fee07e6SMatthew Dillon      * so the zone header's storage is stable until the very moment
14875fee07e6SMatthew Dillon      * we adjust z_RChunks.  After that we cannot safely dereference (z).
14885fee07e6SMatthew Dillon      *
14895fee07e6SMatthew Dillon      * (no critical section needed)
1490a108bf71SMatthew Dillon      */
14912db3b277SMatthew Dillon     if (z->z_CpuGd != gd) {
14925fee07e6SMatthew Dillon 	/*
14935fee07e6SMatthew Dillon 	 * Making these adjustments now allow us to avoid passing (type)
14948edfbc5eSMatthew Dillon 	 * to the remote cpu.  Note that inuse/memuse is being
149528135cc2SMatthew Dillon 	 * adjusted on OUR cpu, not the zone cpu, but it should all still
149628135cc2SMatthew Dillon 	 * sum up properly and cancel out.
14975fee07e6SMatthew Dillon 	 */
149828135cc2SMatthew Dillon 	crit_enter();
14998edfbc5eSMatthew Dillon 	--type->ks_use[gd->gd_cpuid].inuse;
15008edfbc5eSMatthew Dillon 	type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
150128135cc2SMatthew Dillon 	crit_exit();
15025fee07e6SMatthew Dillon 
15035fee07e6SMatthew Dillon 	/*
15045fee07e6SMatthew Dillon 	 * WARNING! This code competes with other cpus.  Once we
15055fee07e6SMatthew Dillon 	 *	    successfully link the chunk to RChunks the remote
15065fee07e6SMatthew Dillon 	 *	    cpu can rip z's storage out from under us.
1507df9daea8SMatthew Dillon 	 *
1508df9daea8SMatthew Dillon 	 *	    Bumping RCount prevents z's storage from getting
1509df9daea8SMatthew Dillon 	 *	    ripped out.
15105fee07e6SMatthew Dillon 	 */
15115fee07e6SMatthew Dillon 	rsignal = z->z_RSignal;
15125fee07e6SMatthew Dillon 	cpu_lfence();
1513df9daea8SMatthew Dillon 	if (rsignal)
1514df9daea8SMatthew Dillon 		atomic_add_int(&z->z_RCount, 1);
15155fee07e6SMatthew Dillon 
15165fee07e6SMatthew Dillon 	chunk = ptr;
15175fee07e6SMatthew Dillon 	for (;;) {
15185fee07e6SMatthew Dillon 	    bchunk = z->z_RChunks;
15195fee07e6SMatthew Dillon 	    cpu_ccfence();
15205fee07e6SMatthew Dillon 	    chunk->c_Next = bchunk;
15215fee07e6SMatthew Dillon 	    cpu_sfence();
15225fee07e6SMatthew Dillon 
15235fee07e6SMatthew Dillon 	    if (atomic_cmpset_ptr(&z->z_RChunks, bchunk, chunk))
15245fee07e6SMatthew Dillon 		break;
15255fee07e6SMatthew Dillon 	}
15265fee07e6SMatthew Dillon 
15275fee07e6SMatthew Dillon 	/*
15285fee07e6SMatthew Dillon 	 * We have to signal the remote cpu if our actions will cause
15295fee07e6SMatthew Dillon 	 * the remote zone to be placed back on ZoneAry so it can
15305fee07e6SMatthew Dillon 	 * move the zone back on.
15315fee07e6SMatthew Dillon 	 *
15325fee07e6SMatthew Dillon 	 * We only need to deal with NULL->non-NULL RChunk transitions
15335fee07e6SMatthew Dillon 	 * and only if z_RSignal is set.  We interlock by reading rsignal
15345fee07e6SMatthew Dillon 	 * before adding our chunk to RChunks.  This should result in
15355fee07e6SMatthew Dillon 	 * virtually no IPI traffic.
15365fee07e6SMatthew Dillon 	 *
15375fee07e6SMatthew Dillon 	 * We can use a passive IPI to reduce overhead even further.
15385fee07e6SMatthew Dillon 	 */
15395fee07e6SMatthew Dillon 	if (bchunk == NULL && rsignal) {
1540c2f95d8aSMatthew Dillon 	    logmemory(free_request, ptr, type,
1541c2f95d8aSMatthew Dillon 		      (unsigned long)z->z_ChunkSize, 0);
15425fee07e6SMatthew Dillon 	    lwkt_send_ipiq_passive(z->z_CpuGd, kfree_remote, z);
1543df9daea8SMatthew Dillon 	    /* z can get ripped out from under us from this point on */
1544df9daea8SMatthew Dillon 	} else if (rsignal) {
1545df9daea8SMatthew Dillon 	    atomic_subtract_int(&z->z_RCount, 1);
1546df9daea8SMatthew Dillon 	    /* z can get ripped out from under us from this point on */
15475fee07e6SMatthew Dillon 	}
1548b68ad50cSMatthew Dillon 	logmemory_quick(free_end);
1549a108bf71SMatthew Dillon 	return;
1550a108bf71SMatthew Dillon     }
1551a108bf71SMatthew Dillon 
15525fee07e6SMatthew Dillon     /*
15535fee07e6SMatthew Dillon      * kfree locally
15545fee07e6SMatthew Dillon      */
15555bf48697SAggelos Economopoulos     logmemory(free_chunk, ptr, type, (unsigned long)z->z_ChunkSize, 0);
1556f2b5daf9SMatthew Dillon 
1557a108bf71SMatthew Dillon     crit_enter();
1558a108bf71SMatthew Dillon     chunk = ptr;
155910cc6608SMatthew Dillon     chunk_mark_free(z, chunk);
1560a108bf71SMatthew Dillon 
1561a108bf71SMatthew Dillon     /*
1562a108bf71SMatthew Dillon      * Put weird data into the memory to detect modifications after freeing,
1563a108bf71SMatthew Dillon      * illegal pointer use after freeing (we should fault on the odd address),
1564a108bf71SMatthew Dillon      * and so forth.  XXX needs more work, see the old malloc code.
1565a108bf71SMatthew Dillon      */
1566a108bf71SMatthew Dillon #ifdef INVARIANTS
1567f21dfc74SMatthew Dillon     if (use_weird_array) {
1568a108bf71SMatthew Dillon 	    if (z->z_ChunkSize < sizeof(weirdary))
1569a108bf71SMatthew Dillon 		bcopy(weirdary, chunk, z->z_ChunkSize);
1570a108bf71SMatthew Dillon 	    else
1571a108bf71SMatthew Dillon 		bcopy(weirdary, chunk, sizeof(weirdary));
1572f21dfc74SMatthew Dillon     }
1573a108bf71SMatthew Dillon #endif
1574a108bf71SMatthew Dillon 
1575a108bf71SMatthew Dillon     /*
15765fee07e6SMatthew Dillon      * Add this free non-zero'd chunk to a linked list for reuse.  Add
15775fee07e6SMatthew Dillon      * to the front of the linked list so it is more likely to be
15785fee07e6SMatthew Dillon      * reallocated, since it is already in our L1 cache.
1579a108bf71SMatthew Dillon      */
15806ab8e1daSMatthew Dillon #ifdef INVARIANTS
1581c439ad8fSMatthew Dillon     if ((vm_offset_t)chunk < KvaStart || (vm_offset_t)chunk >= KvaEnd)
1582fc92d4aaSHiten Pandya 	panic("BADFREE %p", chunk);
1583a108bf71SMatthew Dillon #endif
15845fee07e6SMatthew Dillon     chunk->c_Next = z->z_LChunks;
15855fee07e6SMatthew Dillon     z->z_LChunks = chunk;
15865fee07e6SMatthew Dillon     if (chunk->c_Next == NULL)
15875fee07e6SMatthew Dillon 	z->z_LChunksp = &chunk->c_Next;
15885fee07e6SMatthew Dillon 
15896ab8e1daSMatthew Dillon #ifdef INVARIANTS
1590c439ad8fSMatthew Dillon     if (chunk->c_Next && (vm_offset_t)chunk->c_Next < KvaStart)
1591a108bf71SMatthew Dillon 	panic("BADFREE2");
15926ab8e1daSMatthew Dillon #endif
1593a108bf71SMatthew Dillon 
1594a108bf71SMatthew Dillon     /*
1595a108bf71SMatthew Dillon      * Bump the number of free chunks.  If it becomes non-zero the zone
1596c1b91053SMatthew Dillon      * must be added back onto the appropriate list.  A fully allocated
1597c1b91053SMatthew Dillon      * zone that sees its first free is considered 'mature' and is placed
1598c1b91053SMatthew Dillon      * at the head, giving the system time to potentially free the remaining
1599c1b91053SMatthew Dillon      * entries even while other allocations are going on and making the zone
1600c1b91053SMatthew Dillon      * freeable.
1601a108bf71SMatthew Dillon      */
1602f21dfc74SMatthew Dillon     if (z->z_NFree++ == 0)
1603c1b91053SMatthew Dillon 	    TAILQ_INSERT_HEAD(&slgd->ZoneAry[z->z_ZoneIndex], z, z_Entry);
1604a108bf71SMatthew Dillon 
1605b8b08456SMatthew Dillon     --type->ks_use[gd->gd_cpuid].inuse;
1606b8b08456SMatthew Dillon     type->ks_use[gd->gd_cpuid].memuse -= z->z_ChunkSize;
1607a108bf71SMatthew Dillon 
1608c2f95d8aSMatthew Dillon     check_zone_free(slgd, z);
1609b68ad50cSMatthew Dillon     logmemory_quick(free_end);
1610a108bf71SMatthew Dillon     crit_exit();
1611a108bf71SMatthew Dillon }
1612a108bf71SMatthew Dillon 
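/*
 * Minimal userland sketch, not part of the original file, of the
 * z_RChunks handoff implemented by the CAS loop in _kfree() and the
 * drain in clean_zone_rchunks()/kfree_remote(): remote cpus CAS-push
 * freed chunks onto a lock-free singly-linked stack owned by the zone's
 * cpu, and the owner detaches the entire list with a single atomic
 * exchange.  C11 <stdatomic.h> stands in for the kernel atomic ops;
 * "node", "push_remote" and "drain_all" are hypothetical names.
 */
#if 0	/* example only */
#include <stdatomic.h>
#include <stddef.h>

struct node {
    struct node *c_Next;
};

static _Atomic(struct node *) rchunks;

static void
push_remote(struct node *n)	/* many producers, like the remote kfree */
{
    struct node *head = atomic_load(&rchunks);

    do {
	n->c_Next = head;	/* head is refreshed by a failed CAS */
    } while (!atomic_compare_exchange_weak(&rchunks, &head, n));
}

static struct node *
drain_all(void)			/* single consumer: the owning cpu */
{
    return (atomic_exchange(&rchunks, NULL));
}
#endif
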
1613c2f95d8aSMatthew Dillon /*
1614c1b91053SMatthew Dillon  * Clean up slabs which are hanging around due to RChunks or which are wholly
1615c1b91053SMatthew Dillon  * free and can be moved to the free list if not moved by other means.
1616c1b91053SMatthew Dillon  *
1617c1b91053SMatthew Dillon  * Called once every 10 seconds on all cpus.
1618c2f95d8aSMatthew Dillon  */
1619c2f95d8aSMatthew Dillon void
1620c2f95d8aSMatthew Dillon slab_cleanup(void)
1621c2f95d8aSMatthew Dillon {
1622c2f95d8aSMatthew Dillon     SLGlobalData *slgd = &mycpu->gd_slab;
1623c2f95d8aSMatthew Dillon     SLZone *z;
1624c2f95d8aSMatthew Dillon     int i;
1625c2f95d8aSMatthew Dillon 
1626c2f95d8aSMatthew Dillon     crit_enter();
1627c2f95d8aSMatthew Dillon     for (i = 0; i < NZONES; ++i) {
1628c1b91053SMatthew Dillon 	if ((z = TAILQ_FIRST(&slgd->ZoneAry[i])) == NULL)
1629c2f95d8aSMatthew Dillon 		continue;
1630c2f95d8aSMatthew Dillon 
1631c2f95d8aSMatthew Dillon 	/*
1632c1b91053SMatthew Dillon 	 * Scan zones.
1633c2f95d8aSMatthew Dillon 	 */
1634c2f95d8aSMatthew Dillon 	while (z) {
1635c2f95d8aSMatthew Dillon 	    /*
1636c2f95d8aSMatthew Dillon 	     * Shift all RChunks to the end of the LChunks list.  This is
1637c2f95d8aSMatthew Dillon 	     * an O(1) operation.
1638243dbb26SMatthew Dillon 	     *
1639243dbb26SMatthew Dillon 	     * Then free the zone if possible.
1640c2f95d8aSMatthew Dillon 	     */
1641c2f95d8aSMatthew Dillon 	    clean_zone_rchunks(z);
1642c2f95d8aSMatthew Dillon 	    z = check_zone_free(slgd, z);
1643c2f95d8aSMatthew Dillon 	}
1644c2f95d8aSMatthew Dillon     }
1645c2f95d8aSMatthew Dillon     crit_exit();
1646c2f95d8aSMatthew Dillon }
1647c2f95d8aSMatthew Dillon 
164810cc6608SMatthew Dillon #if defined(INVARIANTS)
16495fee07e6SMatthew Dillon 
165010cc6608SMatthew Dillon /*
165110cc6608SMatthew Dillon  * Helper routines for sanity checks
165210cc6608SMatthew Dillon  */
1653bad949c8SSepherosa Ziehau static void
165410cc6608SMatthew Dillon chunk_mark_allocated(SLZone *z, void *chunk)
165510cc6608SMatthew Dillon {
165610cc6608SMatthew Dillon     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1657e28c8ef4SSascha Wildner     uint32_t *bitptr;
165810cc6608SMatthew Dillon 
16595fee07e6SMatthew Dillon     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
16605fee07e6SMatthew Dillon     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
16615fee07e6SMatthew Dillon 	    ("memory chunk %p bit index %d is illegal", chunk, bitdex));
166210cc6608SMatthew Dillon     bitptr = &z->z_Bitmap[bitdex >> 5];
166310cc6608SMatthew Dillon     bitdex &= 31;
16645fee07e6SMatthew Dillon     KASSERT((*bitptr & (1 << bitdex)) == 0,
16655fee07e6SMatthew Dillon 	    ("memory chunk %p is already allocated!", chunk));
166610cc6608SMatthew Dillon     *bitptr |= 1 << bitdex;
166710cc6608SMatthew Dillon }
166810cc6608SMatthew Dillon 
1669bad949c8SSepherosa Ziehau static void
167010cc6608SMatthew Dillon chunk_mark_free(SLZone *z, void *chunk)
167110cc6608SMatthew Dillon {
167210cc6608SMatthew Dillon     int bitdex = ((char *)chunk - (char *)z->z_BasePtr) / z->z_ChunkSize;
1673e28c8ef4SSascha Wildner     uint32_t *bitptr;
167410cc6608SMatthew Dillon 
16755fee07e6SMatthew Dillon     KKASSERT((((intptr_t)chunk ^ (intptr_t)z) & ZoneMask) == 0);
16765fee07e6SMatthew Dillon     KASSERT(bitdex >= 0 && bitdex < z->z_NMax,
16775fee07e6SMatthew Dillon 	    ("memory chunk %p bit index %d is illegal!", chunk, bitdex));
167810cc6608SMatthew Dillon     bitptr = &z->z_Bitmap[bitdex >> 5];
167910cc6608SMatthew Dillon     bitdex &= 31;
16805fee07e6SMatthew Dillon     KASSERT((*bitptr & (1 << bitdex)) != 0,
16815fee07e6SMatthew Dillon 	    ("memory chunk %p is already free!", chunk));
168210cc6608SMatthew Dillon     *bitptr &= ~(1 << bitdex);
168310cc6608SMatthew Dillon }
168410cc6608SMatthew Dillon 
168510cc6608SMatthew Dillon #endif
168610cc6608SMatthew Dillon 
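/*
 * Standalone sketch of the bit addressing used by the INVARIANTS
 * helpers above: chunk index "bitdex" selects word (bitdex >> 5) and
 * bit (bitdex & 31) within the uint32_t z_Bitmap array.  These
 * hypothetical helpers show just the indexing, without the sanity
 * KASSERTs.
 */
#if 0	/* example only */
#include <stdint.h>

static inline void
bitmap_set(uint32_t *map, int idx)
{
    map[idx >> 5] |= 1U << (idx & 31);
}

static inline int
bitmap_test(const uint32_t *map, int idx)
{
    return ((map[idx >> 5] >> (idx & 31)) & 1);
}
#endif
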
1687a108bf71SMatthew Dillon /*
16885b287bbaSMatthew Dillon  * kmem_slab_alloc()
1689a108bf71SMatthew Dillon  *
1690a108bf71SMatthew Dillon  *	Directly allocate and wire kernel memory in PAGE_SIZE chunks with the
1691a108bf71SMatthew Dillon  *	specified alignment.  M_* flags are expected in the flags field.
1692a108bf71SMatthew Dillon  *
1693a108bf71SMatthew Dillon  *	Alignment must be a multiple of PAGE_SIZE.
1694a108bf71SMatthew Dillon  *
1695a108bf71SMatthew Dillon  *	NOTE! XXX For the moment we use vm_map_entry_reserve/release(),
1696a108bf71SMatthew Dillon  *	but when we move zalloc() over to use this function as its backend
1697a108bf71SMatthew Dillon  *	we will have to switch to kreserve/krelease and call reserve(0)
1698a108bf71SMatthew Dillon  *	after the new space is made available.
1699dc1fd4b3SMatthew Dillon  *
1700dc1fd4b3SMatthew Dillon  *	Interrupt code which has preempted other code is not allowed to
1701c397c465SMatthew Dillon  *	use PQ_CACHE pages.  However, if an interrupt thread is run
1702c397c465SMatthew Dillon  *	non-preemptively or blocks and then runs non-preemptively, then
1703b12defdcSMatthew Dillon  *	it is free to use PQ_CACHE pages.  <--- may not apply any longer XXX
1704a108bf71SMatthew Dillon  */
1705e9dbfea1SMatthew Dillon void *
1706a108bf71SMatthew Dillon kmem_slab_alloc(vm_size_t size, vm_offset_t align, int flags)
1707a108bf71SMatthew Dillon {
1708a108bf71SMatthew Dillon     vm_size_t i;
1709a108bf71SMatthew Dillon     vm_offset_t addr;
17101de1e800SJoerg Sonnenberger     int count, vmflags, base_vmflags;
1711b12defdcSMatthew Dillon     vm_page_t mbase = NULL;
1712b12defdcSMatthew Dillon     vm_page_t m;
1713dc1fd4b3SMatthew Dillon     thread_t td;
1714a108bf71SMatthew Dillon 
1715a108bf71SMatthew Dillon     size = round_page(size);
17161eeaf6b2SAaron LI     addr = vm_map_min(kernel_map);
1717a108bf71SMatthew Dillon 
1718a108bf71SMatthew Dillon     count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
1719a108bf71SMatthew Dillon     crit_enter();
17201eeaf6b2SAaron LI     vm_map_lock(kernel_map);
17211eeaf6b2SAaron LI     if (vm_map_findspace(kernel_map, addr, size, align, 0, &addr)) {
17221eeaf6b2SAaron LI 	vm_map_unlock(kernel_map);
17238cb2bf45SJoerg Sonnenberger 	if ((flags & M_NULLOK) == 0)
1724a108bf71SMatthew Dillon 	    panic("kmem_slab_alloc(): kernel_map ran out of space!");
1725a108bf71SMatthew Dillon 	vm_map_entry_release(count);
17262de4f77eSMatthew Dillon 	crit_exit();
1727a108bf71SMatthew Dillon 	return(NULL);
1728a108bf71SMatthew Dillon     }
1729e4846942SMatthew Dillon 
1730e4846942SMatthew Dillon     /*
1731e4846942SMatthew Dillon      * kernel_object maps 1:1 to kernel_map.
1732e4846942SMatthew Dillon      */
1733712b6620SAaron LI     vm_object_hold(kernel_object);
1734712b6620SAaron LI     vm_object_reference_locked(kernel_object);
17351eeaf6b2SAaron LI     vm_map_insert(kernel_map, &count,
1736712b6620SAaron LI 		  kernel_object, NULL,
173764b5a8a5SMatthew Dillon 		  addr, NULL,
173864b5a8a5SMatthew Dillon 		  addr, addr + size,
17391b874851SMatthew Dillon 		  VM_MAPTYPE_NORMAL,
17403091de50SMatthew Dillon 		  VM_SUBSYS_KMALLOC,
17413091de50SMatthew Dillon 		  VM_PROT_ALL, VM_PROT_ALL, 0);
1742712b6620SAaron LI     vm_object_drop(kernel_object);
17431eeaf6b2SAaron LI     vm_map_set_wired_quick(kernel_map, addr, size, &count);
17441eeaf6b2SAaron LI     vm_map_unlock(kernel_map);
1745a108bf71SMatthew Dillon 
1746dc1fd4b3SMatthew Dillon     td = curthread;
1747dc1fd4b3SMatthew Dillon 
17481de1e800SJoerg Sonnenberger     base_vmflags = 0;
17491de1e800SJoerg Sonnenberger     if (flags & M_ZERO)
17501de1e800SJoerg Sonnenberger         base_vmflags |= VM_ALLOC_ZERO;
17511de1e800SJoerg Sonnenberger     if (flags & M_USE_RESERVE)
17521de1e800SJoerg Sonnenberger 	base_vmflags |= VM_ALLOC_SYSTEM;
17531de1e800SJoerg Sonnenberger     if (flags & M_USE_INTERRUPT_RESERVE)
17541de1e800SJoerg Sonnenberger         base_vmflags |= VM_ALLOC_INTERRUPT;
175577912481SMatthew Dillon     if ((flags & (M_RNOWAIT|M_WAITOK)) == 0) {
175677912481SMatthew Dillon 	panic("kmem_slab_alloc: bad flags %08x (%p)",
175777912481SMatthew Dillon 	      flags, ((int **)&size)[-1]);
175877912481SMatthew Dillon     }
17591de1e800SJoerg Sonnenberger 
1760a108bf71SMatthew Dillon     /*
1761afd2da4dSMatthew Dillon      * Allocate the pages.  Do not map them yet.  VM_ALLOC_NORMAL can only
1762afd2da4dSMatthew Dillon      * be set if we are not preempting.
1763c397c465SMatthew Dillon      *
1764c397c465SMatthew Dillon      * VM_ALLOC_SYSTEM is automatically set if we are preempting and
1765c397c465SMatthew Dillon      * M_WAITOK was specified as an alternative (i.e. M_USE_RESERVE is
17664ecf7cc9SMatthew Dillon      * implied in this case), though I'm not sure if we really need to
17674ecf7cc9SMatthew Dillon      * do that.
1768fe1e98d0SMatthew Dillon      */
17691de1e800SJoerg Sonnenberger     vmflags = base_vmflags;
1770c397c465SMatthew Dillon     if (flags & M_WAITOK) {
17711de1e800SJoerg Sonnenberger 	if (td->td_preempted)
1772fe1e98d0SMatthew Dillon 	    vmflags |= VM_ALLOC_SYSTEM;
17731de1e800SJoerg Sonnenberger 	else
1774dc1fd4b3SMatthew Dillon 	    vmflags |= VM_ALLOC_NORMAL;
1775dc1fd4b3SMatthew Dillon     }
1776dc1fd4b3SMatthew Dillon 
1777712b6620SAaron LI     vm_object_hold(kernel_object);
1778b12defdcSMatthew Dillon     for (i = 0; i < size; i += PAGE_SIZE) {
1779712b6620SAaron LI 	m = vm_page_alloc(kernel_object, OFF_TO_IDX(addr + i), vmflags);
1780b12defdcSMatthew Dillon 	if (i == 0)
1781b12defdcSMatthew Dillon 		mbase = m;
1782dc1fd4b3SMatthew Dillon 
1783dc1fd4b3SMatthew Dillon 	/*
1784dc1fd4b3SMatthew Dillon 	 * If the allocation failed we either return NULL or we retry.
1785dc1fd4b3SMatthew Dillon 	 *
1786c397c465SMatthew Dillon 	 * If M_WAITOK is specified we wait for more memory and retry.
1787c397c465SMatthew Dillon 	 * If M_WAITOK is specified from a preemption we yield instead of
1788c397c465SMatthew Dillon 	 * wait.  Livelock will not occur because the interrupt thread
1789c397c465SMatthew Dillon 	 * will not be preempting anyone the second time around after the
1790c397c465SMatthew Dillon 	 * yield.
1791dc1fd4b3SMatthew Dillon 	 */
1792a108bf71SMatthew Dillon 	if (m == NULL) {
1793c397c465SMatthew Dillon 	    if (flags & M_WAITOK) {
1794fe1e98d0SMatthew Dillon 		if (td->td_preempted) {
179577912481SMatthew Dillon 		    lwkt_switch();
1796dc1fd4b3SMatthew Dillon 		} else {
17974ecf7cc9SMatthew Dillon 		    vm_wait(0);
1798dc1fd4b3SMatthew Dillon 		}
1799a108bf71SMatthew Dillon 		i -= PAGE_SIZE;	/* retry */
1800a108bf71SMatthew Dillon 		continue;
1801a108bf71SMatthew Dillon 	    }
1802b12defdcSMatthew Dillon 	    break;
1803b12defdcSMatthew Dillon 	}
1804b12defdcSMatthew Dillon     }
1805dc1fd4b3SMatthew Dillon 
1806dc1fd4b3SMatthew Dillon     /*
1807b12defdcSMatthew Dillon      * Check and deal with an allocation failure
1808dc1fd4b3SMatthew Dillon      */
1809b12defdcSMatthew Dillon     if (i != size) {
1810a108bf71SMatthew Dillon 	while (i != 0) {
1811a108bf71SMatthew Dillon 	    i -= PAGE_SIZE;
1812712b6620SAaron LI 	    m = vm_page_lookup(kernel_object, OFF_TO_IDX(addr + i));
181317cde63eSMatthew Dillon 	    /* page should already be busy */
1814a108bf71SMatthew Dillon 	    vm_page_free(m);
1815a108bf71SMatthew Dillon 	}
18161eeaf6b2SAaron LI 	vm_map_lock(kernel_map);
18171eeaf6b2SAaron LI 	vm_map_delete(kernel_map, addr, addr + size, &count);
18181eeaf6b2SAaron LI 	vm_map_unlock(kernel_map);
1819712b6620SAaron LI 	vm_object_drop(kernel_object);
1820b12defdcSMatthew Dillon 
1821a108bf71SMatthew Dillon 	vm_map_entry_release(count);
18222de4f77eSMatthew Dillon 	crit_exit();
1823a108bf71SMatthew Dillon 	return(NULL);
1824a108bf71SMatthew Dillon     }
1825a108bf71SMatthew Dillon 
1826a108bf71SMatthew Dillon     /*
1827dc1fd4b3SMatthew Dillon      * Success!
1828dc1fd4b3SMatthew Dillon      *
1829b12defdcSMatthew Dillon      * NOTE: The VM pages are still busied.  mbase points to the first one
1830b12defdcSMatthew Dillon      *	     but we have to iterate via vm_page_next()
1831a108bf71SMatthew Dillon      */
1832712b6620SAaron LI     vm_object_drop(kernel_object);
1833a108bf71SMatthew Dillon     crit_exit();
1834a108bf71SMatthew Dillon 
1835a108bf71SMatthew Dillon     /*
1836afd2da4dSMatthew Dillon      * Enter the pages into the pmap and deal with M_ZERO.
1837a108bf71SMatthew Dillon      */
1838b12defdcSMatthew Dillon     m = mbase;
1839b12defdcSMatthew Dillon     i = 0;
1840a108bf71SMatthew Dillon 
1841b12defdcSMatthew Dillon     while (i < size) {
1842b12defdcSMatthew Dillon 	/*
1843b12defdcSMatthew Dillon 	 * page should already be busy
1844b12defdcSMatthew Dillon 	 */
1845a108bf71SMatthew Dillon 	m->valid = VM_PAGE_BITS_ALL;
1846a108bf71SMatthew Dillon 	vm_page_wire(m);
1847c713db65SAaron LI 	pmap_enter(kernel_pmap, addr + i, m,
184862cc5940SMatthew Dillon 		   VM_PROT_ALL | VM_PROT_NOSYNC, 1, NULL);
1849afd2da4dSMatthew Dillon 	if (flags & M_ZERO)
1850afd2da4dSMatthew Dillon 		pagezero((char *)addr + i);
185117cde63eSMatthew Dillon 	KKASSERT(m->flags & (PG_WRITEABLE | PG_MAPPED));
185217cde63eSMatthew Dillon 	vm_page_flag_set(m, PG_REFERENCED);
1853a491077eSMatthew Dillon 	vm_page_wakeup(m);
1854b12defdcSMatthew Dillon 
1855b12defdcSMatthew Dillon 	i += PAGE_SIZE;
1856712b6620SAaron LI 	vm_object_hold(kernel_object);
1857b12defdcSMatthew Dillon 	m = vm_page_next(m);
1858712b6620SAaron LI 	vm_object_drop(kernel_object);
1859a108bf71SMatthew Dillon     }
1860b12defdcSMatthew Dillon     smp_invltlb();
1861a108bf71SMatthew Dillon     vm_map_entry_release(count);
1862a108bf71SMatthew Dillon     return((void *)addr);
1863a108bf71SMatthew Dillon }
1864a108bf71SMatthew Dillon 
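/*
 * Hypothetical usage sketch: this is essentially how _kmalloc() above
 * obtains a new zone.  Passing align == size guarantees the mapping is
 * ZoneSize-aligned, which is what lets kfree() recover the zone header
 * with ((uintptr_t)ptr & ZoneMask).
 */
#if 0	/* example only */
	SLZone *z;

	z = kmem_slab_alloc(ZoneSize, ZoneSize, M_WAITOK | M_ZERO);
	if (z != NULL) {
		/* ... carve chunks out of the zone ... */
		kmem_slab_free(z, ZoneSize);
	}
#endif
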
186538e34349SMatthew Dillon /*
18665b287bbaSMatthew Dillon  * kmem_slab_free()
186738e34349SMatthew Dillon  */
1868e9dbfea1SMatthew Dillon void
1869a108bf71SMatthew Dillon kmem_slab_free(void *ptr, vm_size_t size)
1870a108bf71SMatthew Dillon {
1871a108bf71SMatthew Dillon     crit_enter();
18721eeaf6b2SAaron LI     vm_map_remove(kernel_map, (vm_offset_t)ptr, (vm_offset_t)ptr + size);
1873a108bf71SMatthew Dillon     crit_exit();
1874a108bf71SMatthew Dillon }
1875