/*
 * This module derived from code donated to the FreeBSD Project by
 * Matthew Dillon <dillon@backplane.com>
 *
 * Copyright (c) 1998 The FreeBSD Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libstand/zalloc.c,v 1.5.2.1 2002/12/28 18:04:15 dillon Exp $
 * $DragonFly: src/lib/libstand/zalloc.c,v 1.2 2003/06/17 04:26:51 dillon Exp $
 */

/*
 * LIB/MEMORY/ZALLOC.C	- self-contained low-overhead memory pool/allocation
 *			  subsystem
 *
 *	This subsystem implements memory pools and memory allocation
 *	routines.
 *
 *	Pools are managed via a linked list of 'free' areas.  Allocating
 *	memory creates holes in the freelist, freeing memory fills them.
 *	Since the freelist consists only of free memory areas, it is possible
 *	to allocate the entire pool without incurring any structural overhead.
 *
 *	The system works best when allocating similarly-sized chunks of
 *	memory.  Care must be taken to avoid fragmentation when
 *	allocating/deallocating dissimilar chunks.
 *
 *	When a memory pool is first allocated, the entire pool is marked as
 *	allocated.  This is done mainly because we do not want to modify any
 *	portion of a pool's data area until we are given permission.  The
 *	caller must explicitly deallocate portions of the pool to make them
 *	available.
 *
 *	z[n]xalloc() works like z[n]alloc() but the allocation is made from
 *	within the specified address range.  If the segment could not be
 *	allocated, NULL is returned.  WARNING!  The address range will be
 *	aligned to an 8 or 16 byte boundary depending on the cpu, so if you
 *	give an unaligned address range, unexpected results may occur.
 *
 *	If a standard allocation fails, the reclaim function will be called
 *	to recover some space.  This usually causes other portions of the
 *	same pool to be released.  Memory allocations at this low level
 *	should not block, but you can do that too in your reclaim function
 *	if you want.  Reclaim does not function when z[n]xalloc() is used,
 *	only for z[n]alloc().
 *
 *	Allocations and frees of 0 bytes are valid operations.
 */
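
/*
 * Example usage (an illustrative sketch, not part of the original API
 * documentation; "Pool" and "Buf" are hypothetical names, and the MemPool
 * must start out zeroed, which static storage guarantees):
 *
 *	static MemPool Pool;
 *	static char Buf[32768];
 *	void *p;
 *
 *	zextendPool(&Pool, Buf, sizeof(Buf));
 *	zfree(&Pool, Buf, sizeof(Buf));		-- pool memory starts allocated
 *	p = znalloc(&Pool, 1024);
 *	if (p != NULL)
 *		zfree(&Pool, p, 1024);
 *
 * The caller must remember the size of each allocation and pass it back
 * to zfree().
 */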

#include "zalloc_defs.h"

/*
 * znalloc() - allocate memory (without zeroing) from pool.  Return
 *	       NULL if unable to allocate memory.
 */

void *
znalloc(MemPool *mp, uintptr_t bytes)
{
    /*
     * align according to pool object size (can be 0).  This is
     * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
     */
    bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

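    /*
     * A zero-length allocation is valid (see the header comment); hand
     * back a distinct non-NULL token which must not be dereferenced.
     */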
    if (bytes == 0)
        return((void *)-1);

    /*
     * locate freelist entry big enough to hold the object.  If all objects
     * are the same size, this is a constant-time function.
     */

    if (bytes <= mp->mp_Size - mp->mp_Used) {
        MemNode **pmn;
        MemNode *mn;

        for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
            if (bytes > mn->mr_Bytes)
                continue;

            /*
             * Cut a chunk of memory out of the beginning of this
             * block and fixup the link appropriately.
             */

            {
                char *ptr = (char *)mn;

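                /*
                 * Exact fit: unlink this node.  Otherwise carve the
                 * request from the front of the block and slide the
                 * node header forward past the allocated space.
                 */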
                if (mn->mr_Bytes == bytes) {
                    *pmn = mn->mr_Next;
                } else {
                    mn = (MemNode *)((char *)mn + bytes);
                    mn->mr_Next = ((MemNode *)ptr)->mr_Next;
                    mn->mr_Bytes = ((MemNode *)ptr)->mr_Bytes - bytes;
                    *pmn = mn;
                }
                mp->mp_Used += bytes;
                return(ptr);
            }
        }
    }

    /*
     * The pool is full or too fragmented to satisfy the request;
     * return NULL.
     */

    return(NULL);
}

/*
 * zfree() - free previously allocated memory
 */

void
zfree(MemPool *mp, void *ptr, uintptr_t bytes)
{
    /*
     * align according to pool object size (can be 0).  This is
     * inclusive of the MEMNODE_SIZE_MASK minimum alignment.
     */
    bytes = (bytes + MEMNODE_SIZE_MASK) & ~MEMNODE_SIZE_MASK;

    if (bytes == 0)
        return;

    /*
     * panic if illegal pointer
     */

    if ((char *)ptr < (char *)mp->mp_Base ||
        (char *)ptr + bytes > (char *)mp->mp_End ||
        ((uintptr_t)ptr & MEMNODE_SIZE_MASK) != 0)
        panic("zfree(%p,%ju): wild pointer", ptr, (uintmax_t)bytes);

    /*
     * free the segment
     */

    {
        MemNode **pmn;
        MemNode *mn;

        mp->mp_Used -= bytes;

        for (pmn = &mp->mp_First; (mn = *pmn) != NULL; pmn = &mn->mr_Next) {
            /*
             * If the area being freed lies at or before the current node:
             *  - check range
             *  - check merge with next area
             *  - check merge with previous area
             */
            if ((char *)ptr <= (char *)mn) {
                /*
                 * range check
                 */
                if ((char *)ptr + bytes > (char *)mn) {
                    panic("zfree(%p,%ju): corrupt memlist1", ptr,
                        (uintmax_t)bytes);
                }

                /*
                 * merge against next area or create independent area
                 */

                if ((char *)ptr + bytes == (char *)mn) {
                    ((MemNode *)ptr)->mr_Next = mn->mr_Next;
                    ((MemNode *)ptr)->mr_Bytes = bytes + mn->mr_Bytes;
                } else {
                    ((MemNode *)ptr)->mr_Next = mn;
                    ((MemNode *)ptr)->mr_Bytes = bytes;
                }
                *pmn = mn = (MemNode *)ptr;

                /*
                 * merge against previous area (if there is a previous
                 * area).
                 */

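                /*
                 * Note: pmn points at the previous node's mr_Next field;
                 * this relies on mr_Next being the first member of
                 * MemNode, so casting pmn back to a MemNode recovers the
                 * previous node itself.
                 */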
                if (pmn != &mp->mp_First) {
                    if ((char *)pmn + ((MemNode *)pmn)->mr_Bytes ==
                        (char *)ptr) {
                        ((MemNode *)pmn)->mr_Next = mn->mr_Next;
                        ((MemNode *)pmn)->mr_Bytes += mn->mr_Bytes;
                        mn = (MemNode *)pmn;
                    }
                }
                return;
                /* NOT REACHED */
            }
            if ((char *)ptr < (char *)mn + mn->mr_Bytes) {
                panic("zfree(%p,%ju): corrupt memlist2", ptr,
                    (uintmax_t)bytes);
            }
        }
        /*
         * We are beyond the last MemNode; append a new MemNode and
         * merge against the previous area if possible.
         */
        if (pmn == &mp->mp_First ||
            (char *)pmn + ((MemNode *)pmn)->mr_Bytes != (char *)ptr
        ) {
            ((MemNode *)ptr)->mr_Next = NULL;
            ((MemNode *)ptr)->mr_Bytes = bytes;
            *pmn = (MemNode *)ptr;
            mn = (MemNode *)ptr;
        } else {
            ((MemNode *)pmn)->mr_Bytes += bytes;
            mn = (MemNode *)pmn;
        }
    }
}

/*
 * zextendPool() - extend memory pool to cover additional space.
 *
 *		    Note: the added memory starts out as allocated; you
 *		    must free it to make it available to the memory subsystem.
 *
 *		    Note: mp_Size may not reflect the (mp_End - mp_Base) range
 *		    due to other parts of the system doing their own sbrk()
 *		    calls.
 */

void
zextendPool(MemPool *mp, void *base, uintptr_t bytes)
{
    if (mp->mp_Size == 0) {
        mp->mp_Base = base;
        mp->mp_Used = bytes;
        mp->mp_End = (char *)base + bytes;
        mp->mp_Size = bytes;
    } else {
        void *pend = (char *)mp->mp_Base + mp->mp_Size;

        if (base < mp->mp_Base) {
            mp->mp_Size += (char *)mp->mp_Base - (char *)base;
            mp->mp_Used += (char *)mp->mp_Base - (char *)base;
            mp->mp_Base = base;
        }
        base = (char *)base + bytes;
        if (base > pend) {
            mp->mp_Size += (char *)base - (char *)pend;
            mp->mp_Used += (char *)base - (char *)pend;
            mp->mp_End = (char *)base;
        }
    }
}
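
/*
 * Illustrative sketch (hypothetical "mp" and "p"): extending a pool with
 * a newly grown region, e.g. from sbrk().  The added range starts out
 * accounted as allocated, so it must be explicitly freed; any gap between
 * the old pool end and a non-adjacent region stays permanently allocated.
 *
 *	p = sbrk(16384);
 *	zextendPool(mp, p, 16384);
 *	zfree(mp, p, 16384);
 */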

#ifdef ZALLOCDEBUG

void
zallocstats(MemPool *mp)
{
    int abytes = 0;
    int hbytes = 0;
    int fcount = 0;
    MemNode *mn;

    printf("%d bytes reserved", (int)mp->mp_Size);

    mn = mp->mp_First;

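    /*
     * abytes accumulates allocated space inferred from the gaps between
     * free nodes; fcount/hbytes count the free fragments (and their total
     * size) other than a free area abutting the end of the pool.
     */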
    if ((void *)mn != (void *)mp->mp_Base) {
        abytes += (char *)mn - (char *)mp->mp_Base;
    }

    while (mn) {
        if ((char *)mn + mn->mr_Bytes != mp->mp_End) {
            hbytes += mn->mr_Bytes;
            ++fcount;
        }
        if (mn->mr_Next)
            abytes += (char *)mn->mr_Next - ((char *)mn + mn->mr_Bytes);
        mn = mn->mr_Next;
    }
    printf(" %d bytes allocated\n%d fragments (%d bytes fragmented)\n",
        abytes,
        fcount,
        hbytes
    );
}

#endif
