/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2018 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

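/* Why not malloc: malloc may take a lock that the interrupted code
   already holds, so calling it from a signal handler can deadlock.
   The pattern used instead throughout this file is an anonymous
   private mapping, i.e. (an illustrative sketch, not code from this
   file):

     void *block = mmap (NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     if (block == MAP_FAILED)
       ... report errno ...

   Note that POSIX does not actually list mmap as async-signal-safe,
   hence the "assuming" above.  */

/* Some systems (e.g. older BSDs) only provide the spelling MAP_ANON,
   and some lack MAP_FAILED; paper over both.  */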
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *)-1)
#endif

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};

/* Free memory allocated by backtrace_alloc; the allocation lock must
   be held (or the state must be unthreaded).  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  Don't put
     more than 16 entries on the free list, to avoid wasting time
     searching when allocating a block.  If the list is already full,
     leak whichever is smaller: the incoming block or the smallest
     entry on the list.  */

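  /* A block too small to hold the freelist header cannot be linked
     into the list at all and is simply dropped.  */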
  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      size_t c;
      struct backtrace_freelist_struct **ppsmall;
      struct backtrace_freelist_struct **pp;
      struct backtrace_freelist_struct *p;

      c = 0;
      ppsmall = NULL;
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if (ppsmall == NULL || (*pp)->size < (*ppsmall)->size)
	    ppsmall = pp;
	  ++c;
	}
      if (c >= 16)
	{
	  if (size <= (*ppsmall)->size)
	    return;
	  *ppsmall = (*ppsmall)->next;
	}

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}

/* Allocate memory like malloc.  If ERROR_CALLBACK is NULL, don't
   report an error.  */

void *
backtrace_alloc (struct backtrace_state *state,
		 size_t size, backtrace_error_callback error_callback,
		 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */
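  /* (GCC documents __sync_lock_test_and_set as an acquire barrier and
     __sync_lock_release as a release barrier, which is exactly the
     ordering this trylock pattern needs.)  */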

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if ((*pp)->size >= size)
	    {
	      struct backtrace_freelist_struct *p;

	      p = *pp;
	      *pp = p->next;

	      /* Round for alignment; we assume that no type we care about
		 is more than 8 bytes.  */
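	      /* E.g. (13 + 7) & ~7 == 16, while an already aligned
		 16 stays 16.  */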
	      size = (size + 7) & ~ (size_t) 7;
	      if (size < p->size)
		backtrace_free_locked (state, (char *) p + size,
				       p->size - size);

	      ret = (void *) p;

	      break;
	    }
	}

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
	{
	  if (error_callback)
	    error_callback (data, "mmap", errno);
	}
      else
	{
	  size = (size + 7) & ~ (size_t) 7;
	  if (size < asksize)
	    backtrace_free (state, (char *) page + size, asksize - size,
			    error_callback, data);

	  ret = page;
	}
    }

  return ret;
}

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
		void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we are freeing a large aligned block, just release it back to
     the system.  This case arises when growing a vector for a large
     binary with lots of debug info.  Calling munmap here may cause us
     to call mmap again if there is also a large shared library; we
     just live with that.  */
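  /* 16 * 4096 is 64 KiB, a fixed heuristic independent of the actual
     page size; the alignment checks below make sure the block really
     is something munmap can release.  */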
  if (size >= 16 * 4096)
    {
      size_t pagesize;

      pagesize = getpagesize ();
      if (((uintptr_t) addr & (pagesize - 1)) == 0
	  && (size & (pagesize - 1)) == 0)
	{
	  /* If munmap fails for some reason, just add the block to
	     the freelist.  */
	  if (munmap (addr, size) == 0)
	    return;
	}
    }

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  */
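/* The growth policy below: a fresh vector reserves room for 16
   objects of the requested size; while the needed total is under a
   page it is doubled (capped at one page); past a page it is doubled
   and rounded up to a whole number of pages.  */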

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
		       backtrace_error_callback error_callback,
		       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

      pagesize = getpagesize ();
      alc = vec->size + size;
      if (vec->size == 0)
	alc = 16 * size;
      else if (alc < pagesize)
	{
	  alc *= 2;
	  if (alc > pagesize)
	    alc = pagesize;
	}
      else
	{
	  alc *= 2;
	  alc = (alc + pagesize - 1) & ~ (pagesize - 1);
	}
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
	return NULL;
      if (vec->base != NULL)
	{
	  memcpy (base, vec->base, vec->size);
	  backtrace_free (state, vec->base, vec->size + vec->alc,
			  error_callback, data);
	}
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}

/* Finish the current allocation on VEC.  */
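/* The finished object stays where it is; base is simply advanced
   past it, so the remaining vec->alc bytes are still available for
   the next allocation out of the same block.  */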

void *
backtrace_vector_finish (struct backtrace_state *state ATTRIBUTE_UNUSED,
			 struct backtrace_vector *vec,
			 backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
			 void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
			  struct backtrace_vector *vec,
			  backtrace_error_callback error_callback,
			  void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
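  /* E.g. size == 13 and alc == 20: aligned becomes 16, the three
     padding bytes are taken out of alc, and 17 bytes starting at
     base + 16 are freed.  */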
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;

  backtrace_free (state, (char *) vec->base + aligned, alc,
		  error_callback, data);
  vec->alc = 0;
  return 1;
}
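
/* Illustrative usage (a sketch, not code from this file): the rest
   of libbacktrace grows element-at-a-time vectors like so, with
   `struct function' standing in for whatever element type the caller
   stores:

     struct backtrace_vector vec;
     struct function *fn;

     memset (&vec, 0, sizeof vec);
     fn = ((struct function *)
	   backtrace_vector_grow (state, sizeof (struct function),
				  error_callback, data, &vec));
     if (fn == NULL)
       return 0;
     ... fill in *fn, grow again for the next element ...

   and once the table is complete, any slack is handed back with
   backtrace_vector_release (state, &vec, error_callback, data).  */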