/* mmap.c -- Memory allocation with mmap.
   Copyright (C) 2012-2013 Free Software Foundation, Inc.
   Written by Ian Lance Taylor, Google.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    (1) Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

    (2) Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in
    the documentation and/or other materials provided with the
    distribution.

    (3) The name of the author may not be used to
    endorse or promote products derived from this software without
    specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.  */

#include "config.h"

#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "backtrace.h"
#include "internal.h"

/* Memory allocation on systems that provide anonymous mmap.  This
   permits the backtrace functions to be invoked from a signal
   handler, assuming that mmap is async-signal safe.  */

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
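
/* For illustration: fresh memory below always comes from anonymous,
   private mappings, requested as in backtrace_alloc further down:

     void *page = mmap (NULL, len, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

   On success the kernel returns zero-filled, page-aligned memory that
   is not backed by any file descriptor; on failure mmap returns
   MAP_FAILED, not NULL.  */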

/* A list of free memory blocks.  */

struct backtrace_freelist_struct
{
  /* Next on list.  */
  struct backtrace_freelist_struct *next;
  /* Size of this block, including this structure.  */
  size_t size;
};
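
/* The free list is intrusive: a freed block is reused in place as its
   own list node, so only blocks of at least
   sizeof (struct backtrace_freelist_struct) bytes are worth keeping.
   For example, freeing a 64-byte block at address P overlays the
   `next' pointer and `size' field on the first bytes of P itself; no
   separate bookkeeping allocation is needed.  */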

/* Add memory to the free list.  The caller must hold the allocation
   lock if the state is threaded; backtrace_free below handles that.  */

static void
backtrace_free_locked (struct backtrace_state *state, void *addr, size_t size)
{
  /* Just leak small blocks.  We don't have to be perfect.  */
  if (size >= sizeof (struct backtrace_freelist_struct))
    {
      struct backtrace_freelist_struct *p;

      p = (struct backtrace_freelist_struct *) addr;
      p->next = state->freelist;
      p->size = size;
      state->freelist = p;
    }
}

/* Allocate memory like malloc.  */

void *
backtrace_alloc (struct backtrace_state *state,
		 size_t size, backtrace_error_callback error_callback,
		 void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;
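
  /* __sync_lock_test_and_set is a GCC atomic builtin: it atomically
     stores 1 into the lock word and returns the previous value, with
     acquire semantics; __sync_lock_release stores 0 back with release
     semantics.  Trying the lock once and falling back to mmap, rather
     than spinning, is what keeps this safe to call from a signal
     handler.  */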

  if (locked)
    {
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if ((*pp)->size >= size)
	    {
	      struct backtrace_freelist_struct *p;

	      p = *pp;
	      *pp = p->next;

	      /* Round for alignment; we assume that no type we care
		 about requires more than 8-byte alignment.  */
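	      /* For example, a 13-byte request rounds up to
		 (13 + 7) & ~7 == 16.  */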
	      size = (size + 7) & ~ (size_t) 7;
	      if (size < p->size)
		backtrace_free_locked (state, (char *) p + size,
				       p->size - size);

	      ret = (void *) p;

	      break;
	    }
	}

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate new pages from the system.  */

      pagesize = getpagesize ();
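      /* Round the request up to a whole number of pages; this assumes
	 the page size is a power of two.  E.g., with 4096-byte pages,
	 a 5000-byte request asks the kernel for 8192 bytes.  */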
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
	error_callback (data, "mmap", errno);
      else
	{
	  size = (size + 7) & ~ (size_t) 7;
	  if (size < asksize)
	    backtrace_free (state, (char *) page + size, asksize - size,
			    error_callback, data);

	  ret = page;
	}
    }

  return ret;
}
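
/* For illustration only -- a hypothetical caller, not part of this
   file.  An allocation with error reporting might look like:

     static void
     my_error (void *data, const char *msg, int errnum)
     {
       fprintf (stderr, "libbacktrace: %s (%d)\n", msg, errnum);
     }

     ...
     char *buf = backtrace_alloc (state, 128, my_error, NULL);

   The callback shape matches backtrace_error_callback as declared in
   backtrace.h; errnum carries the errno value when one applies.  */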

/* Free memory allocated by backtrace_alloc.  */

void
backtrace_free (struct backtrace_state *state, void *addr, size_t size,
		backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
		void *data ATTRIBUTE_UNUSED)
{
  int locked;

  /* If we can acquire the lock, add the new space to the free list.
     If we can't acquire the lock, just leak the memory.
     __sync_lock_test_and_set returns the old state of the lock, so we
     have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      backtrace_free_locked (state, addr, size);

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }
}

/* Grow VEC by SIZE bytes.  */

void *
backtrace_vector_grow (struct backtrace_state *state, size_t size,
		       backtrace_error_callback error_callback,
		       void *data, struct backtrace_vector *vec)
{
  void *ret;

  if (size > vec->alc)
    {
      size_t pagesize;
      size_t alc;
      void *base;

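      /* Growth policy: the very first allocation reserves room for 16
	 objects of the requested size; while the vector is smaller
	 than a page it doubles, capped at one page; past a page it
	 grows to the next page boundary.  */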
      pagesize = getpagesize ();
      alc = vec->size + size;
      if (vec->size == 0)
	alc = 16 * size;
      else if (alc < pagesize)
	{
	  alc *= 2;
	  if (alc > pagesize)
	    alc = pagesize;
	}
      else
	alc = (alc + pagesize - 1) & ~ (pagesize - 1);
      base = backtrace_alloc (state, alc, error_callback, data);
      if (base == NULL)
	return NULL;
      if (vec->base != NULL)
	{
	  memcpy (base, vec->base, vec->size);
	  backtrace_free (state, vec->base, vec->size + vec->alc,
			  error_callback, data);
	}
      vec->base = base;
      vec->alc = alc - vec->size;
    }

  ret = (char *) vec->base + vec->size;
  vec->size += size;
  vec->alc -= size;
  return ret;
}
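
/* For illustration only -- hypothetical usage, not part of this file.
   Callers build an object incrementally and then detach it:

     struct backtrace_vector vec;
     char *p;

     memset (&vec, 0, sizeof vec);
     p = (char *) backtrace_vector_grow (state, len, error_callback,
					 data, &vec);
     if (p == NULL)
       return NULL;
     memcpy (p, src, len);
     return backtrace_vector_finish (state, &vec, error_callback, data);

   Each grow returns a pointer to SIZE fresh bytes at the end of the
   vector; finish (below) then hands back everything accumulated so
   far.  */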

/* Finish the current allocation on VEC.  */

void *
backtrace_vector_finish (
  struct backtrace_state *state ATTRIBUTE_UNUSED,
  struct backtrace_vector *vec,
  backtrace_error_callback error_callback ATTRIBUTE_UNUSED,
  void *data ATTRIBUTE_UNUSED)
{
  void *ret;

  ret = vec->base;
  vec->base = (char *) vec->base + vec->size;
  vec->size = 0;
  return ret;
}
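
/* Note that finish leaves VEC->alc alone: VEC->base now points just
   past the finished block, so the next backtrace_vector_grow keeps
   filling the same underlying allocation.  */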

/* Release any extra space allocated for VEC.  */

int
backtrace_vector_release (struct backtrace_state *state,
			  struct backtrace_vector *vec,
			  backtrace_error_callback error_callback,
			  void *data)
{
  size_t size;
  size_t alc;
  size_t aligned;

  /* Make sure that the block that we free is aligned on an 8-byte
     boundary.  */
  size = vec->size;
  alc = vec->alc;
  aligned = (size + 7) & ~ (size_t) 7;
  alc -= aligned - size;
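  /* For example, if size is 10 then aligned is 16, and those 6 bytes
     of padding stay with the vector rather than going back on the
     free list.  Note this subtraction assumes alc is at least
     aligned - size.  */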

  backtrace_free (state, (char *) vec->base + aligned, alc,
		  error_callback, data);
  vec->alc = 0;
  return 1;
}