xref: /netbsd-src/sys/external/bsd/sljit/dist/sljit_src/sljitUtils.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /*	$NetBSD: sljitUtils.c,v 1.9 2016/05/29 17:19:01 alnsn Exp $	*/
2 
3 /*
4  *    Stack-less Just-In-Time compiler
5  *
6  *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without modification, are
9  * permitted provided that the following conditions are met:
10  *
11  *   1. Redistributions of source code must retain the above copyright notice, this list of
12  *      conditions and the following disclaimer.
13  *
14  *   2. Redistributions in binary form must reproduce the above copyright notice, this list
15  *      of conditions and the following disclaimer in the documentation and/or other materials
16  *      provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
21  * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23  * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
24  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /* ------------------------------------------------------------------------ */
30 /*  Locks                                                                   */
31 /* ------------------------------------------------------------------------ */
32 
33 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
34 
35 #if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
36 
37 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
38 
/*
 * Single-threaded build: no other thread can ever contend for the
 * executable allocator, so the lock degenerates to a no-op pair.
 */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* Always successful. */
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	/* Always successful. */
}
48 
49 #endif /* SLJIT_EXECUTABLE_ALLOCATOR */
50 
51 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
52 
/*
 * Public global lock, single-threaded build: callers cannot race,
 * so both operations are no-ops.
 */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* Always successful. */
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	/* Always successful. */
}
62 
63 #endif /* SLJIT_UTIL_GLOBAL_LOCK */
64 
65 #elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */
66 
67 #include "windows.h"
68 
69 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
70 
71 static HANDLE allocator_mutex = 0;
72 
73 static SLJIT_INLINE void allocator_grab_lock(void)
74 {
75 	/* No idea what to do if an error occures. Static mutexes should never fail... */
76 	if (!allocator_mutex)
77 		allocator_mutex = CreateMutex(NULL, TRUE, NULL);
78 	else
79 		WaitForSingleObject(allocator_mutex, INFINITE);
80 }
81 
82 static SLJIT_INLINE void allocator_release_lock(void)
83 {
84 	ReleaseMutex(allocator_mutex);
85 }
86 
87 #endif /* SLJIT_EXECUTABLE_ALLOCATOR */
88 
89 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
90 
91 static HANDLE global_mutex = 0;
92 
93 SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
94 {
95 	/* No idea what to do if an error occures. Static mutexes should never fail... */
96 	if (!global_mutex)
97 		global_mutex = CreateMutex(NULL, TRUE, NULL);
98 	else
99 		WaitForSingleObject(global_mutex, INFINITE);
100 }
101 
102 SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
103 {
104 	ReleaseMutex(global_mutex);
105 }
106 
107 #endif /* SLJIT_UTIL_GLOBAL_LOCK */
108 
109 #else /* _WIN32 */
110 
111 #if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
112 
113 #ifdef _KERNEL
114 
115 #include <sys/mutex.h>
116 
117 /* Defined in sljit_mod.c */
118 extern kmutex_t sljit_allocator_mutex;
119 
/* Grab the executable-allocator lock; backed by the NetBSD kernel
 * mutex declared above (defined in sljit_mod.c). */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	mutex_enter(&sljit_allocator_mutex);
}

/* Release the lock taken by allocator_grab_lock(). */
static SLJIT_INLINE void allocator_release_lock(void)
{
	mutex_exit(&sljit_allocator_mutex);
}
129 #else
130 
131 #include <pthread.h>
132 
/* Userland: the executable allocator is protected by a statically
 * initialized pthread mutex, so no lazy-creation step is needed. */
static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Grab the executable-allocator lock. */
static SLJIT_INLINE void allocator_grab_lock(void)
{
	pthread_mutex_lock(&allocator_mutex);
}

/* Release the lock taken by allocator_grab_lock(). */
static SLJIT_INLINE void allocator_release_lock(void)
{
	pthread_mutex_unlock(&allocator_mutex);
}
144 #endif
145 
146 #endif /* SLJIT_EXECUTABLE_ALLOCATOR */
147 
148 #if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
149 
150 #ifdef _KERNEL
151 
152 #include <sys/mutex.h>
153 
154 /* Defined in sljit_mod.c */
155 extern kmutex_t sljit_global_mutex;
156 
/* Public global lock; backed by the NetBSD kernel mutex declared
 * above (defined in sljit_mod.c). */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	mutex_enter(&sljit_global_mutex);
}

/* Release the lock taken by sljit_grab_lock(). */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	mutex_exit(&sljit_global_mutex);
}
166 #else
167 
168 #include <pthread.h>
169 
/* Userland: the public global lock is a statically initialized
 * pthread mutex, so no lazy-creation step is needed. */
static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Public global lock: grab. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	pthread_mutex_lock(&global_mutex);
}

/* Public global lock: release. */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	pthread_mutex_unlock(&global_mutex);
}
181 #endif
182 
183 #endif /* SLJIT_UTIL_GLOBAL_LOCK */
184 
185 #endif /* _WIN32 */
186 
187 /* ------------------------------------------------------------------------ */
188 /*  Stack                                                                   */
189 /* ------------------------------------------------------------------------ */
190 
191 #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) || (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)
192 
193 #ifdef _KERNEL
194 #include <sys/param.h>
195 #include <uvm/uvm.h>
196 #elif defined(_WIN32)
197 #include "windows.h"
198 #else
199 /* Provides mmap function. */
200 #include <sys/mman.h>
201 /* For detecting the page size. */
202 #include <unistd.h>
203 
204 #ifndef MAP_ANON
205 
206 #include <fcntl.h>
207 
208 /* Some old systems does not have MAP_ANON. */
209 static sljit_s32 dev_zero = -1;
210 
211 #if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)
212 
213 static SLJIT_INLINE sljit_s32 open_dev_zero(void)
214 {
215 	dev_zero = open("/dev/zero", O_RDWR);
216 	return dev_zero < 0;
217 }
218 
219 #else /* SLJIT_SINGLE_THREADED */
220 
221 #include <pthread.h>
222 
223 static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER;
224 
225 static SLJIT_INLINE sljit_s32 open_dev_zero(void)
226 {
227 	pthread_mutex_lock(&dev_zero_mutex);
228 	/* The dev_zero might be initialized by another thread during the waiting. */
229 	if (dev_zero < 0) {
230 		dev_zero = open("/dev/zero", O_RDWR);
231 	}
232 	pthread_mutex_unlock(&dev_zero_mutex);
233 	return dev_zero < 0;
234 }
235 
236 #endif /* SLJIT_SINGLE_THREADED */
237 
238 #endif
239 
240 #endif
241 
242 #endif /* SLJIT_UTIL_STACK || SLJIT_EXECUTABLE_ALLOCATOR */
243 
244 #if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)
245 
/* Cached page-size mask (page_size - 1 on non-Windows; dwPageSize - 1 on
 * Windows).  Computed lazily on the first sljit_allocate_stack() call.
 * Planning to make it even more clever in the future. */
static sljit_sw sljit_page_align = 0;

/*
 * Allocate a sljit stack descriptor plus its backing memory.
 *
 *   limit       - initially usable size in bytes (must be >= 1)
 *   max_limit   - maximum size the stack may grow to (rounded up to a
 *                 whole number of pages); must be >= limit
 *   allocator_data - opaque cookie forwarded to SLJIT_MALLOC/SLJIT_FREE
 *
 * Returns NULL on bad arguments or allocation failure.  The caller frees
 * the result with sljit_free_stack().  Backing store per platform:
 * NetBSD kernel uvm_km_alloc(), Win32 VirtualAlloc (reserve, then commit
 * via sljit_stack_resize), otherwise mmap().
 */
SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit, void *allocator_data)
{
	struct sljit_stack *stack;
	/* Union to view the mapping both as a pointer and as an address. */
	union {
		void *ptr;
		sljit_uw uw;
	} base;
#ifdef _WIN32
	SYSTEM_INFO si;
#endif
#ifdef _KERNEL
	vaddr_t v;
#endif

	SLJIT_UNUSED_ARG(allocator_data);
	if (limit > max_limit || limit < 1)
		return NULL;

#ifdef _WIN32
	/* Lazily detect the page size; stored as (page_size - 1) mask. */
	if (!sljit_page_align) {
		GetSystemInfo(&si);
		sljit_page_align = si.dwPageSize - 1;
	}
#else
	if (!sljit_page_align) {
#ifdef _KERNEL
		sljit_page_align = PAGE_SIZE;
#else
		sljit_page_align = sysconf(_SC_PAGESIZE);
#endif
		/* Should never happen. */
		if (sljit_page_align < 0)
			sljit_page_align = 4096;
		/* Keep the value as a mask (page_size - 1). */
		sljit_page_align--;
	}
#endif

	/* Round max_limit up to a whole page; limit stays as requested. */
	max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;

	stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack), allocator_data);
	if (!stack)
		return NULL;

#ifdef _KERNEL
	/* Kernel: wired, zeroed kernel-map allocation; 0 means failure. */
	v = uvm_km_alloc(kernel_map, max_limit, PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	base.ptr = (void *)v;
	if (base.ptr == NULL) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base + limit;
	stack->max_limit = stack->base + max_limit;
#elif defined(_WIN32)
	/* Win32: reserve the whole range, then commit the initial `limit`
	 * bytes through sljit_stack_resize(). */
	base.ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE);
	if (!base.ptr) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base;
	stack->max_limit = stack->base + max_limit;
	if (sljit_stack_resize(stack, stack->base + limit)) {
		sljit_free_stack(stack, allocator_data);
		return NULL;
	}
#else
#ifdef MAP_ANON
	base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
	/* Old systems without MAP_ANON: map /dev/zero instead. */
	if (dev_zero < 0) {
		if (open_dev_zero()) {
			SLJIT_FREE(stack, allocator_data);
			return NULL;
		}
	}
	base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
#endif
	if (base.ptr == MAP_FAILED) {
		SLJIT_FREE(stack, allocator_data);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base + limit;
	stack->max_limit = stack->base + max_limit;
#endif
	stack->top = stack->base;
	return stack;
}
339 
340 #undef PAGE_ALIGN
341 
/*
 * Release a stack allocated by sljit_allocate_stack(): unmap/free the
 * backing memory with the platform-matching primitive, then free the
 * descriptor itself.  `allocator_data` is the same cookie that was
 * passed to sljit_allocate_stack().
 */
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack, void *allocator_data)
{
	SLJIT_UNUSED_ARG(allocator_data);
#ifdef _KERNEL
	uvm_km_free(kernel_map, (vaddr_t)stack->base,
	    stack->max_limit - stack->base, UVM_KMF_WIRED);
#elif defined(_WIN32)
	/* Size 0 with MEM_RELEASE frees the entire reservation. */
	VirtualFree((void*)stack->base, 0, MEM_RELEASE);
#else
	munmap((void*)stack->base, stack->max_limit - stack->base);
#endif
	SLJIT_FREE(stack, allocator_data);
}
355 
/*
 * Change the usable size of the stack so it ends at `new_limit`
 * (an absolute address within [stack->base, stack->max_limit]).
 * Returns 0 on success, -1 on failure or out-of-range `new_limit`.
 *
 * Win32: commits/decommits whole pages to match the new limit.
 * Elsewhere: the mapping is already fully accessible, so growing only
 * updates the bookkeeping; shrinking additionally hints the kernel via
 * (posix_)madvise(DONTNEED), when available, to reclaim the pages.
 */
SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit)
{
	if ((new_limit > stack->max_limit) || (new_limit < stack->base))
		return -1;
#ifdef _WIN32
	/* Round both limits up to page boundaries to compare committed
	 * regions (sljit_page_align is page_size - 1). */
	sljit_uw aligned_new_limit =
	    (new_limit + sljit_page_align) & ~sljit_page_align;
	sljit_uw aligned_old_limit =
	    (stack->limit + sljit_page_align) & ~sljit_page_align;
	if (aligned_new_limit != aligned_old_limit) {
		if (aligned_new_limit > aligned_old_limit) {
			/* Growing: commit the additional pages. */
			if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE))
				return -1;
		}
		else {
			/* Shrinking: decommit the now-unused pages. */
			if (!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT))
				return -1;
		}
	}
	stack->limit = new_limit;
	return 0;
#else
	/* Growing needs no syscall: the whole range was mapped up front. */
	if (new_limit >= stack->limit) {
		stack->limit = new_limit;
		return 0;
	}
#if defined(POSIX_MADV_DONTNEED)
# define MADVISE(new, old) posix_madvise((new), (old), POSIX_MADV_DONTNEED)
#elif defined(MADV_DONTNEED)
# define MADVISE(new, old) madvise((new), (old), MADV_DONTNEED)
#endif
#ifdef MADVISE
	sljit_uw aligned_new_limit =
	    (new_limit + sljit_page_align) & ~sljit_page_align;
	sljit_uw aligned_old_limit =
	    (stack->limit + sljit_page_align) & ~sljit_page_align;
	/* If madvise is available, we release the unnecessary space. */
	if (aligned_new_limit < aligned_old_limit)
		MADVISE((void*)aligned_new_limit,
		    aligned_old_limit - aligned_new_limit);
#endif
	stack->limit = new_limit;
	return 0;
#endif
}
401 
402 #endif /* SLJIT_UTIL_STACK */
403 
404 #endif
405