/* $NetBSD: sljitUtils.c,v 1.7 2014/06/17 19:33:20 alnsn Exp $ */

/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* ------------------------------------------------------------------------ */
/*  Locks                                                                    */
/* ------------------------------------------------------------------------ */

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* Always successful. */
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* Always successful. */
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */

#include "windows.h"

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static HANDLE allocator_mutex = 0;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* No idea what to do if an error occurs. Static mutexes should never fail... */
	if (!allocator_mutex)
		allocator_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(allocator_mutex, INFINITE);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	ReleaseMutex(allocator_mutex);
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

static HANDLE global_mutex = 0;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* No idea what to do if an error occurs. Static mutexes should never fail... */
	if (!global_mutex)
		global_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(global_mutex, INFINITE);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	ReleaseMutex(global_mutex);
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#else /* _WIN32 */

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#ifdef _KERNEL

#include <sys/mutex.h>

/* Defined in sljit_mod.c */
extern kmutex_t sljit_allocator_mutex;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	mutex_enter(&sljit_allocator_mutex);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	mutex_exit(&sljit_allocator_mutex);
}
#else

#include <pthread.h>

static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	pthread_mutex_lock(&allocator_mutex);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	pthread_mutex_unlock(&allocator_mutex);
}
#endif

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

#ifdef _KERNEL

#include <sys/mutex.h>

/* Defined in sljit_mod.c */
extern kmutex_t sljit_global_mutex;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	mutex_enter(&sljit_global_mutex);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	mutex_exit(&sljit_global_mutex);
}
#else

#include <pthread.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	pthread_mutex_lock(&global_mutex);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	pthread_mutex_unlock(&global_mutex);
}
#endif

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#endif /* _WIN32 */
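/*
 * Illustrative note (not part of the upstream sources): the global lock is
 * meant to bracket any use of shared sljit state from multiple threads.  A
 * minimal sketch of the expected call pattern, assuming a caller-defined
 * update_shared_state() helper:
 *
 *	sljit_grab_lock();
 *	update_shared_state();
 *	sljit_release_lock();
 *
 * allocator_grab_lock()/allocator_release_lock() are used the same way
 * internally by the executable allocator.
 */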
/* ------------------------------------------------------------------------ */
/*  Stack                                                                    */
/* ------------------------------------------------------------------------ */

#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) || (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#ifdef _KERNEL
#include <sys/param.h>
#include <uvm/uvm.h>
#elif defined(_WIN32)
#include "windows.h"
#else
/* Provides mmap function. */
#include <sys/mman.h>
/* For detecting the page size. */
#include <unistd.h>

#ifndef MAP_ANON

#include <fcntl.h>

/* Some old systems do not have MAP_ANON. */
static sljit_si dev_zero = -1;

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

static SLJIT_INLINE sljit_si open_dev_zero(void)
{
	dev_zero = open("/dev/zero", O_RDWR);
	return dev_zero < 0;
}

#else /* SLJIT_SINGLE_THREADED */

#include <pthread.h>

static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER;

static SLJIT_INLINE sljit_si open_dev_zero(void)
{
	pthread_mutex_lock(&dev_zero_mutex);
	/* Recheck under the lock so that two racing threads do not open
	   /dev/zero twice. */
	if (dev_zero < 0)
		dev_zero = open("/dev/zero", O_RDWR);
	pthread_mutex_unlock(&dev_zero_mutex);
	return dev_zero < 0;
}

#endif /* SLJIT_SINGLE_THREADED */

#endif

#endif

#endif /* SLJIT_UTIL_STACK || SLJIT_EXECUTABLE_ALLOCATOR */
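/*
 * Illustrative note (not part of the upstream sources): sljit_page_align
 * below stores the page size minus one, so rounding a size or address up to
 * a whole number of pages is a single mask operation.  For example, with
 * 4096-byte pages (sljit_page_align == 0xfff):
 *
 *	(5000 + 0xfff) & ~0xfff == 8192		(rounded up to two pages)
 *	(4096 + 0xfff) & ~0xfff == 4096		(already page aligned)
 *
 * This is the rounding used by sljit_allocate_stack() and
 * sljit_stack_resize() below.
 */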
#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)

/* Planning to make it even more clever in the future. */
static sljit_sw sljit_page_align = 0;

SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit)
{
	struct sljit_stack *stack;
	union {
		void *ptr;
		sljit_uw uw;
	} base;
#ifdef _WIN32
	SYSTEM_INFO si;
#endif
#ifdef _KERNEL
	vaddr_t v;
#endif

	if (limit > max_limit || limit < 1)
		return NULL;

#ifdef _WIN32
	if (!sljit_page_align) {
		GetSystemInfo(&si);
		sljit_page_align = si.dwPageSize - 1;
	}
#else
	if (!sljit_page_align) {
#ifdef _KERNEL
		sljit_page_align = PAGE_SIZE;
#else
		sljit_page_align = sysconf(_SC_PAGESIZE);
#endif
		/* Should never happen. */
		if (sljit_page_align < 0)
			sljit_page_align = 4096;
		sljit_page_align--;
	}
#endif

	/* Align limit and max_limit. */
	max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;

	stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack));
	if (!stack)
		return NULL;

#ifdef _KERNEL
	v = uvm_km_alloc(kernel_map, max_limit, PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	base.ptr = (void *)v;
	if (base.ptr == NULL) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base + limit;
	stack->max_limit = stack->base + max_limit;
#elif defined(_WIN32)
	base.ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE);
	if (!base.ptr) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base;
	stack->max_limit = stack->base + max_limit;
	if (sljit_stack_resize(stack, stack->base + limit)) {
		sljit_free_stack(stack);
		return NULL;
	}
#else
#ifdef MAP_ANON
	base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
	if (dev_zero < 0) {
		if (open_dev_zero()) {
			SLJIT_FREE(stack);
			return NULL;
		}
	}
	base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0);
#endif
	if (base.ptr == MAP_FAILED) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base + limit;
	stack->max_limit = stack->base + max_limit;
#endif
	stack->top = stack->base;
	return stack;
}

#undef PAGE_ALIGN
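/*
 * Illustrative sketch (not part of the upstream sources) of the intended
 * lifecycle of a stack, assuming a 64 KB initial limit that may grow up to
 * 1 MB.  Note that sljit_stack_resize() takes an absolute address, not a
 * size:
 *
 *	struct sljit_stack *stack = sljit_allocate_stack(64 * 1024, 1024 * 1024);
 *	if (stack != NULL) {
 *		... run generated code between stack->base and stack->limit ...
 *		if (sljit_stack_resize(stack, stack->base + 128 * 1024) != 0)
 *			... growing failed, the old limit is still in effect ...
 *		sljit_free_stack(stack);
 *	}
 *
 * Only the committed/limit bookkeeping changes on resize; the max_limit
 * reserved at allocation time cannot be exceeded.
 */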
SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack)
{
#ifdef _KERNEL
	uvm_km_free(kernel_map, (vaddr_t)stack->base,
	    stack->max_limit - stack->base, UVM_KMF_WIRED);
#elif defined(_WIN32)
	VirtualFree((void*)stack->base, 0, MEM_RELEASE);
#else
	munmap((void*)stack->base, stack->max_limit - stack->base);
#endif
	SLJIT_FREE(stack);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit)
{
	if ((new_limit > stack->max_limit) || (new_limit < stack->base))
		return -1;
#ifdef _WIN32
	sljit_uw aligned_new_limit =
	    (new_limit + sljit_page_align) & ~sljit_page_align;
	sljit_uw aligned_old_limit =
	    (stack->limit + sljit_page_align) & ~sljit_page_align;
	if (aligned_new_limit != aligned_old_limit) {
		if (aligned_new_limit > aligned_old_limit) {
			/* Commit the newly needed pages. */
			if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE))
				return -1;
		}
		else {
			/* Decommit the pages that are no longer needed. */
			if (!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT))
				return -1;
		}
	}
	stack->limit = new_limit;
	return 0;
#else
	if (new_limit >= stack->limit) {
		stack->limit = new_limit;
		return 0;
	}
#if defined(POSIX_MADV_DONTNEED)
#define MADVISE(new, old) posix_madvise((new), (old), POSIX_MADV_DONTNEED)
#elif defined(MADV_DONTNEED)
#define MADVISE(new, old) madvise((new), (old), MADV_DONTNEED)
#endif
#ifdef MADVISE
	sljit_uw aligned_new_limit =
	    (new_limit + sljit_page_align) & ~sljit_page_align;
	sljit_uw aligned_old_limit =
	    (stack->limit + sljit_page_align) & ~sljit_page_align;
	/* If madvise is available, we release the unnecessary space. */
	if (aligned_new_limit < aligned_old_limit)
		MADVISE((void*)aligned_new_limit,
		    aligned_old_limit - aligned_new_limit);
#endif
	stack->limit = new_limit;
	return 0;
#endif
}

#endif /* SLJIT_UTIL_STACK */

#endif