/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* ------------------------------------------------------------------------ */
/*  Locks                                                                    */
/* ------------------------------------------------------------------------ */

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED)

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* Always successful. */
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* Always successful. */
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	/* Always successful. */
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */

#include "windows.h"

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

static HANDLE allocator_mutex = 0;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	/* No idea what to do if an error occurs. Static mutexes should never fail... */
	if (!allocator_mutex)
		allocator_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(allocator_mutex, INFINITE);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	ReleaseMutex(allocator_mutex);
}

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

static HANDLE global_mutex = 0;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	/* No idea what to do if an error occurs. Static mutexes should never fail... */
	if (!global_mutex)
		global_mutex = CreateMutex(NULL, TRUE, NULL);
	else
		WaitForSingleObject(global_mutex, INFINITE);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	ReleaseMutex(global_mutex);
}

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#else /* _WIN32 */

#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR)

#ifdef _KERNEL

#include <sys/mutex.h>

/* Defined in sljit_mod.c */
extern kmutex_t sljit_allocator_mutex;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	mutex_enter(&sljit_allocator_mutex);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	mutex_exit(&sljit_allocator_mutex);
}
#else

#include <pthread.h>

static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER;

static SLJIT_INLINE void allocator_grab_lock(void)
{
	pthread_mutex_lock(&allocator_mutex);
}

static SLJIT_INLINE void allocator_release_lock(void)
{
	pthread_mutex_unlock(&allocator_mutex);
}
#endif

#endif /* SLJIT_EXECUTABLE_ALLOCATOR */

#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)

#ifdef _KERNEL

#include <sys/mutex.h>

/* Defined in sljit_mod.c */
extern kmutex_t sljit_global_mutex;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	mutex_enter(&sljit_global_mutex);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	mutex_exit(&sljit_global_mutex);
}
#else

#include <pthread.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void)
{
	pthread_mutex_lock(&global_mutex);
}

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void)
{
	pthread_mutex_unlock(&global_mutex);
}
#endif

#endif /* SLJIT_UTIL_GLOBAL_LOCK */

#endif /* _WIN32 */
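
/* Illustrative sketch (kept out of the build): how a caller is expected to
   pair the global lock functions above around a critical section when
   SLJIT_UTIL_GLOBAL_LOCK is enabled. With SLJIT_SINGLE_THREADED the calls
   compile to no-ops. The names shared_counter and update_shared_counter
   below are hypothetical and not part of sljit. */
#if 0
static int shared_counter;

static void update_shared_counter(void)
{
	sljit_grab_lock();
	/* Only one thread executes this region at a time. */
	shared_counter++;
	sljit_release_lock();
}
#endif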
/* ------------------------------------------------------------------------ */
/*  Stack                                                                    */
/* ------------------------------------------------------------------------ */

#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)

#ifdef _KERNEL
#include <sys/param.h>
#include <uvm/uvm.h>
#elif defined(_WIN32)
#include "windows.h"
#else
#include <sys/mman.h>
#include <unistd.h>
#endif

/* Planning to make it even more clever in the future. */
static sljit_w sljit_page_align = 0;

SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit)
{
	struct sljit_stack *stack;
	union {
		void *ptr;
		sljit_uw uw;
	} base;
#ifdef _WIN32
	SYSTEM_INFO si;
#endif
#ifdef _KERNEL
	vaddr_t v;
#endif

	if (limit > max_limit || limit < 1)
		return NULL;

#ifdef _WIN32
	if (!sljit_page_align) {
		GetSystemInfo(&si);
		sljit_page_align = si.dwPageSize - 1;
	}
#else
	if (!sljit_page_align) {
#ifdef _KERNEL
		sljit_page_align = PAGE_SIZE;
#else
		sljit_page_align = sysconf(_SC_PAGESIZE);
#endif
		/* Should never happen. */
		if (sljit_page_align < 0)
			sljit_page_align = 4096;
		sljit_page_align--;
	}
#endif

	/* Align max_limit to the page size. */
	max_limit = (max_limit + sljit_page_align) & ~sljit_page_align;

	stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack));
	if (!stack)
		return NULL;

#ifdef _KERNEL
	v = uvm_km_alloc(kernel_map, max_limit, PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	base.ptr = (void *)v;
	if (base.ptr == NULL) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base + limit;
	stack->max_limit = stack->base + max_limit;
#elif defined(_WIN32)
	base.ptr = VirtualAlloc(0, max_limit, MEM_RESERVE, PAGE_READWRITE);
	if (!base.ptr) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base;
	stack->max_limit = stack->base + max_limit;
	if (sljit_stack_resize(stack, stack->base + limit)) {
		sljit_free_stack(stack);
		return NULL;
	}
#else
	base.ptr = mmap(0, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (base.ptr == MAP_FAILED) {
		SLJIT_FREE(stack);
		return NULL;
	}
	stack->base = base.uw;
	stack->limit = stack->base + limit;
	stack->max_limit = stack->base + max_limit;
#endif
	stack->top = stack->base;
	return stack;
}

#undef PAGE_ALIGN

SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack)
{
#ifdef _KERNEL
	uvm_km_free(kernel_map, (vaddr_t)stack->base,
	    stack->max_limit - stack->base, UVM_KMF_WIRED);
#elif defined(_WIN32)
	VirtualFree((void*)stack->base, 0, MEM_RELEASE);
#else
	munmap((void*)stack->base, stack->max_limit - stack->base);
#endif
	SLJIT_FREE(stack);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_w SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit)
{
	if ((new_limit > stack->max_limit) || (new_limit < stack->base))
		return -1;
#ifdef _WIN32
	sljit_uw aligned_new_limit =
	    (new_limit + sljit_page_align) & ~sljit_page_align;
	sljit_uw aligned_old_limit =
	    (stack->limit + sljit_page_align) & ~sljit_page_align;
	if (aligned_new_limit != aligned_old_limit) {
		if (aligned_new_limit > aligned_old_limit) {
			if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE))
				return -1;
		}
		else {
			if (!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT))
				return -1;
		}
	}
	stack->limit = new_limit;
	return 0;
#else
	if (new_limit >= stack->limit) {
		stack->limit = new_limit;
		return 0;
	}
#if defined(POSIX_MADV_DONTNEED)
# define MADVISE(new, old) posix_madvise((new), (old), POSIX_MADV_DONTNEED)
#elif defined(MADV_DONTNEED)
# define MADVISE(new, old) madvise((new), (old), MADV_DONTNEED)
#endif
#ifdef MADVISE
	sljit_uw aligned_new_limit =
	    (new_limit + sljit_page_align) & ~sljit_page_align;
	sljit_uw aligned_old_limit =
	    (stack->limit + sljit_page_align) & ~sljit_page_align;
	/* If madvise is available, we release the unnecessary space. */
	if (aligned_new_limit < aligned_old_limit)
		MADVISE((void*)aligned_new_limit,
		    aligned_old_limit - aligned_new_limit);
#endif
	stack->limit = new_limit;
	return 0;
#endif
}

#endif /* SLJIT_UTIL_STACK */

#endif /* SLJIT_EXECUTABLE_ALLOCATOR || SLJIT_UTIL_GLOBAL_LOCK */
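
/* Illustrative sketch (kept out of the build): typical use of the stack
   utilities above when SLJIT_UTIL_STACK is enabled. A caller reserves a
   growable region, enlarges it on demand with sljit_stack_resize(), and
   releases it with sljit_free_stack(). The sizes chosen and the use_stack()
   helper below are hypothetical and only serve as an example. */
#if 0
static void stack_example(void)
{
	/* Reserve up to 1 MB, with 64 KB usable initially. */
	struct sljit_stack *stack = sljit_allocate_stack(64 * 1024, 1024 * 1024);
	if (!stack)
		return;

	/* Grow the usable area to 256 KB; returns 0 on success, -1 on failure. */
	if (sljit_stack_resize(stack, stack->base + 256 * 1024) == 0) {
		/* The range stack->base .. stack->limit is now writable. */
		use_stack(stack);
	}

	/* Shrinking is also allowed; unused pages may be returned to the OS. */
	sljit_stack_resize(stack, stack->base + 64 * 1024);

	sljit_free_stack(stack);
}
#endif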