/*	$NetBSD: sljitNativeX86_32.c,v 1.5 2016/05/29 17:09:33 alnsn Exp $	*/

/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* x86 32-bit arch dependent functions. */

static sljit_s32 emit_do_imm(struct sljit_compiler *compiler, sljit_u8 opcode, sljit_sw imm)
{
	sljit_u8 *inst;

	inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + sizeof(sljit_sw));
	FAIL_IF(!inst);
	INC_SIZE(1 + sizeof(sljit_sw));
	*inst++ = opcode;
	*(sljit_sw*)inst = imm;
	return SLJIT_SUCCESS;
}

static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr, sljit_s32 type)
{
	if (type == SLJIT_JUMP) {
		*code_ptr++ = JMP_i32;
		jump->addr++;
	}
	else if (type >= SLJIT_FAST_CALL) {
		*code_ptr++ = CALL_i32;
		jump->addr++;
	}
	else {
		*code_ptr++ = GROUP_0F;
		*code_ptr++ = get_jump_code(type);
		jump->addr += 2;
	}

	if (jump->flags & JUMP_LABEL)
		jump->flags |= PATCH_MW;
	else
		*(sljit_sw*)code_ptr = jump->u.target - (jump->addr + 4);
	code_ptr += 4;

	return code_ptr;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	sljit_s32 size;
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
	set_emit_enter(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);

	compiler->args = args;
	compiler->flags_saved = 0;

	size = 1 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	size += (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0);
#else
	size += (args > 0 ? (2 + args * 3) : 0);
#endif
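	/* The prologue emitted below saves TMP_REG1 and the required saved
	   registers, then copies the incoming arguments into S0-S2 (the first
	   two from the fastcall argument registers when SLJIT_X86_32_FASTCALL
	   is set, the rest from the caller's stack frame). */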
	inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
	FAIL_IF(!inst);

	INC_SIZE(size);
	PUSH_REG(reg_map[TMP_REG1]);
#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (args > 0) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_REG | (reg_map[TMP_REG1] << 3) | 0x4 /* esp */;
	}
#endif
	if (saveds > 2 || scratches > 7)
		PUSH_REG(reg_map[SLJIT_S2]);
	if (saveds > 1 || scratches > 8)
		PUSH_REG(reg_map[SLJIT_S1]);
	if (saveds > 0 || scratches > 9)
		PUSH_REG(reg_map[SLJIT_S0]);

#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (args > 0) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_REG | (reg_map[SLJIT_S0] << 3) | reg_map[SLJIT_R2];
	}
	if (args > 1) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_REG | (reg_map[SLJIT_S1] << 3) | reg_map[SLJIT_R1];
	}
	if (args > 2) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | 0x4 /* esp */;
		*inst++ = 0x24;
		*inst++ = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. */
	}
#else
	if (args > 0) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S0] << 3) | reg_map[TMP_REG1];
		*inst++ = sizeof(sljit_sw) * 2;
	}
	if (args > 1) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S1] << 3) | reg_map[TMP_REG1];
		*inst++ = sizeof(sljit_sw) * 3;
	}
	if (args > 2) {
		*inst++ = MOV_r_rm;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_S2] << 3) | reg_map[TMP_REG1];
		*inst++ = sizeof(sljit_sw) * 4;
	}
#endif

	SLJIT_COMPILE_ASSERT(SLJIT_LOCALS_OFFSET >= (2 + 4) * sizeof(sljit_uw), require_at_least_two_words);
#if defined(__APPLE__)
	/* Ignore pushed registers and SLJIT_LOCALS_OFFSET when computing the aligned local size. */
	saveds = (2 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
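	/* The rounding below keeps the stack 16 byte aligned, as required by
	   the macOS ABI. */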
	local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
#else
	if (options & SLJIT_DOUBLE_ALIGNMENT) {
		local_size = SLJIT_LOCALS_OFFSET + ((local_size + 7) & ~7);

		inst = (sljit_u8*)ensure_buf(compiler, 1 + 17);
		FAIL_IF(!inst);

		INC_SIZE(17);
		inst[0] = MOV_r_rm;
		inst[1] = MOD_REG | (reg_map[TMP_REG1] << 3) | reg_map[SLJIT_SP];
		inst[2] = GROUP_F7;
		inst[3] = MOD_REG | (0 << 3) | reg_map[SLJIT_SP];
		*(sljit_sw*)(inst + 4) = 0x4;
		inst[8] = JNE_i8;
		inst[9] = 6;
		inst[10] = GROUP_BINARY_81;
		inst[11] = MOD_REG | (5 << 3) | reg_map[SLJIT_SP];
		*(sljit_sw*)(inst + 12) = 0x4;
		inst[16] = PUSH_r + reg_map[TMP_REG1];
	}
	else
		local_size = SLJIT_LOCALS_OFFSET + ((local_size + 3) & ~3);
#endif

	compiler->local_size = local_size;
#ifdef _WIN32
	if (local_size > 1024) {
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
		FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
#else
		local_size -= SLJIT_LOCALS_OFFSET;
		FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_R0], local_size));
		FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
			SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, SLJIT_LOCALS_OFFSET));
#endif
		FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
	}
#endif

	SLJIT_ASSERT(local_size > 0);
	return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
		SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size);
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *compiler,
	sljit_s32 options, sljit_s32 args, sljit_s32 scratches, sljit_s32 saveds,
	sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
	CHECK_ERROR();
	CHECK(check_sljit_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size));
	set_set_context(compiler, options, args, scratches, saveds, fscratches, fsaveds, local_size);

	compiler->args = args;

#if defined(__APPLE__)
	saveds = (2 + (scratches > 7 ? (scratches - 7) : 0) + (saveds <= 3 ? saveds : 3)) * sizeof(sljit_uw);
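	/* Note: this mirrors the local_size computation in sljit_emit_enter above. */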
	compiler->local_size = ((SLJIT_LOCALS_OFFSET + saveds + local_size + 15) & ~15) - saveds;
#else
	if (options & SLJIT_DOUBLE_ALIGNMENT)
		compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + 7) & ~7);
	else
		compiler->local_size = SLJIT_LOCALS_OFFSET + ((local_size + 3) & ~3);
#endif
	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
{
	sljit_s32 size;
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_return(compiler, op, src, srcw));
	SLJIT_ASSERT(compiler->args >= 0);

	compiler->flags_saved = 0;
	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

	SLJIT_ASSERT(compiler->local_size > 0);
	FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
		SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, compiler->local_size));

#if !defined(__APPLE__)
	if (compiler->options & SLJIT_DOUBLE_ALIGNMENT) {
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
		FAIL_IF(!inst);

		INC_SIZE(3);
		inst[0] = MOV_r_rm;
		inst[1] = (reg_map[SLJIT_SP] << 3) | 0x4 /* SIB */;
		inst[2] = (4 << 3) | reg_map[SLJIT_SP];
	}
#endif

	size = 2 + (compiler->scratches > 7 ? (compiler->scratches - 7) : 0) +
		(compiler->saveds <= 3 ? compiler->saveds : 3);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (compiler->args > 2)
		size += 2;
#else
	if (compiler->args > 0)
		size += 2;
#endif
	inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
	FAIL_IF(!inst);

	INC_SIZE(size);

	if (compiler->saveds > 0 || compiler->scratches > 9)
		POP_REG(reg_map[SLJIT_S0]);
	if (compiler->saveds > 1 || compiler->scratches > 8)
		POP_REG(reg_map[SLJIT_S1]);
	if (compiler->saveds > 2 || compiler->scratches > 7)
		POP_REG(reg_map[SLJIT_S2]);
	POP_REG(reg_map[TMP_REG1]);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (compiler->args > 2)
		RET_I16(sizeof(sljit_sw));
	else
		RET();
#else
	RET();
#endif

	return SLJIT_SUCCESS;
}

/* --------------------------------------------------------------------- */
/*  Operators                                                            */
/* --------------------------------------------------------------------- */

/* Size contains the flags as well. */
static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size,
	/* The register or immediate operand. */
	sljit_s32 a, sljit_sw imma,
	/* The general operand (not immediate). */
	sljit_s32 b, sljit_sw immb)
{
	sljit_u8 *inst;
	sljit_u8 *buf_ptr;
	sljit_s32 flags = size & ~0xf;
	sljit_s32 inst_size;

	/* Both cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
	/* Size flags not allowed for typed instructions. */
	SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
	/* Both size flags cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
	/* SSE2 and immediate is not possible. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
	SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
		&& (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
		&& (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));
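	/* The instruction is built in two passes: first the encoded length
	   (prefixes, opcode bytes, ModRM, optional SIB, displacement and
	   immediate) is computed, then the bytes are written into the newly
	   reserved buffer space. */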

	size &= 0xf;
	inst_size = size;

	if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
		inst_size++;
	if (flags & EX86_PREF_66)
		inst_size++;

	/* Calculate size of b. */
	inst_size += 1; /* mod r/m byte. */
	if (b & SLJIT_MEM) {
		if ((b & REG_MASK) == SLJIT_UNUSED)
			inst_size += sizeof(sljit_sw);
		else if (immb != 0 && !(b & OFFS_REG_MASK)) {
			/* Immediate operand. */
			if (immb <= 127 && immb >= -128)
				inst_size += sizeof(sljit_s8);
			else
				inst_size += sizeof(sljit_sw);
		}

		if ((b & REG_MASK) == SLJIT_SP && !(b & OFFS_REG_MASK))
			b |= TO_OFFS_REG(SLJIT_SP);

		if ((b & OFFS_REG_MASK) != SLJIT_UNUSED)
			inst_size += 1; /* SIB byte. */
	}

	/* Calculate size of a. */
	if (a & SLJIT_IMM) {
		if (flags & EX86_BIN_INS) {
			if (imma <= 127 && imma >= -128) {
				inst_size += 1;
				flags |= EX86_BYTE_ARG;
			} else
				inst_size += 4;
		}
		else if (flags & EX86_SHIFT_INS) {
			imma &= 0x1f;
			if (imma != 1) {
				inst_size++;
				flags |= EX86_BYTE_ARG;
			}
		} else if (flags & EX86_BYTE_ARG)
			inst_size++;
		else if (flags & EX86_HALF_ARG)
			inst_size += sizeof(short);
		else
			inst_size += sizeof(sljit_sw);
	}
	else
		SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);

	inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
	PTR_FAIL_IF(!inst);

	/* Encoding the byte. */
	INC_SIZE(inst_size);
	if (flags & EX86_PREF_F2)
		*inst++ = 0xf2;
	if (flags & EX86_PREF_F3)
		*inst++ = 0xf3;
	if (flags & EX86_PREF_66)
		*inst++ = 0x66;

	buf_ptr = inst + size;

	/* Encode mod/rm byte. */
	if (!(flags & EX86_SHIFT_INS)) {
		if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
			*inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;

		if ((a & SLJIT_IMM) || (a == 0))
			*buf_ptr = 0;
		else if (!(flags & EX86_SSE2_OP1))
			*buf_ptr = reg_map[a] << 3;
		else
			*buf_ptr = a << 3;
	}
	else {
		if (a & SLJIT_IMM) {
			if (imma == 1)
				*inst = GROUP_SHIFT_1;
			else
				*inst = GROUP_SHIFT_N;
		} else
			*inst = GROUP_SHIFT_CL;
		*buf_ptr = 0;
	}

	if (!(b & SLJIT_MEM))
		*buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_map[b] : b);
	else if ((b & REG_MASK) != SLJIT_UNUSED) {
		if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
			if (immb != 0) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr |= 0x40;
				else
					*buf_ptr |= 0x80;
			}

			if ((b & OFFS_REG_MASK) == SLJIT_UNUSED)
				*buf_ptr++ |= reg_map[b & REG_MASK];
			else {
				*buf_ptr++ |= 0x04;
				*buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3);
			}

			if (immb != 0) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr++ = immb; /* 8 bit displacement. */
				else {
					*(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */
					buf_ptr += sizeof(sljit_sw);
				}
			}
		}
		else {
			*buf_ptr++ |= 0x04;
			*buf_ptr++ = reg_map[b & REG_MASK] | (reg_map[OFFS_REG(b)] << 3) | (immb << 6);
		}
	}
	else {
		*buf_ptr++ |= 0x05;
		*(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */
		buf_ptr += sizeof(sljit_sw);
	}
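	/* The immediate operand, if any, is stored after the ModRM/SIB/displacement bytes. */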
	if (a & SLJIT_IMM) {
		if (flags & EX86_BYTE_ARG)
			*buf_ptr = imma;
		else if (flags & EX86_HALF_ARG)
			*(short*)buf_ptr = imma;
		else if (!(flags & EX86_SHIFT_INS))
			*(sljit_sw*)buf_ptr = imma;
	}

	return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
}

/* --------------------------------------------------------------------- */
/*  Call / return instructions                                           */
/* --------------------------------------------------------------------- */

static SLJIT_INLINE sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 type)
{
	sljit_u8 *inst;

#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	inst = (sljit_u8*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2);
	FAIL_IF(!inst);
	INC_SIZE(type >= SLJIT_CALL3 ? 2 + 1 : 2);

	if (type >= SLJIT_CALL3)
		PUSH_REG(reg_map[SLJIT_R2]);
	*inst++ = MOV_r_rm;
	*inst++ = MOD_REG | (reg_map[SLJIT_R2] << 3) | reg_map[SLJIT_R0];
#else
	inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 * (type - SLJIT_CALL0));
	FAIL_IF(!inst);
	INC_SIZE(4 * (type - SLJIT_CALL0));

	*inst++ = MOV_rm_r;
	*inst++ = MOD_DISP8 | (reg_map[SLJIT_R0] << 3) | 0x4 /* SIB */;
	*inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
	*inst++ = 0;
	if (type >= SLJIT_CALL2) {
		*inst++ = MOV_rm_r;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_R1] << 3) | 0x4 /* SIB */;
		*inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
		*inst++ = sizeof(sljit_sw);
	}
	if (type >= SLJIT_CALL3) {
		*inst++ = MOV_rm_r;
		*inst++ = MOD_DISP8 | (reg_map[SLJIT_R2] << 3) | 0x4 /* SIB */;
		*inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_SP];
		*inst++ = 2 * sizeof(sljit_sw);
	}
#endif
	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw)
{
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
	ADJUST_LOCAL_OFFSET(dst, dstw);

	CHECK_EXTRA_REGS(dst, dstw, (void)0);

	/* For UNUSED dst. Uncommon, but possible. */
	if (dst == SLJIT_UNUSED)
		dst = TMP_REG1;

	if (FAST_IS_REG(dst)) {
		/* Unused dest is possible here. */
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);

		INC_SIZE(1);
		POP_REG(reg_map[dst]);
		return SLJIT_SUCCESS;
	}

	/* Memory. */
	inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
	FAIL_IF(!inst);
	*inst++ = POP_rm;
	return SLJIT_SUCCESS;
}

SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_s32 src, sljit_sw srcw)
{
	sljit_u8 *inst;

	CHECK_ERROR();
	CHECK(check_sljit_emit_fast_return(compiler, src, srcw));
	ADJUST_LOCAL_OFFSET(src, srcw);

	CHECK_EXTRA_REGS(src, srcw, (void)0);

	if (FAST_IS_REG(src)) {
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 1);
		FAIL_IF(!inst);

		INC_SIZE(1 + 1);
		PUSH_REG(reg_map[src]);
	}
	else if (src & SLJIT_MEM) {
		inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
		FAIL_IF(!inst);
		*inst++ = GROUP_FF;
		*inst |= PUSH_rm;

		inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!inst);
		INC_SIZE(1);
	}
	else {
		/* SLJIT_IMM. */
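		/* The immediate return address is pushed as a 32 bit value; the RET
		   emitted at the end pops it and transfers control there. */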
		inst = (sljit_u8*)ensure_buf(compiler, 1 + 5 + 1);
		FAIL_IF(!inst);

		INC_SIZE(5 + 1);
		*inst++ = PUSH_i32;
		*(sljit_sw*)inst = srcw;
		inst += sizeof(sljit_sw);
	}

	RET();
	return SLJIT_SUCCESS;
}