/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright 2009-2012 Zoltan Herczeg (hzmester (at) freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* x86 32-bit arch dependent functions. */

/* Emits a one byte opcode followed by a full machine word immediate
   (e.g. "mov reg, imm32" style encodings). Returns SLJIT_SUCCESS, or
   fails (via FAIL_IF) when the code buffer cannot be grown. */
static int emit_do_imm(struct sljit_compiler *compiler, sljit_ub opcode, sljit_w imm)
{
	sljit_ub *buf;

	/* The leading "1 +" matches the bookkeeping byte used by the
	   ensure_buf / INC_SIZE pair throughout this file. NOTE: INC_SIZE
	   operates on the local named 'buf'. */
	buf = (sljit_ub*)ensure_buf(compiler, 1 + 1 + sizeof(sljit_w));
	FAIL_IF(!buf);
	INC_SIZE(1 + sizeof(sljit_w));
	*buf++ = opcode;
	*(sljit_w*)buf = imm;
	return SLJIT_SUCCESS;
}

41 static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, int type)
42 {
43 if (type == SLJIT_JUMP) {
44 *code_ptr++ = 0xe9;
45 jump->addr++;
46 }
47 else if (type >= SLJIT_FAST_CALL) {
48 *code_ptr++ = 0xe8;
49 jump->addr++;
50 }
51 else {
52 *code_ptr++ = 0x0f;
53 *code_ptr++ = get_jump_code(type);
54 jump->addr += 2;
55 }
56
57 if (jump->flags & JUMP_LABEL)
58 jump->flags |= PATCH_MW;
59 else
60 *(sljit_w*)code_ptr = jump->u.target - (jump->addr + 4);
61 code_ptr += 4;
62
63 return code_ptr;
64 }
65
66 SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_enter(struct sljit_compiler *compiler, int args, int temporaries, int saveds, int local_size)
67 {
68 int size;
69 int locals_offset;
70 sljit_ub *buf;
71
72 CHECK_ERROR();
73 check_sljit_emit_enter(compiler, args, temporaries, saveds, local_size);
74
75 compiler->temporaries = temporaries;
76 compiler->saveds = saveds;
77 compiler->args = args;
78 compiler->flags_saved = 0;
79 #if (defined SLJIT_DEBUG && SLJIT_DEBUG)
80 compiler->logical_local_size = local_size;
81 #endif
82
83 #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
84 size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0);
85 #else
86 size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (2 + args * 3) : 0);
87 #endif
88 buf = (sljit_ub*)ensure_buf(compiler, 1 + size);
89 FAIL_IF(!buf);
90
91 INC_SIZE(size);
92 PUSH_REG(reg_map[TMP_REGISTER]);
93 #if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
94 if (args > 0) {
95 *buf++ = 0x8b;
96 *buf++ = 0xc4 | (reg_map[TMP_REGISTER] << 3);
97 }
98 #endif
99 if (saveds > 2)
100 PUSH_REG(reg_map[SLJIT_SAVED_REG3]);
101 if (saveds > 1)
102 PUSH_REG(reg_map[SLJIT_SAVED_REG2]);
103 if (saveds > 0)
104 PUSH_REG(reg_map[SLJIT_SAVED_REG1]);
105
106 #if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
107 if (args > 0) {
108 *buf++ = 0x8b;
109 *buf++ = 0xc0 | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[SLJIT_TEMPORARY_REG3];
110 }
111 if (args > 1) {
112 *buf++ = 0x8b;
113 *buf++ = 0xc0 | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[SLJIT_TEMPORARY_REG2];
114 }
115 if (args > 2) {
116 *buf++ = 0x8b;
117 *buf++ = 0x44 | (reg_map[SLJIT_SAVED_REG3] << 3);
118 *buf++ = 0x24;
119 *buf++ = sizeof(sljit_w) * (3 + 2); /* saveds >= 3 as well. */
120 }
121 #else
122 if (args > 0) {
123 *buf++ = 0x8b;
124 *buf++ = 0x40 | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[TMP_REGISTER];
125 *buf++ = sizeof(sljit_w) * 2;
126 }
127 if (args > 1) {
128 *buf++ = 0x8b;
129 *buf++ = 0x40 | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[TMP_REGISTER];
130 *buf++ = sizeof(sljit_w) * 3;
131 }
132 if (args > 2) {
133 *buf++ = 0x8b;
134 *buf++ = 0x40 | (reg_map[SLJIT_SAVED_REG3] << 3) | reg_map[TMP_REGISTER];
135 *buf++ = sizeof(sljit_w) * 4;
136 }
137 #endif
138
139 locals_offset = 2 * sizeof(sljit_uw);
140 compiler->temporaries_start = locals_offset;
141 if (temporaries > 3)
142 locals_offset += (temporaries - 3) * sizeof(sljit_uw);
143 compiler->saveds_start = locals_offset;
144 if (saveds > 3)
145 locals_offset += (saveds - 3) * sizeof(sljit_uw);
146 compiler->locals_offset = locals_offset;
147 local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1));
148
149 #ifdef _WIN32
150 if (local_size > 1024) {
151 FAIL_IF(emit_do_imm(compiler, 0xb8 + reg_map[SLJIT_TEMPORARY_REG1], local_size));
152 FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack)));
153 }
154 #endif
155
156 compiler->local_size = local_size;
157 SLJIT_ASSERT(local_size > 0);
158 return emit_non_cum_binary(compiler, 0x2b, 0x29, 0x5 << 3, 0x2d,
159 SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, local_size);
160
161 return SLJIT_SUCCESS;
162 }
163
164 SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, int args, int temporaries, int saveds, int local_size)
165 {
166 int locals_offset;
167
168 CHECK_ERROR_VOID();
169 check_sljit_set_context(compiler, args, temporaries, saveds, local_size);
170
171 compiler->temporaries = temporaries;
172 compiler->saveds = saveds;
173 compiler->args = args;
174 #if (defined SLJIT_DEBUG && SLJIT_DEBUG)
175 compiler->logical_local_size = local_size;
176 #endif
177
178 locals_offset = 2 * sizeof(sljit_uw);
179 compiler->temporaries_start = locals_offset;
180 if (temporaries > 3)
181 locals_offset += (temporaries - 3) * sizeof(sljit_uw);
182 compiler->saveds_start = locals_offset;
183 if (saveds > 3)
184 locals_offset += (saveds - 3) * sizeof(sljit_uw);
185 compiler->locals_offset = locals_offset;
186 compiler->local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1));
187 }
188
/* Emits the function epilogue: optionally moves the return value into
   place, releases the local area, restores the saved registers in the
   reverse order of the prologue pushes, pops TMP_REGISTER and returns.
   On fastcall builds with more than 2 args the callee pops the one
   stacked argument (ret imm16); otherwise the callee pops all stacked
   arguments. */
SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_return(struct sljit_compiler *compiler, int op, int src, sljit_w srcw)
{
	int size;
	sljit_ub *buf;

	CHECK_ERROR();
	check_sljit_emit_return(compiler, op, src, srcw);
	SLJIT_ASSERT(compiler->args >= 0);

	compiler->flags_saved = 0;
	/* Move the return value (if any) into the return register. */
	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));

	SLJIT_ASSERT(compiler->local_size > 0);
	/* add SLJIT_LOCALS_REG, local_size: release the local variable area. */
	FAIL_IF(emit_cum_binary(compiler, 0x03, 0x01, 0x0 << 3, 0x05,
		SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, compiler->local_size));

	/* Byte count: pops (one per saved register + TMP_REGISTER) + ret,
	   plus 2 extra bytes when a "ret imm16" form is needed. */
	size = 2 + (compiler->saveds <= 3 ? compiler->saveds : 3);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (compiler->args > 2)
		size += 2;
#else
	if (compiler->args > 0)
		size += 2;
#endif
	buf = (sljit_ub*)ensure_buf(compiler, 1 + size);
	FAIL_IF(!buf);

	INC_SIZE(size);

	/* Reverse order of the pushes in sljit_emit_enter. */
	if (compiler->saveds > 0)
		POP_REG(reg_map[SLJIT_SAVED_REG1]);
	if (compiler->saveds > 1)
		POP_REG(reg_map[SLJIT_SAVED_REG2]);
	if (compiler->saveds > 2)
		POP_REG(reg_map[SLJIT_SAVED_REG3]);
	POP_REG(reg_map[TMP_REGISTER]);
#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	if (compiler->args > 2)
		RETN(sizeof(sljit_w));
	else
		RET();
#else
	if (compiler->args > 0)
		RETN(compiler->args * sizeof(sljit_w));
	else
		RET();
#endif

	return SLJIT_SUCCESS;
}

/* --------------------------------------------------------------------- */
/*  Operators                                                            */
/* --------------------------------------------------------------------- */

/* Size contains the flags as well. */
/* Central x86 instruction encoder. Works in two passes: first computes
   the exact instruction length (prefixes + opcode + mod r/m + optional
   SIB + displacement + immediate), then reserves that many bytes and
   encodes the prefixes, addressing bytes and immediate. The opcode
   byte(s) themselves are left for the caller to fill in; the returned
   pointer addresses the (first) opcode byte. Returns NULL on allocation
   failure (via PTR_FAIL_IF). */
static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, int size,
	/* The register or immediate operand. */
	int a, sljit_w imma,
	/* The general operand (not immediate). */
	int b, sljit_w immb)
{
	sljit_ub *buf;
	sljit_ub *buf_ptr;
	/* High bits of 'size' carry the EX86_* flags; low nibble is the
	   opcode length in bytes. */
	int flags = size & ~0xf;
	int inst_size;

	/* Both cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
	/* Size flags not allowed for typed instructions. */
	SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
	/* Both size flags cannot be switched on. */
	SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
	/* SSE2 and immediate is not possible. */
	SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
#endif

	size &= 0xf;
	inst_size = size;

	/* --- Pass 1: compute the instruction length. --- */
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
	if (flags & EX86_PREF_F2)
		inst_size++; /* 0xf2 prefix. */
#endif
	if (flags & EX86_PREF_66)
		inst_size++; /* 0x66 operand-size prefix. */

	/* Calculate size of b. */
	inst_size += 1; /* mod r/m byte. */
	if (b & SLJIT_MEM) {
		if ((b & 0x0f) == SLJIT_UNUSED)
			/* Absolute address: 32 bit displacement only. */
			inst_size += sizeof(sljit_w);
		else if (immb != 0 && !(b & 0xf0)) {
			/* Immediate operand. */
			if (immb <= 127 && immb >= -128)
				inst_size += sizeof(sljit_b);
			else
				inst_size += sizeof(sljit_w);
		}

		/* esp as base always needs a SIB byte; encode it as base+index
		   with itself. */
		if ((b & 0xf) == SLJIT_LOCALS_REG && !(b & 0xf0))
			b |= SLJIT_LOCALS_REG << 4;

		if ((b & 0xf0) != SLJIT_UNUSED)
			inst_size += 1; /* SIB byte. */
	}

	/* Calculate size of a. */
	if (a & SLJIT_IMM) {
		if (flags & EX86_BIN_INS) {
			if (imma <= 127 && imma >= -128) {
				inst_size += 1;
				flags |= EX86_BYTE_ARG; /* Use the imm8 form (0x83). */
			} else
				inst_size += 4;
		}
		else if (flags & EX86_SHIFT_INS) {
			/* Shift counts are masked to 5 bits; a count of 1 has its
			   own immediate-free encoding (0xd1). */
			imma &= 0x1f;
			if (imma != 1) {
				inst_size ++;
				flags |= EX86_BYTE_ARG;
			}
		} else if (flags & EX86_BYTE_ARG)
			inst_size++;
		else if (flags & EX86_HALF_ARG)
			inst_size += sizeof(short);
		else
			inst_size += sizeof(sljit_w);
	}
	else
		/* Register-count shifts must use the dedicated shift register. */
		SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);

	buf = (sljit_ub*)ensure_buf(compiler, 1 + inst_size);
	PTR_FAIL_IF(!buf);

	/* --- Pass 2: encode. --- */
	/* Encoding the byte. */
	INC_SIZE(inst_size);
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
	if (flags & EX86_PREF_F2)
		*buf++ = 0xf2;
#endif
	if (flags & EX86_PREF_66)
		*buf++ = 0x66;

	/* 'buf' points at the opcode slot (filled by the caller, except for
	   the typed cases below); 'buf_ptr' at the mod r/m byte. */
	buf_ptr = buf + size;

	/* Encode mod/rm byte. */
	if (!(flags & EX86_SHIFT_INS)) {
		if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
			/* Group-1 ALU opcode: 0x83 (imm8) or 0x81 (imm32). */
			*buf = (flags & EX86_BYTE_ARG) ? 0x83 : 0x81;

		if ((a & SLJIT_IMM) || (a == 0))
			*buf_ptr = 0;
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
		else if (!(flags & EX86_SSE2))
			*buf_ptr = reg_map[a] << 3;
		else
			/* SSE2 registers are used by number, not via reg_map. */
			*buf_ptr = a << 3;
#else
		else
			*buf_ptr = reg_map[a] << 3;
#endif
	}
	else {
		/* Shift group opcodes: 0xd1 (by 1), 0xc1 (by imm8), 0xd3 (by cl). */
		if (a & SLJIT_IMM) {
			if (imma == 1)
				*buf = 0xd1;
			else
				*buf = 0xc1;
		} else
			*buf = 0xd3;
		*buf_ptr = 0;
	}

	if (!(b & SLJIT_MEM))
		/* Register operand: mod = 11. */
#if (defined SLJIT_SSE2 && SLJIT_SSE2)
		*buf_ptr++ |= 0xc0 + ((!(flags & EX86_SSE2)) ? reg_map[b] : b);
#else
		*buf_ptr++ |= 0xc0 + reg_map[b];
#endif
	else if ((b & 0x0f) != SLJIT_UNUSED) {
		if ((b & 0xf0) == SLJIT_UNUSED || (b & 0xf0) == (SLJIT_LOCALS_REG << 4)) {
			/* [base] or [base + disp], optionally via SIB for esp. */
			if (immb != 0) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr |= 0x40; /* mod = 01: disp8. */
				else
					*buf_ptr |= 0x80; /* mod = 10: disp32. */
			}

			if ((b & 0xf0) == SLJIT_UNUSED)
				*buf_ptr++ |= reg_map[b & 0x0f];
			else {
				*buf_ptr++ |= 0x04; /* r/m = 100: SIB follows. */
				*buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3);
			}

			if (immb != 0) {
				if (immb <= 127 && immb >= -128)
					*buf_ptr++ = immb; /* 8 bit displacement. */
				else {
					*(sljit_w*)buf_ptr = immb; /* 32 bit displacement. */
					buf_ptr += sizeof(sljit_w);
				}
			}
		}
		else {
			/* [base + index << scale]: immb holds the scale here. */
			*buf_ptr++ |= 0x04;
			*buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3) | (immb << 6);
		}
	}
	else {
		/* Absolute addressing: mod = 00, r/m = 101, disp32. */
		*buf_ptr++ |= 0x05;
		*(sljit_w*)buf_ptr = immb; /* 32 bit displacement. */
		buf_ptr += sizeof(sljit_w);
	}

	/* Trailing immediate operand (shift counts were folded above). */
	if (a & SLJIT_IMM) {
		if (flags & EX86_BYTE_ARG)
			*buf_ptr = imma;
		else if (flags & EX86_HALF_ARG)
			*(short*)buf_ptr = imma;
		else if (!(flags & EX86_SHIFT_INS))
			*(sljit_w*)buf_ptr = imma;
	}

	/* For typed shift instructions the opcode is already written; return
	   the byte after it. */
	return !(flags & EX86_SHIFT_INS) ? buf : (buf + 1);
}

/* --------------------------------------------------------------------- */
/*  Call / return instructions                                           */
/* --------------------------------------------------------------------- */

/* Places the call arguments right before a call instruction is emitted.
   On fastcall builds: copies SLJIT_TEMPORARY_REG1 into
   SLJIT_TEMPORARY_REG3 (mov, 0x8b) and pushes SLJIT_TEMPORARY_REG3 first
   when there are three arguments. Otherwise: pushes the argument
   registers onto the stack in reverse order (REG3, REG2, REG1). */
static SLJIT_INLINE int call_with_args(struct sljit_compiler *compiler, int type)
{
	sljit_ub *buf;

#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
	/* 2 bytes for the mov, plus 1 for the push when type >= SLJIT_CALL3. */
	buf = (sljit_ub*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2);
	FAIL_IF(!buf);
	INC_SIZE(type >= SLJIT_CALL3 ? 2 + 1 : 2);

	if (type >= SLJIT_CALL3)
		PUSH_REG(reg_map[SLJIT_TEMPORARY_REG3]);
	/* mov SLJIT_TEMPORARY_REG3, SLJIT_TEMPORARY_REG1. */
	*buf++ = 0x8b;
	*buf++ = 0xc0 | (reg_map[SLJIT_TEMPORARY_REG3] << 3) | reg_map[SLJIT_TEMPORARY_REG1];
#else
	/* One push byte per argument (type - SLJIT_CALL0 arguments). */
	buf = (sljit_ub*)ensure_buf(compiler, type - SLJIT_CALL0 + 1);
	FAIL_IF(!buf);
	INC_SIZE(type - SLJIT_CALL0);
	if (type >= SLJIT_CALL3)
		PUSH_REG(reg_map[SLJIT_TEMPORARY_REG3]);
	if (type >= SLJIT_CALL2)
		PUSH_REG(reg_map[SLJIT_TEMPORARY_REG2]);
	PUSH_REG(reg_map[SLJIT_TEMPORARY_REG1]);
#endif
	return SLJIT_SUCCESS;
}

/* Entry point of a "fast call" target: pops the return address pushed by
   the corresponding call into dst. dst may be a register (single-byte
   pop), a memory operand (pop r/m, opcode 0x8f), or SLJIT_UNUSED, in
   which case the address is popped into TMP_REGISTER and discarded. */
SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_fast_enter(struct sljit_compiler *compiler, int dst, sljit_w dstw)
{
	sljit_ub *buf;

	CHECK_ERROR();
	check_sljit_emit_fast_enter(compiler, dst, dstw);
	ADJUST_LOCAL_OFFSET(dst, dstw);

	CHECK_EXTRA_REGS(dst, dstw, (void)0);

	if (dst >= SLJIT_TEMPORARY_REG1 && dst <= SLJIT_NO_REGISTERS) {
		/* pop reg. */
		buf = (sljit_ub*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!buf);

		INC_SIZE(1);
		POP_REG(reg_map[dst]);
		return SLJIT_SUCCESS;
	}
	else if (dst & SLJIT_MEM) {
		/* pop r/m: emit_x86_instruction encodes the addressing bytes,
		   we fill in the opcode. */
		buf = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
		FAIL_IF(!buf);
		*buf++ = 0x8f;
		return SLJIT_SUCCESS;
	}

	/* For UNUSED dst. Uncommon, but possible. */
	buf = (sljit_ub*)ensure_buf(compiler, 1 + 1);
	FAIL_IF(!buf);

	INC_SIZE(1);
	POP_REG(reg_map[TMP_REGISTER]);
	return SLJIT_SUCCESS;
}

/* Returns from a "fast call": pushes the return address held in src back
   onto the stack (push reg, push r/m with /6, or push imm32 for
   SLJIT_IMM) and executes ret, which pops and jumps to it. */
SLJIT_API_FUNC_ATTRIBUTE int sljit_emit_fast_return(struct sljit_compiler *compiler, int src, sljit_w srcw)
{
	sljit_ub *buf;

	CHECK_ERROR();
	check_sljit_emit_fast_return(compiler, src, srcw);
	ADJUST_LOCAL_OFFSET(src, srcw);

	CHECK_EXTRA_REGS(src, srcw, (void)0);

	if (src >= SLJIT_TEMPORARY_REG1 && src <= SLJIT_NO_REGISTERS) {
		/* push reg; ret. */
		buf = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 1);
		FAIL_IF(!buf);

		INC_SIZE(1 + 1);
		PUSH_REG(reg_map[src]);
	}
	else if (src & SLJIT_MEM) {
		/* push r/m: opcode 0xff with reg field /6. */
		buf = emit_x86_instruction(compiler, 1, 0, 0, src, srcw);
		FAIL_IF(!buf);
		*buf++ = 0xff;
		*buf |= 6 << 3;

		/* Reserve one more byte for the ret below. */
		buf = (sljit_ub*)ensure_buf(compiler, 1 + 1);
		FAIL_IF(!buf);
		INC_SIZE(1);
	}
	else {
		/* SLJIT_IMM. */
		/* push imm32 (0x68); ret. */
		buf = (sljit_ub*)ensure_buf(compiler, 1 + 5 + 1);
		FAIL_IF(!buf);

		INC_SIZE(5 + 1);
		*buf++ = 0x68;
		*(sljit_w*)buf = srcw;
		buf += sizeof(sljit_w);
	}

	RET();
	return SLJIT_SUCCESS;
}
