source: webkit/trunk/JavaScriptCore/assembler/X86Assembler.h@ 43433

Last change on this file since 43433 was 43433, checked in by [email protected], 16 years ago

2009-05-09 Geoffrey Garen <[email protected]>

Windows build fix.

  • assembler/X86Assembler.h: (JSC::X86Assembler::patchLoadToLEA):
File size: 55.3 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef X86Assembler_h
27#define X86Assembler_h
28
29#include <wtf/Platform.h>
30
31#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33#include "AssemblerBuffer.h"
34#include <stdint.h>
35#include <wtf/Assertions.h>
36#include <wtf/Vector.h>
37
38namespace JSC {
39
// Returns true if `value` fits in a signed 8-bit immediate, i.e. sign-extending
// its low byte reproduces the full 32-bit value.  Callers use this to choose
// the short imm8 form of an instruction over the imm32 form.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == static_cast<int32_t>(static_cast<signed char>(value)); }
41#if PLATFORM(X86_64)
42inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
43inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
44
45#define REPTACH_OFFSET_CALL_R11 3
46#endif
47
// General-purpose and SSE register identifiers.  The enumerator values are the
// hardware register numbers used in ModRM/SIB encodings (eax == 0 ... edi == 7,
// xmm0 == 0 ...).  NOTE(review): r8-r15 take values 8-15 and need a REX prefix
// bit when encoded -- presumably supplied by the formatter; confirm there.
48namespace X86 {
49 typedef enum {
50 eax,
51 ecx,
52 edx,
53 ebx,
54 esp,
55 ebp,
56 esi,
57 edi,
58
59#if PLATFORM(X86_64)
60 r8,
61 r9,
62 r10,
63 r11,
64 r12,
65 r13,
66 r14,
67 r15,
68#endif
69 } RegisterID;
70
71 typedef enum {
72 xmm0,
73 xmm1,
74 xmm2,
75 xmm3,
76 xmm4,
77 xmm5,
78 xmm6,
79 xmm7,
80 } XMMRegisterID;
81}
82
83class X86Assembler {
84public:
85 typedef X86::RegisterID RegisterID;
86 typedef X86::XMMRegisterID XMMRegisterID;
87
// Branch/set condition codes.  The enumerator order matches the x86
// condition-code encoding (0x0 .. 0xF), so a Condition can simply be added to
// a base opcode -- see jccRel32() and setccOpcode() below.  ConditionC and
// ConditionNC are aliases: carry-set is below, carry-clear is above-or-equal.
88 typedef enum {
89 ConditionO,
90 ConditionNO,
91 ConditionB,
92 ConditionAE,
93 ConditionE,
94 ConditionNE,
95 ConditionBE,
96 ConditionA,
97 ConditionS,
98 ConditionNS,
99 ConditionP,
100 ConditionNP,
101 ConditionL,
102 ConditionGE,
103 ConditionLE,
104 ConditionG,
105
106 ConditionC = ConditionB,
107 ConditionNC = ConditionAE,
108 } Condition;
109
110private:
// Single-byte instruction opcodes and prefixes.  Name suffixes follow Intel
// operand notation: Ev/Gv = reg-or-memory / register operand, Ib/Iz = byte /
// (d)word immediate, Ov = memory offset.  Opcodes named "GROUPn" select the
// actual operation via an extension in the ModRM reg field (GroupOpcodeID).
// PRE_* values are instruction prefixes, not opcodes.
111 typedef enum {
112 OP_ADD_EvGv = 0x01,
113 OP_ADD_GvEv = 0x03,
114 OP_OR_EvGv = 0x09,
115 OP_OR_GvEv = 0x0B,
116 OP_2BYTE_ESCAPE = 0x0F,
117 OP_AND_EvGv = 0x21,
118 OP_SUB_EvGv = 0x29,
119 OP_SUB_GvEv = 0x2B,
120 PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
121 OP_XOR_EvGv = 0x31,
122 OP_CMP_EvGv = 0x39,
123 OP_CMP_GvEv = 0x3B,
124#if PLATFORM(X86_64)
125 PRE_REX = 0x40,
126#endif
127 OP_PUSH_EAX = 0x50,
128 OP_POP_EAX = 0x58,
129#if PLATFORM(X86_64)
130 OP_MOVSXD_GvEv = 0x63,
131#endif
132 PRE_OPERAND_SIZE = 0x66,
133 PRE_SSE_66 = 0x66,
134 OP_PUSH_Iz = 0x68,
135 OP_IMUL_GvEvIz = 0x69,
136 OP_GROUP1_EvIz = 0x81,
137 OP_GROUP1_EvIb = 0x83,
138 OP_TEST_EvGv = 0x85,
139 OP_XCHG_EvGv = 0x87,
140 OP_MOV_EvGv = 0x89,
141 OP_MOV_GvEv = 0x8B,
142 OP_LEA = 0x8D,
143 OP_GROUP1A_Ev = 0x8F,
144 OP_CDQ = 0x99,
145 OP_MOV_EAXOv = 0xA1,
146 OP_MOV_OvEAX = 0xA3,
147 OP_MOV_EAXIv = 0xB8,
148 OP_GROUP2_EvIb = 0xC1,
149 OP_RET = 0xC3,
150 OP_GROUP11_EvIz = 0xC7,
151 OP_INT3 = 0xCC,
152 OP_GROUP2_Ev1 = 0xD1,
153 OP_GROUP2_EvCL = 0xD3,
154 OP_CALL_rel32 = 0xE8,
155 OP_JMP_rel32 = 0xE9,
156 PRE_SSE_F2 = 0xF2,
157 OP_HLT = 0xF4,
158 OP_GROUP3_EbIb = 0xF6,
159 OP_GROUP3_Ev = 0xF7,
160 OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
161 OP_GROUP5_Ev = 0xFF,
162 } OneByteOpcodeID;
163
// Opcodes in the two-byte (0x0F-escaped) map: SSE/SSE2 moves and arithmetic,
// plus Jcc rel32, SETcc and MOVZX.  OP2_JCC_rel32 and OP_SETCC are base
// values that a Condition code is added to (see jccRel32()/setccOpcode()).
// NOTE(review): OP_SETCC breaks this enum's OP2_ naming convention.
164 typedef enum {
165 OP2_MOVSD_VsdWsd = 0x10,
166 OP2_MOVSD_WsdVsd = 0x11,
167 OP2_CVTSI2SD_VsdEd = 0x2A,
168 OP2_CVTTSD2SI_GdWsd = 0x2C,
169 OP2_UCOMISD_VsdWsd = 0x2E,
170 OP2_ADDSD_VsdWsd = 0x58,
171 OP2_MULSD_VsdWsd = 0x59,
172 OP2_SUBSD_VsdWsd = 0x5C,
173 OP2_MOVD_VdEd = 0x6E,
174 OP2_MOVD_EdVd = 0x7E,
175 OP2_JCC_rel32 = 0x80,
176 OP_SETCC = 0x90,
177 OP2_IMUL_GvEv = 0xAF,
178 OP2_MOVZX_GvEb = 0xB6,
179 OP2_MOVZX_GvEw = 0xB7,
180 OP2_PEXTRW_GdUdIb = 0xC5,
181 } TwoByteOpcodeID;
182
183 TwoByteOpcodeID jccRel32(Condition cond)
184 {
185 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
186 }
187
188 TwoByteOpcodeID setccOpcode(Condition cond)
189 {
190 return (TwoByteOpcodeID)(OP_SETCC + cond);
191 }
192
// Opcode extensions for "group" instructions: the value goes into the ModRM
// reg field to select the operation carried out by a shared opcode byte
// (e.g. 0x81 /7 is CMP r/m32, imm32).  Grouped by the OP_GROUPn_* opcode in
// OneByteOpcodeID that each set of extensions belongs to.
193 typedef enum {
194 GROUP1_OP_ADD = 0,
195 GROUP1_OP_OR = 1,
196 GROUP1_OP_AND = 4,
197 GROUP1_OP_SUB = 5,
198 GROUP1_OP_XOR = 6,
199 GROUP1_OP_CMP = 7,
200
201 GROUP1A_OP_POP = 0,
202
203 GROUP2_OP_SHL = 4,
204 GROUP2_OP_SAR = 7,
205
206 GROUP3_OP_TEST = 0,
207 GROUP3_OP_NOT = 2,
208 GROUP3_OP_IDIV = 7,
209
210 GROUP5_OP_CALLN = 2,
211 GROUP5_OP_JMPN = 4,
212 GROUP5_OP_PUSH = 6,
213
214 GROUP11_MOV = 0,
215 } GroupOpcodeID;
216
217 class X86InstructionFormatter;
218public:
219
// Marks the site of an emitted jump/call so it can be linked to a target
// later.  m_offset is the buffer offset just past the instruction's rel32
// field (linkJump() patches the four bytes preceding it), or -1 when
// default-constructed, i.e. not yet bound to an instruction.
220 class JmpSrc {
221 friend class X86Assembler;
222 friend class X86InstructionFormatter;
223 public:
224 JmpSrc()
225 : m_offset(-1)
226 {
227 }
228
229 private:
230 JmpSrc(int offset)
231 : m_offset(offset)
232 {
233 }
234
235 int m_offset;
236 };
237
// Marks a buffer offset that jumps may be linked to.  The offset is packed
// into a 31-bit field so the "used" flag fits alongside it in one word; the
// private constructor asserts the offset survives the narrowing.  A
// default-constructed JmpDst has offset -1, meaning "unbound".
238 class JmpDst {
239 friend class X86Assembler;
240 friend class X86InstructionFormatter;
241 public:
242 JmpDst()
243 : m_offset(-1)
244 , m_used(false)
245 {
246 }
247
248 bool isUsed() const { return m_used; }
249 void used() { m_used = true; }
250 private:
251 JmpDst(int offset)
252 : m_offset(offset)
253 , m_used(false)
254 {
255 ASSERT(m_offset == offset);
256 }
257
258 int m_offset : 31;
259 bool m_used : 1;
260 };
261
262 X86Assembler()
263 {
264 }
265
266 size_t size() const { return m_formatter.size(); }
267
268 // Stack operations:
269
270 void push_r(RegisterID reg)
271 {
272 m_formatter.oneByteOp(OP_PUSH_EAX, reg);
273 }
274
275 void pop_r(RegisterID reg)
276 {
277 m_formatter.oneByteOp(OP_POP_EAX, reg);
278 }
279
280 void push_i32(int imm)
281 {
282 m_formatter.oneByteOp(OP_PUSH_Iz);
283 m_formatter.immediate32(imm);
284 }
285
286 void push_m(int offset, RegisterID base)
287 {
288 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
289 }
290
291 void pop_m(int offset, RegisterID base)
292 {
293 m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
294 }
295
296 // Arithmetic operations:
297
298 void addl_rr(RegisterID src, RegisterID dst)
299 {
300 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
301 }
302
303 void addl_mr(int offset, RegisterID base, RegisterID dst)
304 {
305 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
306 }
307
308 void addl_ir(int imm, RegisterID dst)
309 {
310 if (CAN_SIGN_EXTEND_8_32(imm)) {
311 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
312 m_formatter.immediate8(imm);
313 } else {
314 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
315 m_formatter.immediate32(imm);
316 }
317 }
318
319 void addl_im(int imm, int offset, RegisterID base)
320 {
321 if (CAN_SIGN_EXTEND_8_32(imm)) {
322 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
323 m_formatter.immediate8(imm);
324 } else {
325 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
326 m_formatter.immediate32(imm);
327 }
328 }
329
330#if PLATFORM(X86_64)
331 void addq_rr(RegisterID src, RegisterID dst)
332 {
333 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
334 }
335
336 void addq_ir(int imm, RegisterID dst)
337 {
338 if (CAN_SIGN_EXTEND_8_32(imm)) {
339 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
340 m_formatter.immediate8(imm);
341 } else {
342 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
343 m_formatter.immediate32(imm);
344 }
345 }
346#else
347 void addl_im(int imm, void* addr)
348 {
349 if (CAN_SIGN_EXTEND_8_32(imm)) {
350 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
351 m_formatter.immediate8(imm);
352 } else {
353 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
354 m_formatter.immediate32(imm);
355 }
356 }
357#endif
358
359 void andl_rr(RegisterID src, RegisterID dst)
360 {
361 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
362 }
363
364 void andl_ir(int imm, RegisterID dst)
365 {
366 if (CAN_SIGN_EXTEND_8_32(imm)) {
367 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
368 m_formatter.immediate8(imm);
369 } else {
370 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
371 m_formatter.immediate32(imm);
372 }
373 }
374
375#if PLATFORM(X86_64)
376 void andq_rr(RegisterID src, RegisterID dst)
377 {
378 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
379 }
380
381 void andq_ir(int imm, RegisterID dst)
382 {
383 if (CAN_SIGN_EXTEND_8_32(imm)) {
384 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
385 m_formatter.immediate8(imm);
386 } else {
387 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
388 m_formatter.immediate32(imm);
389 }
390 }
391#endif
392
393 void notl_r(RegisterID dst)
394 {
395 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
396 }
397
398 void orl_rr(RegisterID src, RegisterID dst)
399 {
400 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
401 }
402
403 void orl_mr(int offset, RegisterID base, RegisterID dst)
404 {
405 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
406 }
407
408 void orl_ir(int imm, RegisterID dst)
409 {
410 if (CAN_SIGN_EXTEND_8_32(imm)) {
411 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
412 m_formatter.immediate8(imm);
413 } else {
414 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
415 m_formatter.immediate32(imm);
416 }
417 }
418
419#if PLATFORM(X86_64)
420 void orq_rr(RegisterID src, RegisterID dst)
421 {
422 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
423 }
424
425 void orq_ir(int imm, RegisterID dst)
426 {
427 if (CAN_SIGN_EXTEND_8_32(imm)) {
428 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
429 m_formatter.immediate8(imm);
430 } else {
431 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
432 m_formatter.immediate32(imm);
433 }
434 }
435#endif
436
437 void subl_rr(RegisterID src, RegisterID dst)
438 {
439 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
440 }
441
442 void subl_mr(int offset, RegisterID base, RegisterID dst)
443 {
444 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
445 }
446
447 void subl_ir(int imm, RegisterID dst)
448 {
449 if (CAN_SIGN_EXTEND_8_32(imm)) {
450 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
451 m_formatter.immediate8(imm);
452 } else {
453 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
454 m_formatter.immediate32(imm);
455 }
456 }
457
458 void subl_im(int imm, int offset, RegisterID base)
459 {
460 if (CAN_SIGN_EXTEND_8_32(imm)) {
461 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
462 m_formatter.immediate8(imm);
463 } else {
464 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
465 m_formatter.immediate32(imm);
466 }
467 }
468
469#if PLATFORM(X86_64)
470 void subq_rr(RegisterID src, RegisterID dst)
471 {
472 m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
473 }
474
475 void subq_ir(int imm, RegisterID dst)
476 {
477 if (CAN_SIGN_EXTEND_8_32(imm)) {
478 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
479 m_formatter.immediate8(imm);
480 } else {
481 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
482 m_formatter.immediate32(imm);
483 }
484 }
485#else
486 void subl_im(int imm, void* addr)
487 {
488 if (CAN_SIGN_EXTEND_8_32(imm)) {
489 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
490 m_formatter.immediate8(imm);
491 } else {
492 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
493 m_formatter.immediate32(imm);
494 }
495 }
496#endif
497
498 void xorl_rr(RegisterID src, RegisterID dst)
499 {
500 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
501 }
502
503 void xorl_ir(int imm, RegisterID dst)
504 {
505 if (CAN_SIGN_EXTEND_8_32(imm)) {
506 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
507 m_formatter.immediate8(imm);
508 } else {
509 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
510 m_formatter.immediate32(imm);
511 }
512 }
513
514#if PLATFORM(X86_64)
515 void xorq_rr(RegisterID src, RegisterID dst)
516 {
517 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
518 }
519
520 void xorq_ir(int imm, RegisterID dst)
521 {
522 if (CAN_SIGN_EXTEND_8_32(imm)) {
523 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
524 m_formatter.immediate8(imm);
525 } else {
526 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
527 m_formatter.immediate32(imm);
528 }
529 }
530#endif
531
532 void sarl_i8r(int imm, RegisterID dst)
533 {
534 if (imm == 1)
535 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
536 else {
537 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
538 m_formatter.immediate8(imm);
539 }
540 }
541
542 void sarl_CLr(RegisterID dst)
543 {
544 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
545 }
546
547 void shll_i8r(int imm, RegisterID dst)
548 {
549 if (imm == 1)
550 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
551 else {
552 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
553 m_formatter.immediate8(imm);
554 }
555 }
556
557 void shll_CLr(RegisterID dst)
558 {
559 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
560 }
561
562#if PLATFORM(X86_64)
563 void sarq_CLr(RegisterID dst)
564 {
565 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
566 }
567
568 void sarq_i8r(int imm, RegisterID dst)
569 {
570 if (imm == 1)
571 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
572 else {
573 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
574 m_formatter.immediate8(imm);
575 }
576 }
577#endif
578
579 void imull_rr(RegisterID src, RegisterID dst)
580 {
581 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
582 }
583
584 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
585 {
586 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
587 m_formatter.immediate32(value);
588 }
589
590 void idivl_r(RegisterID dst)
591 {
592 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
593 }
594
595 // Comparisons:
596
597 void cmpl_rr(RegisterID src, RegisterID dst)
598 {
599 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
600 }
601
602 void cmpl_rm(RegisterID src, int offset, RegisterID base)
603 {
604 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
605 }
606
607 void cmpl_mr(int offset, RegisterID base, RegisterID src)
608 {
609 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
610 }
611
612 void cmpl_ir(int imm, RegisterID dst)
613 {
614 if (CAN_SIGN_EXTEND_8_32(imm)) {
615 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
616 m_formatter.immediate8(imm);
617 } else {
618 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
619 m_formatter.immediate32(imm);
620 }
621 }
622
623 void cmpl_ir_force32(int imm, RegisterID dst)
624 {
625 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
626 m_formatter.immediate32(imm);
627 }
628
629 void cmpl_im(int imm, int offset, RegisterID base)
630 {
631 if (CAN_SIGN_EXTEND_8_32(imm)) {
632 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
633 m_formatter.immediate8(imm);
634 } else {
635 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
636 m_formatter.immediate32(imm);
637 }
638 }
639
640 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
641 {
642 if (CAN_SIGN_EXTEND_8_32(imm)) {
643 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
644 m_formatter.immediate8(imm);
645 } else {
646 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
647 m_formatter.immediate32(imm);
648 }
649 }
650
651 void cmpl_im_force32(int imm, int offset, RegisterID base)
652 {
653 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
654 m_formatter.immediate32(imm);
655 }
656
657#if PLATFORM(X86_64)
658 void cmpq_rr(RegisterID src, RegisterID dst)
659 {
660 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
661 }
662
663 void cmpq_rm(RegisterID src, int offset, RegisterID base)
664 {
665 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
666 }
667
668 void cmpq_mr(int offset, RegisterID base, RegisterID src)
669 {
670 m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
671 }
672
673 void cmpq_ir(int imm, RegisterID dst)
674 {
675 if (CAN_SIGN_EXTEND_8_32(imm)) {
676 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
677 m_formatter.immediate8(imm);
678 } else {
679 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
680 m_formatter.immediate32(imm);
681 }
682 }
683
684 void cmpq_im(int imm, int offset, RegisterID base)
685 {
686 if (CAN_SIGN_EXTEND_8_32(imm)) {
687 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
688 m_formatter.immediate8(imm);
689 } else {
690 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
691 m_formatter.immediate32(imm);
692 }
693 }
694
695 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
696 {
697 if (CAN_SIGN_EXTEND_8_32(imm)) {
698 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
699 m_formatter.immediate8(imm);
700 } else {
701 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
702 m_formatter.immediate32(imm);
703 }
704 }
705#else
706 void cmpl_rm(RegisterID reg, void* addr)
707 {
708 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
709 }
710
711 void cmpl_im(int imm, void* addr)
712 {
713 if (CAN_SIGN_EXTEND_8_32(imm)) {
714 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
715 m_formatter.immediate8(imm);
716 } else {
717 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
718 m_formatter.immediate32(imm);
719 }
720 }
721#endif
722
723 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
724 {
725 m_formatter.prefix(PRE_OPERAND_SIZE);
726 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
727 }
728
    // 16-bit compare of an immediate against memory at base + index*scale +
    // offset.  The operand-size prefix (0x66) selects word operands; an
    // immediate that sign-extends from 8 bits uses the short EvIb form,
    // otherwise a 16-bit immediate is emitted (under the 0x66 prefix the Iz
    // immediate is word-sized, hence immediate16 rather than immediate32).
729 void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
730 {
731 if (CAN_SIGN_EXTEND_8_32(imm)) {
732 m_formatter.prefix(PRE_OPERAND_SIZE);
733 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
734 m_formatter.immediate8(imm);
735 } else {
736 m_formatter.prefix(PRE_OPERAND_SIZE);
737 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
738 m_formatter.immediate16(imm);
739 }
740 }
741
742 void testl_rr(RegisterID src, RegisterID dst)
743 {
744 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
745 }
746
747 void testl_i32r(int imm, RegisterID dst)
748 {
749 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
750 m_formatter.immediate32(imm);
751 }
752
753 void testl_i32m(int imm, int offset, RegisterID base)
754 {
755 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
756 m_formatter.immediate32(imm);
757 }
758
759 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
760 {
761 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
762 m_formatter.immediate32(imm);
763 }
764
765#if PLATFORM(X86_64)
766 void testq_rr(RegisterID src, RegisterID dst)
767 {
768 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
769 }
770
771 void testq_i32r(int imm, RegisterID dst)
772 {
773 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
774 m_formatter.immediate32(imm);
775 }
776
777 void testq_i32m(int imm, int offset, RegisterID base)
778 {
779 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
780 m_formatter.immediate32(imm);
781 }
782
783 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
784 {
785 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
786 m_formatter.immediate32(imm);
787 }
788#endif
789
790 void testw_rr(RegisterID src, RegisterID dst)
791 {
792 m_formatter.prefix(PRE_OPERAND_SIZE);
793 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
794 }
795
796 void testb_i8r(int imm, RegisterID dst)
797 {
798 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
799 m_formatter.immediate8(imm);
800 }
801
802 void setCC_r(Condition cond, RegisterID dst)
803 {
804 m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
805 }
806
807 void sete_r(RegisterID dst)
808 {
809 m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
810 }
811
812 void setz_r(RegisterID dst)
813 {
814 sete_r(dst);
815 }
816
817 void setne_r(RegisterID dst)
818 {
819 m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
820 }
821
822 void setnz_r(RegisterID dst)
823 {
824 setne_r(dst);
825 }
826
827 // Various move ops:
828
829 void cdq()
830 {
831 m_formatter.oneByteOp(OP_CDQ);
832 }
833
834 void xchgl_rr(RegisterID src, RegisterID dst)
835 {
836 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
837 }
838
839#if PLATFORM(X86_64)
840 void xchgq_rr(RegisterID src, RegisterID dst)
841 {
842 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
843 }
844#endif
845
846 void movl_rr(RegisterID src, RegisterID dst)
847 {
848 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
849 }
850
851 void movl_rm(RegisterID src, int offset, RegisterID base)
852 {
853 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
854 }
855
856 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
857 {
858 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
859 }
860
861 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
862 {
863 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
864 }
865
866 void movl_mEAX(void* addr)
867 {
868 m_formatter.oneByteOp(OP_MOV_EAXOv);
869#if PLATFORM(X86_64)
870 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
871#else
872 m_formatter.immediate32(reinterpret_cast<int>(addr));
873#endif
874 }
875
876 void movl_mr(int offset, RegisterID base, RegisterID dst)
877 {
878 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
879 }
880
881 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
882 {
883 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
884 }
885
886 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
887 {
888 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
889 }
890
891 void movl_i32r(int imm, RegisterID dst)
892 {
893 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
894 m_formatter.immediate32(imm);
895 }
896
897 void movl_i32m(int imm, int offset, RegisterID base)
898 {
899 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
900 m_formatter.immediate32(imm);
901 }
902
903 void movl_EAXm(void* addr)
904 {
905 m_formatter.oneByteOp(OP_MOV_OvEAX);
906#if PLATFORM(X86_64)
907 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
908#else
909 m_formatter.immediate32(reinterpret_cast<int>(addr));
910#endif
911 }
912
913#if PLATFORM(X86_64)
914 void movq_rr(RegisterID src, RegisterID dst)
915 {
916 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
917 }
918
919 void movq_rm(RegisterID src, int offset, RegisterID base)
920 {
921 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
922 }
923
924 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
925 {
926 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
927 }
928
929 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
930 {
931 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
932 }
933
934 void movq_mEAX(void* addr)
935 {
936 m_formatter.oneByteOp64(OP_MOV_EAXOv);
937 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
938 }
939
940 void movq_EAXm(void* addr)
941 {
942 m_formatter.oneByteOp64(OP_MOV_OvEAX);
943 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
944 }
945
946 void movq_mr(int offset, RegisterID base, RegisterID dst)
947 {
948 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
949 }
950
951 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
952 {
953 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
954 }
955
956 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
957 {
958 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
959 }
960
961 void movq_i32m(int imm, int offset, RegisterID base)
962 {
963 m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
964 m_formatter.immediate32(imm);
965 }
966
967 void movq_i64r(int64_t imm, RegisterID dst)
968 {
969 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
970 m_formatter.immediate64(imm);
971 }
972
973 void movsxd_rr(RegisterID src, RegisterID dst)
974 {
975 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
976 }
977
978
979#else
980 void movl_rm(RegisterID src, void* addr)
981 {
982 if (src == X86::eax)
983 movl_EAXm(addr);
984 else
985 m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
986 }
987
988 void movl_mr(void* addr, RegisterID dst)
989 {
990 if (dst == X86::eax)
991 movl_mEAX(addr);
992 else
993 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
994 }
995
996 void movl_i32m(int imm, void* addr)
997 {
998 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
999 m_formatter.immediate32(imm);
1000 }
1001#endif
1002
1003 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
1004 {
1005 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
1006 }
1007
1008 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1009 {
1010 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
1011 }
1012
1013 void movzbl_rr(RegisterID src, RegisterID dst)
1014 {
1015 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1016 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1017 // REX prefixes are defined to be silently ignored by the processor.
1018 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
1019 }
1020
1021 void leal_mr(int offset, RegisterID base, RegisterID dst)
1022 {
1023 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
1024 }
1025#if PLATFORM(X86_64)
1026 void leaq_mr(int offset, RegisterID base, RegisterID dst)
1027 {
1028 m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
1029 }
1030#endif
1031
1032 // Flow control:
1033
1034 JmpSrc call()
1035 {
1036 m_formatter.oneByteOp(OP_CALL_rel32);
1037 return m_formatter.immediateRel32();
1038 }
1039
1040 JmpSrc call(RegisterID dst)
1041 {
1042 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
1043 return JmpSrc(m_formatter.size());
1044 }
1045
1046 void call_m(int offset, RegisterID base)
1047 {
1048 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
1049 }
1050
1051 JmpSrc jmp()
1052 {
1053 m_formatter.oneByteOp(OP_JMP_rel32);
1054 return m_formatter.immediateRel32();
1055 }
1056
1057 // Return a JmpSrc so we have a label to the jump, so we can use this
1058 // To make a tail recursive call on x86-64. The MacroAssembler
1059 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1060 JmpSrc jmp_r(RegisterID dst)
1061 {
1062 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
1063 return JmpSrc(m_formatter.size());
1064 }
1065
1066 void jmp_m(int offset, RegisterID base)
1067 {
1068 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
1069 }
1070
1071 JmpSrc jne()
1072 {
1073 m_formatter.twoByteOp(jccRel32(ConditionNE));
1074 return m_formatter.immediateRel32();
1075 }
1076
1077 JmpSrc jnz()
1078 {
1079 return jne();
1080 }
1081
1082 JmpSrc je()
1083 {
1084 m_formatter.twoByteOp(jccRel32(ConditionE));
1085 return m_formatter.immediateRel32();
1086 }
1087
1088 JmpSrc jl()
1089 {
1090 m_formatter.twoByteOp(jccRel32(ConditionL));
1091 return m_formatter.immediateRel32();
1092 }
1093
1094 JmpSrc jb()
1095 {
1096 m_formatter.twoByteOp(jccRel32(ConditionB));
1097 return m_formatter.immediateRel32();
1098 }
1099
1100 JmpSrc jle()
1101 {
1102 m_formatter.twoByteOp(jccRel32(ConditionLE));
1103 return m_formatter.immediateRel32();
1104 }
1105
1106 JmpSrc jbe()
1107 {
1108 m_formatter.twoByteOp(jccRel32(ConditionBE));
1109 return m_formatter.immediateRel32();
1110 }
1111
1112 JmpSrc jge()
1113 {
1114 m_formatter.twoByteOp(jccRel32(ConditionGE));
1115 return m_formatter.immediateRel32();
1116 }
1117
1118 JmpSrc jg()
1119 {
1120 m_formatter.twoByteOp(jccRel32(ConditionG));
1121 return m_formatter.immediateRel32();
1122 }
1123
1124 JmpSrc ja()
1125 {
1126 m_formatter.twoByteOp(jccRel32(ConditionA));
1127 return m_formatter.immediateRel32();
1128 }
1129
1130 JmpSrc jae()
1131 {
1132 m_formatter.twoByteOp(jccRel32(ConditionAE));
1133 return m_formatter.immediateRel32();
1134 }
1135
1136 JmpSrc jo()
1137 {
1138 m_formatter.twoByteOp(jccRel32(ConditionO));
1139 return m_formatter.immediateRel32();
1140 }
1141
1142 JmpSrc jp()
1143 {
1144 m_formatter.twoByteOp(jccRel32(ConditionP));
1145 return m_formatter.immediateRel32();
1146 }
1147
1148 JmpSrc js()
1149 {
1150 m_formatter.twoByteOp(jccRel32(ConditionS));
1151 return m_formatter.immediateRel32();
1152 }
1153
1154 JmpSrc jCC(Condition cond)
1155 {
1156 m_formatter.twoByteOp(jccRel32(cond));
1157 return m_formatter.immediateRel32();
1158 }
1159
1160 // SSE operations:
1161
1162 void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
1163 {
1164 m_formatter.prefix(PRE_SSE_F2);
1165 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1166 }
1167
1168 void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1169 {
1170 m_formatter.prefix(PRE_SSE_F2);
1171 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
1172 }
1173
1174 void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
1175 {
1176 m_formatter.prefix(PRE_SSE_F2);
1177 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1178 }
1179
1180 void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
1181 {
1182 m_formatter.prefix(PRE_SSE_F2);
1183 m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1184 }
1185
1186 void movd_rr(XMMRegisterID src, RegisterID dst)
1187 {
1188 m_formatter.prefix(PRE_SSE_66);
1189 m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
1190 }
1191
1192#if PLATFORM(X86_64)
1193 void movq_rr(XMMRegisterID src, RegisterID dst)
1194 {
1195 m_formatter.prefix(PRE_SSE_66);
1196 m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
1197 }
1198
1199 void movq_rr(RegisterID src, XMMRegisterID dst)
1200 {
1201 m_formatter.prefix(PRE_SSE_66);
1202 m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
1203 }
1204#endif
1205
1206 void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
1207 {
1208 m_formatter.prefix(PRE_SSE_F2);
1209 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
1210 }
1211
1212 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1213 {
1214 m_formatter.prefix(PRE_SSE_F2);
1215 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1216 }
1217
1218 void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
1219 {
1220 m_formatter.prefix(PRE_SSE_F2);
1221 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1222 }
1223
    // Emits MULSD with a memory source: dst *= double at [base + offset].
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }
1229
    // Emits PEXTRW: extract 16-bit word number 'whichWord' from the XMM
    // register src into dst. The word index is planted as an 8-bit immediate.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }
1236
    // Emits SUBSD: scalar double subtract, dst -= src.
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1242
    // Emits SUBSD with a memory source: dst -= double at [base + offset].
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }
1248
    // Emits UCOMISD: unordered compare of the doubles in dst and src.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1254
1255 // Misc instructions:
1256
    // Emits INT3 (breakpoint trap).
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }
1261
    // Emits RET.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }
1266
    // Emits the branch-not-taken hint prefix; applies to the branch
    // instruction planted immediately after this call.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1271
1272 // Assembler admin methods:
1273
    // Returns a JmpDst marking the current offset in the instruction stream.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }
1278
1279 JmpDst align(int alignment)
1280 {
1281 while (!m_formatter.isAligned(alignment))
1282 m_formatter.oneByteOp(OP_HLT);
1283
1284 return label();
1285 }
1286
1287 // Linking & patching:
1288
    // Links an in-buffer jump to an in-buffer label. A JmpSrc's offset marks
    // the end of the jump's rel32 field (see immediateRel32()), so the
    // int written at [-1] overwrites that field; x86 relative branches are
    // taken from the end of the instruction, hence to - from.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);

        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
    }
1296
    // Links a jump in already-copied code at 'code' to the absolute address
    // 'to'. The rel32 field ends at code + from.m_offset, so [-1] addresses
    // it; the ASSERT checks the displacement fits in 32 bits (relevant on
    // x86-64).
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);
        ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
        ASSERT(linkOffset == static_cast<int>(linkOffset));
        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset;
    }
1304
1305 static void patchLoadToLEA(intptr_t where)
1306 {
1307 char* ptr = reinterpret_cast<char*>(where);
1308 ptr[0] = static_cast<char>(OP_LEA);
1309 }
1310
    // Repoints an existing jump whose rel32 field ends at 'where' to jump to
    // 'destination'. The ASSERT checks the new displacement fits in 32 bits.
    static void patchJump(intptr_t where, void* destination)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
        ASSERT(offset == static_cast<int32_t>(offset));
        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
    }
1317
1318#if PLATFORM(X86_64)
    // FIXME: transition these functions out of here - the assembler
    // shouldn't know that that this is mov/call pair using r11. :-/
    //
    // Repoints a macro-assembler call by rewriting the 64-bit address
    // patched ahead of the call. REPTACH_OFFSET_CALL_R11 (defined elsewhere;
    // note the identifier's "REPTACH" spelling) is presumably the byte
    // distance back from 'where' to the end of the mov-into-r11's immediate
    // — confirm against its definition.
    static void patchMacroAssemblerCall(intptr_t where, void* destination)
    {
        patchAddress(reinterpret_cast<void*>(where - REPTACH_OFFSET_CALL_R11), JmpDst(0), destination);
    }
1325#else
    // 32-bit variant: a macro-assembler call is a plain rel32 call, so this
    // is the same rewrite as patchCall() — overwrite the rel32 field ending
    // at 'where' with the displacement to 'destination'.
    static void patchMacroAssemblerCall(intptr_t where, void* destination)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
        ASSERT(offset == static_cast<int32_t>(offset));
        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
    }
1332#endif
1333
    // Links an in-buffer call to an in-buffer label; identical mechanics to
    // linkJump() — the rel32 field ends at from.m_offset, so [-1] reaches it.
    void linkCall(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);

        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
    }
1341
    // Links a call in already-copied code at 'code' to the absolute address
    // 'to'; identical mechanics to the static linkJump().
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);
        ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
        ASSERT(linkOffset == static_cast<int>(linkOffset));
        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset;
    }
1349
    // Repoints an existing call whose rel32 field ends at 'where' to call
    // 'destination'; same rewrite as patchJump().
    static void patchCall(intptr_t where, void* destination)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
        ASSERT(offset == static_cast<int32_t>(offset));
        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
    }
1356
    // Overwrites the pointer-sized value that ends at code + position.m_offset
    // with 'value' (the [-1] indexing addresses the preceding pointer slot).
    static void patchAddress(void* code, JmpDst position, void* value)
    {
        ASSERT(position.m_offset != -1);

        reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
    }
1363
    // Returns the buffer offset of the instruction following a call — i.e.
    // the call's return address — which is exactly the JmpSrc's offset.
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }
1369
    // Translates a JmpSrc's buffer offset into an absolute address within
    // the copied code at 'code'.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }
1374
    // Translates a JmpDst's buffer offset into an absolute address within
    // the copied code at 'code'.
    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }
1381
    // Byte distance from label src to label dst (positive if dst is later).
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
1386
    // Byte distance from label src to jump source dst.
    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }
1391
    // Byte distance from jump source src to label dst.
    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
1396
    // Overwrites the 32-bit immediate that ends at 'where' with 'value'.
    static void patchImmediate(intptr_t where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }
1401
    // Overwrites the pointer-sized immediate that ends at 'where' with 'value'.
    static void patchPointer(intptr_t where, intptr_t value)
    {
        reinterpret_cast<intptr_t*>(where)[-1] = value;
    }
1406
    // Copies the assembled instruction stream into executable memory drawn
    // from 'allocator' and returns the copy's address (asserted non-null).
    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        ASSERT(copy);
        return copy;
    }
1413
1414private:
1415
    // Low-level encoder: writes raw x86/x86-64 instruction bytes into an
    // AssemblerBuffer — legacy prefixes, REX prefixes (x86-64 only), opcode
    // bytes, ModRM/SIB operand encodings, and trailing immediates.
    class X86InstructionFormatter {

        // Space reserved up-front (via ensureSpace) for each formatted
        // opcode sequence, so the individual byte writes can be unchecked.
        static const int maxInstructionSize = 16;

    public:

        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }

        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        //   * None - instruction takes no operands.
        //   * One register - the low three bits of the RegisterID are added into the opcode.
        //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).

        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always uses a 32-bit displacement (needed when the
        // displacement field itself will be repatched later).
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !PLATFORM(X86_64)
        void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif

        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if PLATFORM(X86_64)
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.w prefix.
        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
        // the normal (non-'64'-postfixed) formatters should be used.

        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // 64-bit variant that always uses a 32-bit displacement.
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
#endif

        // Byte-operands:
        //
        // These methods format byte operations. Byte operations differ from the normal
        // formatters in the circumstances under which they will decide to emit REX prefixes.
        // These should be used where any register operand signifies a byte register.
        //
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64. These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated. In order to ensure the correct registers spl..dil are selected a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions where operand sizes are mixed, in which
        // case an unnecessary REX will be emitted, for example:
        //     movzbl %al, %edi
        // In this case a REX will be planted since edi is 7 (and were this a byte operand
        // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
        // be silently ignored by the processor.
        //
        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
        // is provided to check byte register operands.

        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // Immediates:
        //
        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        // Plants a 32-bit zero placeholder and returns a JmpSrc recording
        // the offset just past it; the link*/patch* methods later overwrite
        // the placeholder via [-1] indexing from that offset.
        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }

        // Administrative methods:

        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }

    private:

        // Internals; ModRm and REX formatters.

        // Register numbers with special meaning in ModRM/SIB encodings:
        // a ModRM base of ebp/esp (and r13/r12 on x86-64, which share the
        // same low three bits) selects "no base + disp32" / "SIB follows"
        // rather than the register itself, and an index of esp means "no
        // index" — the emitters below must encode around these cases.
        static const RegisterID noBase = X86::ebp;
        static const RegisterID hasSib = X86::esp;
        static const RegisterID noIndex = X86::esp;
#if PLATFORM(X86_64)
        static const RegisterID noBase2 = X86::r13;
        static const RegisterID hasSib2 = X86::r12;

        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86::r8);
        }

        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers being accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86::esp);
        }

        // Format a REX prefix byte.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
        }

        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif

        // Values for the ModRM 'mod' field (placed in the top two bits).
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        // Packs mod (2 bits), reg (3 bits), rm (3 bits) into one ModRM byte.
        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

        // Emits a ModRM byte selecting a SIB, followed by the SIB byte itself.
        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }

        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }

        // Emits ModRM (+ SIB/displacement as needed) for [base + offset],
        // choosing the shortest displacement encoding (none / disp8 / disp32).
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
                // A base of ebp or r13 with no displacement would mean disp32
                // addressing, so those bases always get an explicit displacement.
#if PLATFORM(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }

        // As memoryModRM(reg, base, offset), but always emits a 32-bit
        // displacement so the field can be repatched later.
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }

        // Emits ModRM + SIB (+ displacement) for [base + index*2^scale + offset].
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if PLATFORM(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }

#if !PLATFORM(X86_64)
        // Emits ModRM + absolute 32-bit address operand (32-bit x86 only).
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif

        AssemblerBuffer m_buffer;
    } m_formatter;
1845};
1846
1847} // namespace JSC
1848
#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
1850
1851#endif // X86Assembler_h
Note: See TracBrowser for help on using the repository browser.