source: webkit/trunk/JavaScriptCore/assembler/X86Assembler.h@ 44843

Last change on this file since 44843 was 44478, checked in by [email protected], 16 years ago

2009-06-05 Gavin Barraclough <[email protected]>

Rubber stamped by Sam Weinig.

Add missing ASSERT.

  • assembler/X86Assembler.h: (JSC::X86Assembler::getRelocatedAddress):
File size: 57.5 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef X86Assembler_h
27#define X86Assembler_h
28
29#include <wtf/Platform.h>
30
31#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33#include "AssemblerBuffer.h"
34#include <stdint.h>
35#include <wtf/Assertions.h>
36#include <wtf/Vector.h>
37
38namespace JSC {
39
// Returns true if 'value' can be encoded as a sign-extended 8-bit immediate,
// i.e. it lies within the representable range of a signed byte.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value >= -128 && value <= 127; }
41#if PLATFORM(X86_64)
// Returns true if 'value' survives a round-trip through int32_t, i.e. it can
// be encoded as a sign-extended 32-bit immediate in 64-bit code.
inline bool CAN_SIGN_EXTEND_32_64(intptr_t value)
{
    const intptr_t roundTripped = (intptr_t)(int32_t)value;
    return roundTripped == value;
}
// Returns true if 'value' survives a round-trip through uint32_t (zero
// extension), i.e. it can be encoded as an unsigned 32-bit immediate.
inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value)
{
    const intptr_t zeroExtended = (intptr_t)(uint32_t)value;
    return zeroExtended == value;
}
44#endif
45
namespace X86 {
    // General-purpose registers, declared in machine-encoding order
    // (eax == 0 ... edi == 7), so the enumerator value is the register
    // number used in ModRM/SIB bytes. r8-r15 (values 8-15) additionally
    // require a REX prefix to encode.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if PLATFORM(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers, likewise numbered to match their machine encoding.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
80
81class X86Assembler {
82public:
83 typedef X86::RegisterID RegisterID;
84 typedef X86::XMMRegisterID XMMRegisterID;
85 typedef XMMRegisterID FPRegisterID;
86
    // x86 condition codes, declared in machine-encoding order (the enumerator
    // value is the condition's 'tttn' field), so jccRel32()/setccOpcode() can
    // form an opcode by simple addition to a base opcode.
    typedef enum {
        ConditionO,   // overflow
        ConditionNO,  // no overflow
        ConditionB,   // below (unsigned <)
        ConditionAE,  // above or equal (unsigned >=)
        ConditionE,   // equal / zero
        ConditionNE,  // not equal / not zero
        ConditionBE,  // below or equal (unsigned <=)
        ConditionA,   // above (unsigned >)
        ConditionS,   // sign set (negative)
        ConditionNS,  // sign clear
        ConditionP,   // parity even
        ConditionNP,  // parity odd
        ConditionL,   // less (signed <)
        ConditionGE,  // greater or equal (signed >=)
        ConditionLE,  // less or equal (signed <=)
        ConditionG,   // greater (signed >)

        // Carry and below test the same flag (CF), so these are aliases.
        ConditionC = ConditionB,
        ConditionNC = ConditionAE,
    } Condition;
108
109private:
110 typedef enum {
111 OP_ADD_EvGv = 0x01,
112 OP_ADD_GvEv = 0x03,
113 OP_OR_EvGv = 0x09,
114 OP_OR_GvEv = 0x0B,
115 OP_2BYTE_ESCAPE = 0x0F,
116 OP_AND_EvGv = 0x21,
117 OP_SUB_EvGv = 0x29,
118 OP_SUB_GvEv = 0x2B,
119 PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
120 OP_XOR_EvGv = 0x31,
121 OP_CMP_EvGv = 0x39,
122 OP_CMP_GvEv = 0x3B,
123#if PLATFORM(X86_64)
124 PRE_REX = 0x40,
125#endif
126 OP_PUSH_EAX = 0x50,
127 OP_POP_EAX = 0x58,
128#if PLATFORM(X86_64)
129 OP_MOVSXD_GvEv = 0x63,
130#endif
131 PRE_OPERAND_SIZE = 0x66,
132 PRE_SSE_66 = 0x66,
133 OP_PUSH_Iz = 0x68,
134 OP_IMUL_GvEvIz = 0x69,
135 OP_GROUP1_EvIz = 0x81,
136 OP_GROUP1_EvIb = 0x83,
137 OP_TEST_EvGv = 0x85,
138 OP_XCHG_EvGv = 0x87,
139 OP_MOV_EvGv = 0x89,
140 OP_MOV_GvEv = 0x8B,
141 OP_LEA = 0x8D,
142 OP_GROUP1A_Ev = 0x8F,
143 OP_CDQ = 0x99,
144 OP_MOV_EAXOv = 0xA1,
145 OP_MOV_OvEAX = 0xA3,
146 OP_MOV_EAXIv = 0xB8,
147 OP_GROUP2_EvIb = 0xC1,
148 OP_RET = 0xC3,
149 OP_GROUP11_EvIz = 0xC7,
150 OP_INT3 = 0xCC,
151 OP_GROUP2_Ev1 = 0xD1,
152 OP_GROUP2_EvCL = 0xD3,
153 OP_CALL_rel32 = 0xE8,
154 OP_JMP_rel32 = 0xE9,
155 PRE_SSE_F2 = 0xF2,
156 OP_HLT = 0xF4,
157 OP_GROUP3_EbIb = 0xF6,
158 OP_GROUP3_Ev = 0xF7,
159 OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
160 OP_GROUP5_Ev = 0xFF,
161 } OneByteOpcodeID;
162
    // Second byte of two-byte (0x0F-escaped) opcodes. Naming follows the
    // Intel manual's operand notation (Vsd/Wsd = SSE reg/mem, Gv/Ev = GPR
    // reg/mem, Ib = immediate byte).
    // NOTE(review): OP_SETCC is the one entry not using the OP2_ prefix
    // convention, although it is a two-byte opcode like the rest.
    typedef enum {
        OP2_MOVSD_VsdWsd = 0x10,
        OP2_MOVSD_WsdVsd = 0x11,
        OP2_CVTSI2SD_VsdEd = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd = 0x2E,
        OP2_ADDSD_VsdWsd = 0x58,
        OP2_MULSD_VsdWsd = 0x59,
        OP2_SUBSD_VsdWsd = 0x5C,
        OP2_MOVD_VdEd = 0x6E,
        OP2_MOVD_EdVd = 0x7E,
        OP2_JCC_rel32 = 0x80,   // base of the Jcc run; actual opcode is 0x80 + Condition
        OP_SETCC = 0x90,        // base of the SETcc run; actual opcode is 0x90 + Condition
        OP2_IMUL_GvEv = 0xAF,
        OP2_MOVZX_GvEb = 0xB6,
        OP2_MOVZX_GvEw = 0xB7,
        OP2_PEXTRW_GdUdIb = 0xC5,
    } TwoByteOpcodeID;
181
182 TwoByteOpcodeID jccRel32(Condition cond)
183 {
184 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
185 }
186
187 TwoByteOpcodeID setccOpcode(Condition cond)
188 {
189 return (TwoByteOpcodeID)(OP_SETCC + cond);
190 }
191
    // For "group" opcodes the operation is selected by the reg (/r) field of
    // the ModRM byte rather than by the opcode itself; these are the /r
    // values for each group, per the one-byte opcode extension tables.
    typedef enum {
        GROUP1_OP_ADD = 0,
        GROUP1_OP_OR = 1,
        GROUP1_OP_ADC = 2,
        GROUP1_OP_AND = 4,
        GROUP1_OP_SUB = 5,
        GROUP1_OP_XOR = 6,
        GROUP1_OP_CMP = 7,

        GROUP1A_OP_POP = 0,

        GROUP2_OP_SHL = 4,
        GROUP2_OP_SAR = 7,

        GROUP3_OP_TEST = 0,
        GROUP3_OP_NOT = 2,
        GROUP3_OP_IDIV = 7,

        GROUP5_OP_CALLN = 2,
        GROUP5_OP_JMPN = 4,
        GROUP5_OP_PUSH = 6,

        GROUP11_MOV = 0,
    } GroupOpcodeID;
216
217 class X86InstructionFormatter;
218public:
219
    // A label identifying the source of a jump or call: m_offset is an offset
    // into the assembler buffer recorded when the instruction was emitted, so
    // the branch can later be linked to its destination. A default-constructed
    // JmpSrc (offset -1) is "invalid / not yet set".
    class JmpSrc {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpSrc()
            : m_offset(-1)
        {
        }

    private:
        JmpSrc(int offset)
            : m_offset(offset)
        {
        }

        int m_offset;
    };
237
    // A label identifying a potential jump destination: an offset into the
    // assembler buffer. m_used records whether any branch actually targets
    // this label. A default-constructed JmpDst (offset -1) is invalid.
    class JmpDst {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpDst()
            : m_offset(-1)
            , m_used(false)
        {
        }

        bool isUsed() const { return m_used; }
        void used() { m_used = true; }
    private:
        JmpDst(int offset)
            : m_offset(offset)
            , m_used(false)
        {
            // m_offset is squeezed into a 31-bit field (sharing a word with
            // m_used); assert that the offset was not truncated.
            ASSERT(m_offset == offset);
        }

        int m_offset : 31;
        bool m_used : 1;
    };
261
    X86Assembler()
    {
    }

    // Number of bytes of machine code emitted into the buffer so far.
    size_t size() const { return m_formatter.size(); }
267
268 // Stack operations:
269
    // Push a register. Uses the short single-byte form; the register is
    // presumably folded into the opcode (0x50+r) by the formatter.
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    // Pop into a register (short 0x58+r form).
    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    // Push a 32-bit immediate.
    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    // Push the 32-bit value at [base + offset].
    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    // Pop into the memory location [base + offset].
    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
295
296 // Arithmetic operations:
297
298#if !PLATFORM(X86_64)
299 void adcl_im(int imm, void* addr)
300 {
301 if (CAN_SIGN_EXTEND_8_32(imm)) {
302 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
303 m_formatter.immediate8(imm);
304 } else {
305 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
306 m_formatter.immediate32(imm);
307 }
308 }
309#endif
310
311 void addl_rr(RegisterID src, RegisterID dst)
312 {
313 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
314 }
315
316 void addl_mr(int offset, RegisterID base, RegisterID dst)
317 {
318 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
319 }
320
321 void addl_ir(int imm, RegisterID dst)
322 {
323 if (CAN_SIGN_EXTEND_8_32(imm)) {
324 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
325 m_formatter.immediate8(imm);
326 } else {
327 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
328 m_formatter.immediate32(imm);
329 }
330 }
331
332 void addl_im(int imm, int offset, RegisterID base)
333 {
334 if (CAN_SIGN_EXTEND_8_32(imm)) {
335 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
336 m_formatter.immediate8(imm);
337 } else {
338 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
339 m_formatter.immediate32(imm);
340 }
341 }
342
343#if PLATFORM(X86_64)
344 void addq_rr(RegisterID src, RegisterID dst)
345 {
346 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
347 }
348
349 void addq_ir(int imm, RegisterID dst)
350 {
351 if (CAN_SIGN_EXTEND_8_32(imm)) {
352 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
353 m_formatter.immediate8(imm);
354 } else {
355 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
356 m_formatter.immediate32(imm);
357 }
358 }
359
360 void addq_im(int imm, int offset, RegisterID base)
361 {
362 if (CAN_SIGN_EXTEND_8_32(imm)) {
363 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
364 m_formatter.immediate8(imm);
365 } else {
366 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
367 m_formatter.immediate32(imm);
368 }
369 }
370#else
371 void addl_im(int imm, void* addr)
372 {
373 if (CAN_SIGN_EXTEND_8_32(imm)) {
374 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
375 m_formatter.immediate8(imm);
376 } else {
377 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
378 m_formatter.immediate32(imm);
379 }
380 }
381#endif
382
383 void andl_rr(RegisterID src, RegisterID dst)
384 {
385 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
386 }
387
388 void andl_ir(int imm, RegisterID dst)
389 {
390 if (CAN_SIGN_EXTEND_8_32(imm)) {
391 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
392 m_formatter.immediate8(imm);
393 } else {
394 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
395 m_formatter.immediate32(imm);
396 }
397 }
398
399 void andl_im(int imm, int offset, RegisterID base)
400 {
401 if (CAN_SIGN_EXTEND_8_32(imm)) {
402 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
403 m_formatter.immediate8(imm);
404 } else {
405 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
406 m_formatter.immediate32(imm);
407 }
408 }
409
410#if PLATFORM(X86_64)
411 void andq_rr(RegisterID src, RegisterID dst)
412 {
413 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
414 }
415
416 void andq_ir(int imm, RegisterID dst)
417 {
418 if (CAN_SIGN_EXTEND_8_32(imm)) {
419 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
420 m_formatter.immediate8(imm);
421 } else {
422 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
423 m_formatter.immediate32(imm);
424 }
425 }
426#else
427 void andl_im(int imm, void* addr)
428 {
429 if (CAN_SIGN_EXTEND_8_32(imm)) {
430 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
431 m_formatter.immediate8(imm);
432 } else {
433 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
434 m_formatter.immediate32(imm);
435 }
436 }
437#endif
438
439 void notl_r(RegisterID dst)
440 {
441 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
442 }
443
444 void orl_rr(RegisterID src, RegisterID dst)
445 {
446 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
447 }
448
449 void orl_mr(int offset, RegisterID base, RegisterID dst)
450 {
451 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
452 }
453
454 void orl_ir(int imm, RegisterID dst)
455 {
456 if (CAN_SIGN_EXTEND_8_32(imm)) {
457 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
458 m_formatter.immediate8(imm);
459 } else {
460 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
461 m_formatter.immediate32(imm);
462 }
463 }
464
465 void orl_im(int imm, int offset, RegisterID base)
466 {
467 if (CAN_SIGN_EXTEND_8_32(imm)) {
468 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
469 m_formatter.immediate8(imm);
470 } else {
471 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
472 m_formatter.immediate32(imm);
473 }
474 }
475
476#if PLATFORM(X86_64)
477 void orq_rr(RegisterID src, RegisterID dst)
478 {
479 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
480 }
481
482 void orq_ir(int imm, RegisterID dst)
483 {
484 if (CAN_SIGN_EXTEND_8_32(imm)) {
485 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
486 m_formatter.immediate8(imm);
487 } else {
488 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
489 m_formatter.immediate32(imm);
490 }
491 }
492#else
493 void orl_im(int imm, void* addr)
494 {
495 if (CAN_SIGN_EXTEND_8_32(imm)) {
496 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
497 m_formatter.immediate8(imm);
498 } else {
499 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
500 m_formatter.immediate32(imm);
501 }
502 }
503#endif
504
505 void subl_rr(RegisterID src, RegisterID dst)
506 {
507 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
508 }
509
510 void subl_mr(int offset, RegisterID base, RegisterID dst)
511 {
512 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
513 }
514
515 void subl_ir(int imm, RegisterID dst)
516 {
517 if (CAN_SIGN_EXTEND_8_32(imm)) {
518 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
519 m_formatter.immediate8(imm);
520 } else {
521 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
522 m_formatter.immediate32(imm);
523 }
524 }
525
526 void subl_im(int imm, int offset, RegisterID base)
527 {
528 if (CAN_SIGN_EXTEND_8_32(imm)) {
529 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
530 m_formatter.immediate8(imm);
531 } else {
532 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
533 m_formatter.immediate32(imm);
534 }
535 }
536
537#if PLATFORM(X86_64)
538 void subq_rr(RegisterID src, RegisterID dst)
539 {
540 m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
541 }
542
543 void subq_ir(int imm, RegisterID dst)
544 {
545 if (CAN_SIGN_EXTEND_8_32(imm)) {
546 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
547 m_formatter.immediate8(imm);
548 } else {
549 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
550 m_formatter.immediate32(imm);
551 }
552 }
553#else
554 void subl_im(int imm, void* addr)
555 {
556 if (CAN_SIGN_EXTEND_8_32(imm)) {
557 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
558 m_formatter.immediate8(imm);
559 } else {
560 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
561 m_formatter.immediate32(imm);
562 }
563 }
564#endif
565
566 void xorl_rr(RegisterID src, RegisterID dst)
567 {
568 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
569 }
570
571 void xorl_ir(int imm, RegisterID dst)
572 {
573 if (CAN_SIGN_EXTEND_8_32(imm)) {
574 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
575 m_formatter.immediate8(imm);
576 } else {
577 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
578 m_formatter.immediate32(imm);
579 }
580 }
581
582#if PLATFORM(X86_64)
583 void xorq_rr(RegisterID src, RegisterID dst)
584 {
585 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
586 }
587
588 void xorq_ir(int imm, RegisterID dst)
589 {
590 if (CAN_SIGN_EXTEND_8_32(imm)) {
591 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
592 m_formatter.immediate8(imm);
593 } else {
594 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
595 m_formatter.immediate32(imm);
596 }
597 }
598#endif
599
    // Arithmetic right shift of a 32-bit register by an immediate.
    // A shift count of 1 uses the dedicated shift-by-one opcode, which
    // saves the immediate byte.
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    // Arithmetic right shift of a 32-bit register by the count in CL.
    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    // Left shift of a 32-bit register by an immediate (same shift-by-one
    // optimization as sarl_i8r).
    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    // Left shift of a 32-bit register by the count in CL.
    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }
629
630#if PLATFORM(X86_64)
631 void sarq_CLr(RegisterID dst)
632 {
633 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
634 }
635
636 void sarq_i8r(int imm, RegisterID dst)
637 {
638 if (imm == 1)
639 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
640 else {
641 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
642 m_formatter.immediate8(imm);
643 }
644 }
645#endif
646
647 void imull_rr(RegisterID src, RegisterID dst)
648 {
649 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
650 }
651
652 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
653 {
654 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
655 m_formatter.immediate32(value);
656 }
657
658 void idivl_r(RegisterID dst)
659 {
660 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
661 }
662
663 // Comparisons:
664
665 void cmpl_rr(RegisterID src, RegisterID dst)
666 {
667 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
668 }
669
670 void cmpl_rm(RegisterID src, int offset, RegisterID base)
671 {
672 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
673 }
674
675 void cmpl_mr(int offset, RegisterID base, RegisterID src)
676 {
677 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
678 }
679
680 void cmpl_ir(int imm, RegisterID dst)
681 {
682 if (CAN_SIGN_EXTEND_8_32(imm)) {
683 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
684 m_formatter.immediate8(imm);
685 } else {
686 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
687 m_formatter.immediate32(imm);
688 }
689 }
690
691 void cmpl_ir_force32(int imm, RegisterID dst)
692 {
693 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
694 m_formatter.immediate32(imm);
695 }
696
697 void cmpl_im(int imm, int offset, RegisterID base)
698 {
699 if (CAN_SIGN_EXTEND_8_32(imm)) {
700 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
701 m_formatter.immediate8(imm);
702 } else {
703 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
704 m_formatter.immediate32(imm);
705 }
706 }
707
708 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
709 {
710 if (CAN_SIGN_EXTEND_8_32(imm)) {
711 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
712 m_formatter.immediate8(imm);
713 } else {
714 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
715 m_formatter.immediate32(imm);
716 }
717 }
718
719 void cmpl_im_force32(int imm, int offset, RegisterID base)
720 {
721 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
722 m_formatter.immediate32(imm);
723 }
724
725#if PLATFORM(X86_64)
726 void cmpq_rr(RegisterID src, RegisterID dst)
727 {
728 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
729 }
730
731 void cmpq_rm(RegisterID src, int offset, RegisterID base)
732 {
733 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
734 }
735
736 void cmpq_mr(int offset, RegisterID base, RegisterID src)
737 {
738 m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
739 }
740
741 void cmpq_ir(int imm, RegisterID dst)
742 {
743 if (CAN_SIGN_EXTEND_8_32(imm)) {
744 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
745 m_formatter.immediate8(imm);
746 } else {
747 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
748 m_formatter.immediate32(imm);
749 }
750 }
751
752 void cmpq_im(int imm, int offset, RegisterID base)
753 {
754 if (CAN_SIGN_EXTEND_8_32(imm)) {
755 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
756 m_formatter.immediate8(imm);
757 } else {
758 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
759 m_formatter.immediate32(imm);
760 }
761 }
762
763 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
764 {
765 if (CAN_SIGN_EXTEND_8_32(imm)) {
766 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
767 m_formatter.immediate8(imm);
768 } else {
769 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
770 m_formatter.immediate32(imm);
771 }
772 }
773#else
774 void cmpl_rm(RegisterID reg, void* addr)
775 {
776 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
777 }
778
779 void cmpl_im(int imm, void* addr)
780 {
781 if (CAN_SIGN_EXTEND_8_32(imm)) {
782 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
783 m_formatter.immediate8(imm);
784 } else {
785 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
786 m_formatter.immediate32(imm);
787 }
788 }
789#endif
790
791 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
792 {
793 m_formatter.prefix(PRE_OPERAND_SIZE);
794 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
795 }
796
797 void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
798 {
799 if (CAN_SIGN_EXTEND_8_32(imm)) {
800 m_formatter.prefix(PRE_OPERAND_SIZE);
801 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
802 m_formatter.immediate8(imm);
803 } else {
804 m_formatter.prefix(PRE_OPERAND_SIZE);
805 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
806 m_formatter.immediate16(imm);
807 }
808 }
809
810 void testl_rr(RegisterID src, RegisterID dst)
811 {
812 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
813 }
814
815 void testl_i32r(int imm, RegisterID dst)
816 {
817 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
818 m_formatter.immediate32(imm);
819 }
820
821 void testl_i32m(int imm, int offset, RegisterID base)
822 {
823 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
824 m_formatter.immediate32(imm);
825 }
826
827 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
828 {
829 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
830 m_formatter.immediate32(imm);
831 }
832
833#if PLATFORM(X86_64)
834 void testq_rr(RegisterID src, RegisterID dst)
835 {
836 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
837 }
838
839 void testq_i32r(int imm, RegisterID dst)
840 {
841 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
842 m_formatter.immediate32(imm);
843 }
844
845 void testq_i32m(int imm, int offset, RegisterID base)
846 {
847 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
848 m_formatter.immediate32(imm);
849 }
850
851 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
852 {
853 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
854 m_formatter.immediate32(imm);
855 }
856#endif
857
858 void testw_rr(RegisterID src, RegisterID dst)
859 {
860 m_formatter.prefix(PRE_OPERAND_SIZE);
861 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
862 }
863
864 void testb_i8r(int imm, RegisterID dst)
865 {
866 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
867 m_formatter.immediate8(imm);
868 }
869
    // Emit SETcc: sets the low byte of dst to 0 or 1 according to cond.
    // SETcc does not use the reg field of its ModRM byte, hence the
    // (GroupOpcodeID)0 placeholder.
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    // setz is a mnemonic alias for sete (same flag, ZF).
    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    // setnz is a mnemonic alias for setne.
    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
894
895 // Various move ops:
896
897 void cdq()
898 {
899 m_formatter.oneByteOp(OP_CDQ);
900 }
901
902 void xchgl_rr(RegisterID src, RegisterID dst)
903 {
904 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
905 }
906
907#if PLATFORM(X86_64)
908 void xchgq_rr(RegisterID src, RegisterID dst)
909 {
910 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
911 }
912#endif
913
914 void movl_rr(RegisterID src, RegisterID dst)
915 {
916 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
917 }
918
919 void movl_rm(RegisterID src, int offset, RegisterID base)
920 {
921 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
922 }
923
924 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
925 {
926 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
927 }
928
929 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
930 {
931 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
932 }
933
934 void movl_mEAX(void* addr)
935 {
936 m_formatter.oneByteOp(OP_MOV_EAXOv);
937#if PLATFORM(X86_64)
938 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
939#else
940 m_formatter.immediate32(reinterpret_cast<int>(addr));
941#endif
942 }
943
944 void movl_mr(int offset, RegisterID base, RegisterID dst)
945 {
946 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
947 }
948
949 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
950 {
951 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
952 }
953
954 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
955 {
956 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
957 }
958
959 void movl_i32r(int imm, RegisterID dst)
960 {
961 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
962 m_formatter.immediate32(imm);
963 }
964
965 void movl_i32m(int imm, int offset, RegisterID base)
966 {
967 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
968 m_formatter.immediate32(imm);
969 }
970
971 void movl_EAXm(void* addr)
972 {
973 m_formatter.oneByteOp(OP_MOV_OvEAX);
974#if PLATFORM(X86_64)
975 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
976#else
977 m_formatter.immediate32(reinterpret_cast<int>(addr));
978#endif
979 }
980
981#if PLATFORM(X86_64)
982 void movq_rr(RegisterID src, RegisterID dst)
983 {
984 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
985 }
986
987 void movq_rm(RegisterID src, int offset, RegisterID base)
988 {
989 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
990 }
991
992 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
993 {
994 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
995 }
996
997 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
998 {
999 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
1000 }
1001
1002 void movq_mEAX(void* addr)
1003 {
1004 m_formatter.oneByteOp64(OP_MOV_EAXOv);
1005 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1006 }
1007
1008 void movq_EAXm(void* addr)
1009 {
1010 m_formatter.oneByteOp64(OP_MOV_OvEAX);
1011 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1012 }
1013
1014 void movq_mr(int offset, RegisterID base, RegisterID dst)
1015 {
1016 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
1017 }
1018
1019 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
1020 {
1021 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
1022 }
1023
1024 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1025 {
1026 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
1027 }
1028
1029 void movq_i32m(int imm, int offset, RegisterID base)
1030 {
1031 m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1032 m_formatter.immediate32(imm);
1033 }
1034
1035 void movq_i64r(int64_t imm, RegisterID dst)
1036 {
1037 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
1038 m_formatter.immediate64(imm);
1039 }
1040
1041 void movsxd_rr(RegisterID src, RegisterID dst)
1042 {
1043 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
1044 }
1045
1046
1047#else
1048 void movl_rm(RegisterID src, void* addr)
1049 {
1050 if (src == X86::eax)
1051 movl_EAXm(addr);
1052 else
1053 m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
1054 }
1055
1056 void movl_mr(void* addr, RegisterID dst)
1057 {
1058 if (dst == X86::eax)
1059 movl_mEAX(addr);
1060 else
1061 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
1062 }
1063
1064 void movl_i32m(int imm, void* addr)
1065 {
1066 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
1067 m_formatter.immediate32(imm);
1068 }
1069#endif
1070
1071 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
1072 {
1073 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
1074 }
1075
1076 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1077 {
1078 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
1079 }
1080
1081 void movzbl_rr(RegisterID src, RegisterID dst)
1082 {
1083 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1084 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1085 // REX prefixes are defined to be silently ignored by the processor.
1086 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
1087 }
1088
1089 void leal_mr(int offset, RegisterID base, RegisterID dst)
1090 {
1091 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
1092 }
1093#if PLATFORM(X86_64)
1094 void leaq_mr(int offset, RegisterID base, RegisterID dst)
1095 {
1096 m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
1097 }
1098#endif
1099
1100 // Flow control:
1101
    // Direct call (E8 rel32). The returned JmpSrc marks the rel32 immediate
    // so it can be linked to its target later.
    JmpSrc call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register (FF /2). There is no relocatable
    // immediate; the JmpSrc records the end offset of the instruction.
    JmpSrc call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return JmpSrc(m_formatter.size());
    }

    // Indirect call through memory at [base + offset] (FF /2).
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    // Unconditional direct jump (E9 rel32), linkable via the returned JmpSrc.
    JmpSrc jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return a JmpSrc so we have a label to the jump, so we can use this
    // To make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    JmpSrc jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return JmpSrc(m_formatter.size());
    }

    // Indirect jump through memory at [base + offset] (FF /4).
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
1138
1139 JmpSrc jne()
1140 {
1141 m_formatter.twoByteOp(jccRel32(ConditionNE));
1142 return m_formatter.immediateRel32();
1143 }
1144
1145 JmpSrc jnz()
1146 {
1147 return jne();
1148 }
1149
1150 JmpSrc je()
1151 {
1152 m_formatter.twoByteOp(jccRel32(ConditionE));
1153 return m_formatter.immediateRel32();
1154 }
1155
1156 JmpSrc jl()
1157 {
1158 m_formatter.twoByteOp(jccRel32(ConditionL));
1159 return m_formatter.immediateRel32();
1160 }
1161
1162 JmpSrc jb()
1163 {
1164 m_formatter.twoByteOp(jccRel32(ConditionB));
1165 return m_formatter.immediateRel32();
1166 }
1167
1168 JmpSrc jle()
1169 {
1170 m_formatter.twoByteOp(jccRel32(ConditionLE));
1171 return m_formatter.immediateRel32();
1172 }
1173
1174 JmpSrc jbe()
1175 {
1176 m_formatter.twoByteOp(jccRel32(ConditionBE));
1177 return m_formatter.immediateRel32();
1178 }
1179
1180 JmpSrc jge()
1181 {
1182 m_formatter.twoByteOp(jccRel32(ConditionGE));
1183 return m_formatter.immediateRel32();
1184 }
1185
1186 JmpSrc jg()
1187 {
1188 m_formatter.twoByteOp(jccRel32(ConditionG));
1189 return m_formatter.immediateRel32();
1190 }
1191
1192 JmpSrc ja()
1193 {
1194 m_formatter.twoByteOp(jccRel32(ConditionA));
1195 return m_formatter.immediateRel32();
1196 }
1197
1198 JmpSrc jae()
1199 {
1200 m_formatter.twoByteOp(jccRel32(ConditionAE));
1201 return m_formatter.immediateRel32();
1202 }
1203
1204 JmpSrc jo()
1205 {
1206 m_formatter.twoByteOp(jccRel32(ConditionO));
1207 return m_formatter.immediateRel32();
1208 }
1209
1210 JmpSrc jp()
1211 {
1212 m_formatter.twoByteOp(jccRel32(ConditionP));
1213 return m_formatter.immediateRel32();
1214 }
1215
1216 JmpSrc js()
1217 {
1218 m_formatter.twoByteOp(jccRel32(ConditionS));
1219 return m_formatter.immediateRel32();
1220 }
1221
    // Generic conditional jump: emits the two-byte 0x0F Jcc opcode for 'cond'
    // followed by a zero rel32 placeholder to be filled in by linking/patching.
    JmpSrc jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
1227
1228 // SSE operations:
1229
    // ADDSD xmm, xmm: double-precision add, dst += src (F2 0F 58).
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1235
    // ADDSD xmm, m64: double-precision add from memory, dst += [base + offset].
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }
1241
    // CVTSI2SD: convert the integer in 'src' to a double in 'dst'.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }
1247
    // CVTTSD2SI: truncating conversion of the double in 'src' to an integer in 'dst'.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }
1253
    // MOVD: move the low 32 bits of XMM 'src' to general-purpose register 'dst'.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }
1259
#if PLATFORM(X86_64)
    // MOVQ (REX.W MOVD): move 64 bits from XMM 'src' to GPR 'dst'.
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // MOVQ (REX.W MOVD): move 64 bits from GPR 'src' to XMM 'dst'.
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
#endif
1273
    // MOVSD m64, xmm: store the double in 'src' to [base + offset].
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }
1279
    // MOVSD xmm, m64: load the double at [base + offset] into 'dst'.
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }
1285
    // MULSD xmm, xmm: double-precision multiply, dst *= src.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1291
    // MULSD xmm, m64: double-precision multiply from memory, dst *= [base + offset].
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }
1297
    // PEXTRW: extract 16-bit word 'whichWord' of XMM 'src' into GPR 'dst';
    // the word index is emitted as an 8-bit immediate after the ModRM byte.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }
1304
    // SUBSD xmm, xmm: double-precision subtract, dst -= src.
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1310
    // SUBSD xmm, m64: double-precision subtract from memory, dst -= [base + offset].
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }
1316
    // UCOMISD: unordered compare of two doubles, setting EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1322
1323 // Misc instructions:
1324
    // INT3: software breakpoint.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }
1329
    // RET: near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }
1334
    // Emit the branch-not-taken hint prefix; applies to the next branch emitted.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1339
1340 // Assembler admin methods:
1341
    // Return a label (JmpDst) marking the current position in the buffer.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }
1346
    // Pad the buffer with single-byte HLT opcodes until it reaches the requested
    // alignment, then return a label at the aligned position.
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
1354
    // Linking & patching:
    //
    // 'link' and 'patch' methods are for use on unprotected code - such as the code
    // within the AssemblerBuffer, and code being patched by the patch buffer. Once
    // code has been finalized it is (platform support permitting) within a non-
    // writable region of memory; to modify the code in an execute-only executable
    // pool the 'repatch' and 'relink' methods should be used.
1362
    // Resolve an in-buffer jump: write the rel32 displacement from 'from' to 'to'
    // into the still-writable AssemblerBuffer.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(from.m_offset != -1);
        ASSERT(to.m_offset != -1);

        char* code = reinterpret_cast<char*>(m_formatter.data());
        patchRel32(code + from.m_offset, code + to.m_offset);
    }
1371
    // Resolve a jump in relocated (but still writable) code at 'code' to the
    // absolute address 'to'.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }
1378
    // Resolve a call in relocated (but still writable) code; identical rel32
    // patching to linkJump above.
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }
1385
#if PLATFORM(X86_64)
    // Store a 64-bit pointer immediately before 'where' (i.e. the pointer operand
    // that was emitted just prior to this position).
    static void patchPointerForCall(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }
#endif
1392
    // Overwrite the pointer-sized value ending at offset 'where' within writable
    // code starting at 'code'.
    static void patchPointer(void* code, JmpDst where, void* value)
    {
        ASSERT(where.m_offset != -1);

        patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }
1399
    // Retarget a jump in already-protected code: temporarily unprotect the 4-byte
    // rel32 field ending at 'from', then rewrite it.
    static void relinkJump(void* from, void* to)
    {
        ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
        patchRel32(from, to);
    }
1405
    // Retarget a call in already-protected code; same unprotect-then-patch dance
    // as relinkJump.
    static void relinkCall(void* from, void* to)
    {
        ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
        patchRel32(from, to);
    }
1411
    // Rewrite the 32-bit immediate ending at 'where' in protected code.
    static void repatchInt32(void* where, int32_t value)
    {
        ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(int32_t), sizeof(int32_t));
        patchInt32(where, value);
    }
1417
    // Rewrite the pointer-sized immediate ending at 'where' in protected code.
    static void repatchPointer(void* where, void* value)
    {
        ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(void*), sizeof(void*));
        patchPointer(where, value);
    }
1423
    // Convert a pointer-load instruction at 'where' into an LEA by overwriting
    // its opcode byte with OP_LEA (the ModRM/operand bytes are left unchanged).
    static void repatchLoadPtrToLEA(void* where)
    {
#if PLATFORM(X86_64)
        // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
        // Skip over the prefix byte.
        where = reinterpret_cast<char*>(where) + 1;
#endif
        ExecutableAllocator::MakeWritable unprotect(where, 1);
        *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
    }
1434
    // Return the buffer offset of the instruction following the call, i.e. the
    // return address's offset within the generated code.
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }
1440
    // Translate a JmpSrc's buffer offset into an absolute address within the
    // relocated code starting at 'code'.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }
1447
    // Translate a JmpDst's buffer offset into an absolute address within the
    // relocated code starting at 'code'.
    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }
1454
    // Byte distance from label 'src' to label 'dst' (positive if dst is later).
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
1459
    // Byte distance from label 'src' to jump source 'dst'.
    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }
1464
    // Byte distance from jump source 'src' to label 'dst'.
    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
1469
    // Copy the assembled code into executable memory from 'allocator'.
    // The copy is asserted non-null; callers receive the executable base address.
    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        ASSERT(copy);
        return copy;
    }
1476
1477private:
1478
    // Write a pointer-sized value into the slot ENDING at 'where' (hence the
    // [-1] index): patch positions mark the end of the emitted field.
    static void patchPointer(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }
1483
    // Write a 32-bit value into the slot ending at 'where' (see patchPointer).
    static void patchInt32(void* where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }
1488
    // Write the rel32 displacement for a branch ending at 'from' so that it
    // targets 'to'. The displacement is relative to the end of the instruction,
    // which is exactly the position 'from' records. Asserts the offset fits in
    // a signed 32-bit field before narrowing.
    static void patchRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        patchInt32(from, offset);
    }
1496
    // Low-level instruction encoder: emits prefix bytes, opcodes, ModRM/SIB
    // bytes and immediates into an AssemblerBuffer.
    class X86InstructionFormatter {

        // Upper bound on the bytes a single formatter call may emit; used to
        // reserve space once per instruction so subsequent puts are unchecked.
        static const int maxInstructionSize = 16;

    public:

        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }

        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        // * None - instruction takes no operands.
        // * One register - the low three bits of the RegisterID are added into the opcode.
        // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).

        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always emits a 32-bit displacement (no disp8 shortening).
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !PLATFORM(X86_64)
        void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif

        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if PLATFORM(X86_64)
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.w prefix.
        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
        // the normal (non-'64'-postfixed) formatters should be used.

        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always emits a 32-bit displacement (no disp8 shortening).
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
#endif

        // Byte-operands:
        //
        // These methods format byte operations. Byte operations differ from the normal
        // formatters in the circumstances under which they will decide to emit REX prefixes.
        // These should be used where any register operand signifies a byte register.
        //
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64. These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated. In order to ensure the correct registers spl..dil are selected a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions where a mix of operand sizes is
        // used; in this case an unnecessary REX will be emitted, for example:
        //     movzbl %al, %edi
        // In this case a REX will be planted since edi is 7 (and were this a byte operand
        // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
        // be silently ignored by the processor.
        //
        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
        // is provided to check byte register operands.

        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // Immediates:
        //
        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        // Emit a zero rel32 placeholder and return a JmpSrc marking the position
        // just past it (the point the displacement will later be measured from).
        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }

        // Administrative methods:

        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }

    private:

        // Internals; ModRm and REX formatters.

        // Register numbers with special meaning in ModRM/SIB encodings.
        static const RegisterID noBase = X86::ebp;
        static const RegisterID hasSib = X86::esp;
        static const RegisterID noIndex = X86::esp;
#if PLATFORM(X86_64)
        static const RegisterID noBase2 = X86::r13;
        static const RegisterID hasSib2 = X86::r12;

        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86::r8);
        }

        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers being accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86::esp);
        }

        // Format a REX prefix byte.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
        }

        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif

        // ModRM 'mod' field values (the two high bits of the ModRM byte).
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

        // Emit a ModRM byte selecting SIB addressing, followed by the SIB byte.
        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }

        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }

        // Encode [base + offset], choosing the shortest displacement form.
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
#if PLATFORM(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }

        // Encode [base + offset] always using a 32-bit displacement, so the
        // displacement field has a fixed position/size for later patching.
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }

        // Encode [base + index*scale + offset] via a SIB byte, choosing the
        // shortest displacement form.
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if PLATFORM(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }

#if !PLATFORM(X86_64)
        // Encode an absolute 32-bit address operand (32-bit x86 only).
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif

        AssemblerBuffer m_buffer;
    } m_formatter;
1926};
1927
1928} // namespace JSC
1929
#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
1931
1932#endif // X86Assembler_h
Note: See TracBrowser for help on using the repository browser.