source: webkit/trunk/JavaScriptCore/assembler/X86Assembler.h@ 43619

Last change on this file since 43619 was 43619, checked in by [email protected], 16 years ago

2009-05-12 Gavin Barraclough <[email protected]>

Reviewed by Oliver Hunt.

Add SamplingCounter tool to provide a simple mechanism for counting events in JSC
(enabled using ENABLE(SAMPLING_COUNTERS)). To count events within a single function
use the class 'SamplingCounter'; where the counter may be incremented from multiple
functions, 'GlobalSamplingCounter' may be convenient; all other counters (stack or
heap allocated, rather than statically declared) should use the DeletableSamplingCounter.
Further description of these classes is provided alongside their definition in
SamplingTool.h.

Counters may be incremented from c++ by calling the 'count()' method on the counter,
or may be incremented by JIT code by using the 'emitCount()' method within the JIT.

This patch also fixes CODEBLOCK_SAMPLING, which was missing a null pointer check.

  • JavaScriptCore.exp:
  • assembler/MacroAssemblerX86.h: (JSC::MacroAssemblerX86::addWithCarry32): (JSC::MacroAssemblerX86::and32): (JSC::MacroAssemblerX86::or32):
  • assembler/MacroAssemblerX86Common.h: (JSC::MacroAssemblerX86Common::and32): (JSC::MacroAssemblerX86Common::or32):
  • assembler/MacroAssemblerX86_64.h: (JSC::MacroAssemblerX86_64::and32): (JSC::MacroAssemblerX86_64::or32): (JSC::MacroAssemblerX86_64::addPtr):
  • assembler/X86Assembler.h: (JSC::X86Assembler::): (JSC::X86Assembler::adcl_im): (JSC::X86Assembler::addq_im): (JSC::X86Assembler::andl_im): (JSC::X86Assembler::orl_im):
  • bytecode/SamplingTool.cpp: (JSC::AbstractSamplingCounter::dump):
  • bytecode/SamplingTool.h: (JSC::AbstractSamplingCounter::count): (JSC::GlobalSamplingCounter::name): (JSC::SamplingCounter::SamplingCounter):
  • jit/JIT.h:
  • jit/JITCall.cpp: (JSC::):
  • jit/JITInlineMethods.h: (JSC::JIT::setSamplingFlag): (JSC::JIT::clearSamplingFlag): (JSC::JIT::emitCount):
  • jsc.cpp: (runWithScripts):
  • parser/Nodes.cpp: (JSC::ScopeNode::ScopeNode):
  • wtf/Platform.h:
File size: 57.5 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef X86Assembler_h
27#define X86Assembler_h
28
29#include <wtf/Platform.h>
30
31#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33#include "AssemblerBuffer.h"
34#include <stdint.h>
35#include <wtf/Assertions.h>
36#include <wtf/Vector.h>
37
38namespace JSC {
39
// True if `value` survives a round trip through a signed 8-bit truncation,
// i.e. it can be encoded as a sign-extended byte immediate (Ib) operand.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value)
{
    return static_cast<signed char>(value) == value;
}
#if PLATFORM(X86_64)
// True if `value` can be encoded as a sign-extended 32-bit immediate in 64-bit code.
inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
// True if `value` round-trips through a zero-extending 32-bit truncation.
inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }

// Byte offset into the mov-imm64-to-r11 / call-r11 sequence used when
// repatching call targets — TODO confirm against the repatching callers.
// NOTE(review): "REPTACH" is a typo for "REPATCH", but the macro is
// referenced elsewhere in the project, so the name is left unchanged here.
#define REPTACH_OFFSET_CALL_R11 3
#endif
47
namespace X86 {
    // General-purpose registers, in machine-encoding order: each enumerator's
    // value is the register-number field used in opcode/ModRM encodings.
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if PLATFORM(X86_64)
        // Extended registers (require a REX prefix to address).
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    // SSE registers, in encoding order.
    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
82
83class X86Assembler {
84public:
85 typedef X86::RegisterID RegisterID;
86 typedef X86::XMMRegisterID XMMRegisterID;
87
88 typedef enum {
89 ConditionO,
90 ConditionNO,
91 ConditionB,
92 ConditionAE,
93 ConditionE,
94 ConditionNE,
95 ConditionBE,
96 ConditionA,
97 ConditionS,
98 ConditionNS,
99 ConditionP,
100 ConditionNP,
101 ConditionL,
102 ConditionGE,
103 ConditionLE,
104 ConditionG,
105
106 ConditionC = ConditionB,
107 ConditionNC = ConditionAE,
108 } Condition;
109
private:
    // One-byte opcodes (and single-byte prefixes, PRE_*). Suffixes follow the
    // Intel opcode-map operand notation: Ev/Gv are r/m and register operands,
    // Ib/Iz are byte and (sign-extended) 32-bit immediates, EAXOv/OvEAX are
    // the short accumulator/absolute-address move forms.
    typedef enum {
        OP_ADD_EvGv = 0x01,
        OP_ADD_GvEv = 0x03,
        OP_OR_EvGv = 0x09,
        OP_OR_GvEv = 0x0B,
        OP_2BYTE_ESCAPE = 0x0F,
        OP_AND_EvGv = 0x21,
        OP_SUB_EvGv = 0x29,
        OP_SUB_GvEv = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
        OP_XOR_EvGv = 0x31,
        OP_CMP_EvGv = 0x39,
        OP_CMP_GvEv = 0x3B,
#if PLATFORM(X86_64)
        PRE_REX = 0x40,
#endif
        OP_PUSH_EAX = 0x50,
        OP_POP_EAX = 0x58,
#if PLATFORM(X86_64)
        OP_MOVSXD_GvEv = 0x63,
#endif
        PRE_OPERAND_SIZE = 0x66,
        PRE_SSE_66 = 0x66,
        OP_PUSH_Iz = 0x68,
        OP_IMUL_GvEvIz = 0x69,
        OP_GROUP1_EvIz = 0x81,
        OP_GROUP1_EvIb = 0x83,
        OP_TEST_EvGv = 0x85,
        OP_XCHG_EvGv = 0x87,
        OP_MOV_EvGv = 0x89,
        OP_MOV_GvEv = 0x8B,
        OP_LEA = 0x8D,
        OP_GROUP1A_Ev = 0x8F,
        OP_CDQ = 0x99,
        OP_MOV_EAXOv = 0xA1,
        OP_MOV_OvEAX = 0xA3,
        OP_MOV_EAXIv = 0xB8,
        OP_GROUP2_EvIb = 0xC1,
        OP_RET = 0xC3,
        OP_GROUP11_EvIz = 0xC7,
        OP_INT3 = 0xCC,
        OP_GROUP2_Ev1 = 0xD1,
        OP_GROUP2_EvCL = 0xD3,
        OP_CALL_rel32 = 0xE8,
        OP_JMP_rel32 = 0xE9,
        PRE_SSE_F2 = 0xF2,
        OP_HLT = 0xF4,
        OP_GROUP3_EbIb = 0xF6,
        OP_GROUP3_Ev = 0xF7,
        OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP_GROUP5_Ev = 0xFF,
    } OneByteOpcodeID;
163
164 typedef enum {
165 OP2_MOVSD_VsdWsd = 0x10,
166 OP2_MOVSD_WsdVsd = 0x11,
167 OP2_CVTSI2SD_VsdEd = 0x2A,
168 OP2_CVTTSD2SI_GdWsd = 0x2C,
169 OP2_UCOMISD_VsdWsd = 0x2E,
170 OP2_ADDSD_VsdWsd = 0x58,
171 OP2_MULSD_VsdWsd = 0x59,
172 OP2_SUBSD_VsdWsd = 0x5C,
173 OP2_MOVD_VdEd = 0x6E,
174 OP2_MOVD_EdVd = 0x7E,
175 OP2_JCC_rel32 = 0x80,
176 OP_SETCC = 0x90,
177 OP2_IMUL_GvEv = 0xAF,
178 OP2_MOVZX_GvEb = 0xB6,
179 OP2_MOVZX_GvEw = 0xB7,
180 OP2_PEXTRW_GdUdIb = 0xC5,
181 } TwoByteOpcodeID;
182
    // Jcc rel32 opcodes are OP2_JCC_rel32 + the condition's encoding.
    TwoByteOpcodeID jccRel32(Condition cond)
    {
        return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
    }

    // SETcc opcodes are OP_SETCC + the condition's encoding.
    TwoByteOpcodeID setccOpcode(Condition cond)
    {
        return (TwoByteOpcodeID)(OP_SETCC + cond);
    }
192
193 typedef enum {
194 GROUP1_OP_ADD = 0,
195 GROUP1_OP_OR = 1,
196 GROUP1_OP_ADC = 2,
197 GROUP1_OP_AND = 4,
198 GROUP1_OP_SUB = 5,
199 GROUP1_OP_XOR = 6,
200 GROUP1_OP_CMP = 7,
201
202 GROUP1A_OP_POP = 0,
203
204 GROUP2_OP_SHL = 4,
205 GROUP2_OP_SAR = 7,
206
207 GROUP3_OP_TEST = 0,
208 GROUP3_OP_NOT = 2,
209 GROUP3_OP_IDIV = 7,
210
211 GROUP5_OP_CALLN = 2,
212 GROUP5_OP_JMPN = 4,
213 GROUP5_OP_PUSH = 6,
214
215 GROUP11_MOV = 0,
216 } GroupOpcodeID;
217
218 class X86InstructionFormatter;
219public:
220
221 class JmpSrc {
222 friend class X86Assembler;
223 friend class X86InstructionFormatter;
224 public:
225 JmpSrc()
226 : m_offset(-1)
227 {
228 }
229
230 private:
231 JmpSrc(int offset)
232 : m_offset(offset)
233 {
234 }
235
236 int m_offset;
237 };
238
239 class JmpDst {
240 friend class X86Assembler;
241 friend class X86InstructionFormatter;
242 public:
243 JmpDst()
244 : m_offset(-1)
245 , m_used(false)
246 {
247 }
248
249 bool isUsed() const { return m_used; }
250 void used() { m_used = true; }
251 private:
252 JmpDst(int offset)
253 : m_offset(offset)
254 , m_used(false)
255 {
256 ASSERT(m_offset == offset);
257 }
258
259 int m_offset : 31;
260 bool m_used : 1;
261 };
262
    X86Assembler()
    {
    }

    // Number of bytes of code emitted into the buffer so far.
    size_t size() const { return m_formatter.size(); }
268
    // Stack operations:

    // push/pop of a register use the short forms based on OP_PUSH_EAX /
    // OP_POP_EAX (the register is presumably folded into the opcode's low
    // bits by the formatter — implementation not shown in this file region).
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    // Push a 32-bit immediate.
    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    // Push the value at offset(base).
    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    // Pop into the memory location at offset(base).
    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
296
    // Arithmetic operations:
    //
    // Naming convention (used throughout this class): trailing letters name
    // the operands — 'r' = register, 'm' = memory at offset(base), optionally
    // indexed, 'i' = immediate; 'l' forms operate on 32-bit values, 'q' forms
    // on 64-bit. Immediates that fit in a signed byte are emitted in the
    // short sign-extended Ib encoding, otherwise the 32-bit Iz form is used.

#if !PLATFORM(X86_64)
    // Add-with-carry an immediate into the 32-bit value at an absolute address.
    void adcl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit only: add an immediate to the value at an absolute address.
    void addl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
383
    // Bitwise AND; short Ib immediate form used when the value fits a signed byte.
    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit only: AND an immediate into the value at an absolute address.
    void andl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
439
    // Bitwise NOT of a 32-bit register.
    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    // Bitwise OR; short Ib immediate form used when the value fits a signed byte.
    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit only: OR an immediate into the value at an absolute address.
    void orl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
505
    // Subtraction; short Ib immediate form used when the value fits a signed byte.
    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit only: subtract an immediate from the value at an absolute address.
    void subl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
566
    // Bitwise XOR; short Ib immediate form used when the value fits a signed byte.
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
600
    // Shifts: shift-by-1 uses the dedicated one-operand encoding (Ev,1);
    // other counts use the Ev,Ib immediate form. _CLr variants shift by the
    // count in the CL register.
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

#if PLATFORM(X86_64)
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
647
    // Signed multiply: dst *= src.
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    // Three-operand signed multiply: dst = src * value (imul r, r/m, imm32).
    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    // Signed divide of edx:eax by the given register (quotient -> eax,
    // remainder -> edx, per the x86 idiv instruction).
    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
663
    // Comparisons:
    //
    // The _force32 variants always emit the 32-bit immediate form even when
    // the value would fit in a byte, so the instruction has a fixed length
    // (presumably so the immediate field can later be repatched — TODO
    // confirm against callers).

    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    // Compare immediate against memory at offset(base, index, scale).
    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }
725
#if PLATFORM(X86_64)
    // 64-bit compares (REX.W-prefixed via oneByteOp64).
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    // 32-bit only: compares against an absolute address.
    void cmpl_rm(RegisterID reg, void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    // 16-bit compares: the operand-size prefix (0x66) switches the
    // instruction to 16-bit operands.
    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            // With the 0x66 prefix the Iz immediate is 16 bits wide.
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
810
    // test (non-destructive AND, sets flags only):

    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    // test's immediate form has no short Ib encoding, so a full 32-bit
    // immediate is always emitted.
    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

#if PLATFORM(X86_64)
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
#endif

    // 16-bit test (operand-size prefix).
    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    // 8-bit test of a register's low byte against an immediate.
    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }

    // SETcc: set the destination's low byte to 0/1 according to `cond`.
    // The group opcode field is unused (0) for setcc.
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    // Alias: setz is the same encoding as sete.
    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    // Alias: setnz is the same encoding as setne.
    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
895
    // Various move ops:

    // Sign-extend eax into edx:eax.
    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }

    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

#if PLATFORM(X86_64)
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
#endif

    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    // _disp32 variants force a full 32-bit displacement even when the offset
    // would fit in a byte, keeping the instruction length fixed.
    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // Load eax from an absolute address, using the short accumulator/moffs
    // encoding (the address is emitted as a pointer-sized immediate).
    void movl_mEAX(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if PLATFORM(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // mov immediate to register (register folded into the OP_MOV_EAXIv base opcode).
    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // Store eax to an absolute address (short accumulator/moffs encoding).
    void movl_EAXm(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if PLATFORM(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
981
#if PLATFORM(X86_64)
    // 64-bit moves (REX.W-prefixed via oneByteOp64).
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    // _disp32: force a full 32-bit displacement (fixed instruction length).
    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // Load/store rax from/to an absolute address (accumulator/moffs encoding).
    void movq_mEAX(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_EAXm(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // Store a sign-extended 32-bit immediate into a 64-bit memory location.
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // Load a full 64-bit immediate into a register (movabs form).
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    // Sign-extend a 32-bit register into a 64-bit register.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }


#else
    // 32-bit only: moves to/from absolute addresses; the accumulator gets the
    // shorter moffs encoding when possible.
    void movl_rm(RegisterID src, void* addr)
    {
        if (src == X86::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    void movl_mr(void* addr, RegisterID dst)
    {
        if (dst == X86::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    void movl_i32m(int imm, void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
#endif
1071
    // Zero-extending loads:

    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }

    // Load effective address.
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
#if PLATFORM(X86_64)
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
#endif
1100
    // Flow control:

    // Emit a call with an as-yet-unlinked rel32 displacement; the returned
    // JmpSrc identifies the 32-bit field for later linking.
    JmpSrc call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register; the JmpSrc labels the end of the
    // call instruction.
    JmpSrc call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return JmpSrc(m_formatter.size());
    }

    // Indirect call through memory at offset(base).
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    // Emit an unconditional jump with an as-yet-unlinked rel32 displacement.
    JmpSrc jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return a JmpSrc so we have a label to the jump, so we can use this
    // To make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    JmpSrc jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return JmpSrc(m_formatter.size());
    }

    // Indirect jump through memory at offset(base).
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
1139
1140 JmpSrc jne()
1141 {
1142 m_formatter.twoByteOp(jccRel32(ConditionNE));
1143 return m_formatter.immediateRel32();
1144 }
1145
1146 JmpSrc jnz()
1147 {
1148 return jne();
1149 }
1150
1151 JmpSrc je()
1152 {
1153 m_formatter.twoByteOp(jccRel32(ConditionE));
1154 return m_formatter.immediateRel32();
1155 }
1156
1157 JmpSrc jl()
1158 {
1159 m_formatter.twoByteOp(jccRel32(ConditionL));
1160 return m_formatter.immediateRel32();
1161 }
1162
1163 JmpSrc jb()
1164 {
1165 m_formatter.twoByteOp(jccRel32(ConditionB));
1166 return m_formatter.immediateRel32();
1167 }
1168
1169 JmpSrc jle()
1170 {
1171 m_formatter.twoByteOp(jccRel32(ConditionLE));
1172 return m_formatter.immediateRel32();
1173 }
1174
1175 JmpSrc jbe()
1176 {
1177 m_formatter.twoByteOp(jccRel32(ConditionBE));
1178 return m_formatter.immediateRel32();
1179 }
1180
1181 JmpSrc jge()
1182 {
1183 m_formatter.twoByteOp(jccRel32(ConditionGE));
1184 return m_formatter.immediateRel32();
1185 }
1186
1187 JmpSrc jg()
1188 {
1189 m_formatter.twoByteOp(jccRel32(ConditionG));
1190 return m_formatter.immediateRel32();
1191 }
1192
1193 JmpSrc ja()
1194 {
1195 m_formatter.twoByteOp(jccRel32(ConditionA));
1196 return m_formatter.immediateRel32();
1197 }
1198
1199 JmpSrc jae()
1200 {
1201 m_formatter.twoByteOp(jccRel32(ConditionAE));
1202 return m_formatter.immediateRel32();
1203 }
1204
1205 JmpSrc jo()
1206 {
1207 m_formatter.twoByteOp(jccRel32(ConditionO));
1208 return m_formatter.immediateRel32();
1209 }
1210
1211 JmpSrc jp()
1212 {
1213 m_formatter.twoByteOp(jccRel32(ConditionP));
1214 return m_formatter.immediateRel32();
1215 }
1216
1217 JmpSrc js()
1218 {
1219 m_formatter.twoByteOp(jccRel32(ConditionS));
1220 return m_formatter.immediateRel32();
1221 }
1222
1223 JmpSrc jCC(Condition cond)
1224 {
1225 m_formatter.twoByteOp(jccRel32(cond));
1226 return m_formatter.immediateRel32();
1227 }
1228
1229 // SSE operations:
1230
    // addsd dst, src — scalar double add: dst += src.
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // addsd with a memory source: dst += *(double*)(base + offset).
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // cvtsi2sd — convert a 32-bit integer register to a scalar double.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    // cvttsd2si — convert a scalar double to a 32-bit integer, truncating.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    // movd — move the low 32 bits of an XMM register to a GPR.
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

#if PLATFORM(X86_64)
    // movq — move 64 bits, XMM to GPR (REX.W form of movd).
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    // movq — move 64 bits, GPR to XMM.
    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
#endif

    // movsd — store a scalar double to memory: *(base + offset) = src.
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    // movsd — load a scalar double from memory: dst = *(base + offset).
    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // mulsd dst, src — scalar double multiply: dst *= src.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // mulsd with a memory source: dst *= *(double*)(base + offset).
    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // pextrw — extract 16-bit word number 'whichWord' from src into dst.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    // subsd dst, src — scalar double subtract: dst -= src.
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    // subsd with a memory source: dst -= *(double*)(base + offset).
    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // ucomisd — unordered compare of scalar doubles, setting EFLAGS.
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1323
1324 // Misc instructions:
1325
    // int3 — software breakpoint trap.
    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    // Near return.
    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    // Plant a branch-not-taken hint prefix; applies to the next emitted
    // conditional branch instruction.
    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
1340
1341 // Assembler admin methods:
1342
    // Record the current buffer offset as a link/patch target.
    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }

    // Pad the buffer with hlt bytes until the given alignment is reached,
    // then return a label at the aligned position.
    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
1355
1356 // Linking & patching:
1357
    // Link a jump to a label within the assembler's own buffer (before the
    // code has been copied out). JmpSrc references the end of the rel32
    // field, so the displacement is written at [-1] and is relative to the
    // end of the instruction.
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);

        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
    }

    // Link a jump in already-copied code ('code' is the copy's base address)
    // to an absolute target. The displacement must fit in 32 bits.
    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);
        ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
        ASSERT(linkOffset == static_cast<int>(linkOffset));
        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset;
    }

    // Rewrite the opcode byte at 'where' to LEA, converting a load into an
    // address computation. NOTE(review): only the opcode byte changes — this
    // presumably relies on the caller pointing at a load whose ModRM/operand
    // bytes are also valid for LEA.
    static void patchLoadToLEA(intptr_t where)
    {
        unsigned char* ptr = reinterpret_cast<unsigned char*>(where);
        ptr[0] = static_cast<unsigned char>(OP_LEA);
    }

    // Repoint a rel32 jump; 'where' is the address just past the instruction
    // (i.e. past the displacement field, which is rewritten at [-1]).
    static void patchJump(intptr_t where, void* destination)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
        ASSERT(offset == static_cast<int32_t>(offset));
        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
    }

#if PLATFORM(X86_64)
    // FIXME: transition these functions out of here - the assembler
    // shouldn't know that this is a mov/call pair using r11. :-/
    // On x86-64 a MacroAssembler call is emitted as a 64-bit move of the
    // target into r11 followed by an indirect call; patch the move's
    // immediate. (NOTE(review): "REPTACH" is a long-standing typo for
    // "REPATCH" in the constant's declaration elsewhere in this file.)
    static void patchMacroAssemblerCall(intptr_t where, void* destination)
    {
        patchAddress(reinterpret_cast<void*>(where - REPTACH_OFFSET_CALL_R11), JmpDst(0), destination);
    }
#else
    // On x86 a MacroAssembler call is a plain rel32 call; rewrite the
    // displacement at [-1], relative to the end of the instruction.
    static void patchMacroAssemblerCall(intptr_t where, void* destination)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
        ASSERT(offset == static_cast<int32_t>(offset));
        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
    }
#endif

    // Link a rel32 call to a label within the assembler's own buffer;
    // same [-1] displacement convention as linkJump() above.
    void linkCall(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);

        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
    }

    // Link a rel32 call in already-copied code to an absolute target.
    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);
        ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
        ASSERT(linkOffset == static_cast<int>(linkOffset));
        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset;
    }

    // Repoint a rel32 call; 'where' is the address just past the instruction.
    static void patchCall(intptr_t where, void* destination)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
        ASSERT(offset == static_cast<int32_t>(offset));
        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
    }

    // Rewrite a pointer-sized immediate ending at code + position.m_offset.
    static void patchAddress(void* code, JmpDst position, void* value)
    {
        ASSERT(position.m_offset != -1);

        reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
    }

    // The JmpSrc from call() references the end of the instruction, so its
    // offset is the call's return-address offset within the buffer.
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }

    // Translate a buffer offset to an absolute address within copied code.
    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }

    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }

    // Byte distances between recorded positions (dst - src), in all
    // JmpSrc/JmpDst pairings.
    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    // Rewrite a 32-bit immediate ending at 'where'.
    static void patchImmediate(intptr_t where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    // Rewrite a pointer-sized immediate ending at 'where'.
    static void patchPointer(intptr_t where, intptr_t value)
    {
        reinterpret_cast<intptr_t*>(where)[-1] = value;
    }

    // Copy the generated code into executable memory from the given pool.
    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        ASSERT(copy);
        return copy;
    }
1482
1483private:
1484
    // Low-level instruction encoder: writes prefixes, opcodes, ModRM/SIB
    // bytes and immediates into the AssemblerBuffer, planting REX prefixes
    // on x86-64 as required.
    class X86InstructionFormatter {

        // Upper bound on the bytes any single op formatter may emit; each
        // formatter reserves this much up front so the subsequent
        // putByteUnchecked/putIntUnchecked writes need no bounds checks.
        static const int maxInstructionSize = 16;

    public:

        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }

        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        //   * None - instruction takes no operands.
        //   * One register - the low three bits of the RegisterID are added into the opcode.
        //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).

        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        // Register-in-opcode form: the low three bits of reg are folded into
        // the opcode byte itself (no ModRM).
        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        // Register-register (or group-opcode) ModRM form.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        // Register + base/offset memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // As above, but always emits a 32-bit displacement (fixed-size
        // encoding, e.g. for instructions that will be repatched later).
        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        // Register + base/index/scale/offset (SIB) memory operand.
        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if !PLATFORM(X86_64)
        // Absolute-address memory operand; only encodable on 32-bit x86.
        void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif

        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

#if PLATFORM(X86_64)
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.W prefix.
        // When planting d64 or f64 instructions, not requiring a REX.W prefix,
        // the normal (non-'64'-postfixed) formatters should be used.

        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        // 64-bit form with a forced 32-bit displacement (fixed-size encoding).
        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
#endif

        // Byte-operands:
        //
        // These methods format byte operations. Byte operations differ from the normal
        // formatters in the circumstances under which they will decide to emit REX prefixes.
        // These should be used where any register operand signifies a byte register.
        //
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64. These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated. In order to ensure the correct registers spl..dil are selected a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions where a mix of operand sizes, in which
        // case an unnecessary REX will be emitted, for example:
        //     movzbl %al, %edi
        // In this case a REX will be planted since edi is 7 (and were this a byte operand
        // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
        // be silently ignored by the processor.
        //
        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
        // is provided to check byte register operands.

        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // Immediates:
        //
        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        // Plant a zero rel32 displacement placeholder; the returned JmpSrc
        // references the end of the field, to be rewritten at link time.
        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }

        // Administrative methods:

        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }

    private:

        // Internals; ModRm and REX formatters.

        // ebp in the base slot of mod=00 means "disp32, no base"; esp in the
        // rm/base slot signals that a SIB byte follows; esp in the index slot
        // of a SIB means "no index".
        static const RegisterID noBase = X86::ebp;
        static const RegisterID hasSib = X86::esp;
        static const RegisterID noIndex = X86::esp;
#if PLATFORM(X86_64)
        // r13 and r12 alias ebp/esp in the low three ModRM bits, so they
        // trigger the same special cases.
        static const RegisterID noBase2 = X86::r13;
        static const RegisterID hasSib2 = X86::r12;

        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86::r8);
        }

        // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers being accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86::esp);
        }

        // Format a REX prefix byte (0100WRXB): W = operand-size, and R, X, B
        // extend the ModRM reg, SIB index, and rm/base fields respectively.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
        }

        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif

        // ModRM 'mod' field values; enumerator order matches the encoding
        // (00 = no disp, 01 = disp8, 10 = disp32, 11 = register).
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        // Emit a ModRM byte: mod in bits 7-6, reg in 5-3, rm in 2-0.
        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

        // Emit a ModRM byte selecting a SIB, followed by the SIB byte
        // (scale in bits 7-6, index in 5-3, base in 2-0).
        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }

        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }

        // Emit ModRM (+ SIB, + displacement) for a base/offset memory
        // operand, choosing the shortest displacement encoding.
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
#if PLATFORM(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }

        // As memoryModRM(), but always uses a 32-bit displacement so the
        // encoding has a fixed length (required for repatchable loads).
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }

        // Emit ModRM + SIB (+ displacement) for a base/index/scale/offset
        // memory operand, choosing the shortest displacement encoding.
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if PLATFORM(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }

#if !PLATFORM(X86_64)
        // Absolute 32-bit address operand (32-bit x86 only).
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif

        AssemblerBuffer m_buffer;
    } m_formatter;
1914};
1915
1916} // namespace JSC
1917
1918#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
1919
1920#endif // X86Assembler_h
Note: See TracBrowser for help on using the repository browser.