source: webkit/trunk/JavaScriptCore/assembler/X86Assembler.h@44346

Last change on this file since 44346 was 44341, checked in by [email protected], 16 years ago

2009-06-01 Gavin Barraclough <[email protected]>

Reviewed by Sam "WX" Weinig.

Allow the JIT to operate without relying on use of RWX memory, on platforms where this is supported.

This patch adds a switch to Platform.h (ENABLE_ASSEMBLER_WX_EXCLUSIVE) which enables this mode of operation.
When this flag is set, all executable memory will be allocated RX, and switched to RW only whilst being
modified. Upon completion of code generation the protection is switched back to RX to allow execution.
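
A minimal sketch of the idea (assumptions: the helper name ScopedMakeWritable and the fixed 4096-byte page size are illustrative only; the actual implementation is the ExecutableAllocator code listed below, e.g. JSC::ExecutableAllocator::reprotectRegion): an RAII guard flips the region to RW for the duration of a patch and restores RX on the way out.

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    class ScopedMakeWritable {
    public:
        ScopedMakeWritable(void* start, size_t size)
        {
            // Round the range out to page boundaries; mprotect() requires page-aligned addresses.
            uintptr_t begin = reinterpret_cast<uintptr_t>(start) & ~(kPageSize - 1);
            uintptr_t end = (reinterpret_cast<uintptr_t>(start) + size + kPageSize - 1) & ~(kPageSize - 1);
            m_start = reinterpret_cast<void*>(begin);
            m_size = end - begin;
            mprotect(m_start, m_size, PROT_READ | PROT_WRITE);  // RW while the code is being modified
        }
        ~ScopedMakeWritable()
        {
            mprotect(m_start, m_size, PROT_READ | PROT_EXEC);   // back to RX so the code can run
        }
    private:
        static const size_t kPageSize = 4096;  // assumed here; real code would query the platform
        void* m_start;
        size_t m_size;
    };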

Further optimization will be required before it is desirable to enable this mode of operation by default;
enabling this presently incurs a 5%-10% regression.

(Submitting disabled - no performance impact).

  • assembler/AbstractMacroAssembler.h: (JSC::AbstractMacroAssembler::CodeLocationInstruction::repatchLoadToLEA): (JSC::AbstractMacroAssembler::CodeLocationLabel::fromFunctionPointer): (JSC::AbstractMacroAssembler::CodeLocationJump::relink): (JSC::AbstractMacroAssembler::CodeLocationCall::relink): (JSC::AbstractMacroAssembler::CodeLocationNearCall::relink): (JSC::AbstractMacroAssembler::CodeLocationDataLabel32::repatch): (JSC::AbstractMacroAssembler::CodeLocationDataLabelPtr::repatch): (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkCallerToTrampoline): (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkCallerToFunction): (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkNearCallerToTrampoline): (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkNearCallerToFunction): (JSC::AbstractMacroAssembler::PatchBuffer::PatchBuffer): (JSC::AbstractMacroAssembler::PatchBuffer::~PatchBuffer): (JSC::AbstractMacroAssembler::PatchBuffer::link): (JSC::AbstractMacroAssembler::PatchBuffer::patch): (JSC::AbstractMacroAssembler::PatchBuffer::performFinalization): (JSC::::CodeLocationCommon::nearCallAtOffset): (JSC::::CodeLocationCall::CodeLocationCall): (JSC::::CodeLocationNearCall::CodeLocationNearCall):
  • assembler/AssemblerBuffer.h: (JSC::AssemblerBuffer::executableCopy):
  • assembler/X86Assembler.h: (JSC::CAN_SIGN_EXTEND_U32_64): (JSC::X86Assembler::linkJump): (JSC::X86Assembler::linkCall): (JSC::X86Assembler::patchPointer): (JSC::X86Assembler::relinkJump): (JSC::X86Assembler::relinkCall): (JSC::X86Assembler::repatchInt32): (JSC::X86Assembler::repatchPointer): (JSC::X86Assembler::repatchLoadToLEA): (JSC::X86Assembler::patchInt32): (JSC::X86Assembler::patchRel32):
  • jit/ExecutableAllocator.h: (JSC::ExecutableAllocator::): (JSC::ExecutableAllocator::makeWritable): (JSC::ExecutableAllocator::makeExecutable):
  • jit/ExecutableAllocatorFixedVMPool.cpp: (JSC::FixedVMPoolAllocator::FixedVMPoolAllocator):
  • jit/ExecutableAllocatorPosix.cpp: (JSC::ExecutablePool::systemAlloc): (JSC::ExecutablePool::systemRelease): (JSC::ExecutableAllocator::reprotectRegion):
  • jit/ExecutableAllocatorWin.cpp:
  • jit/JITPropertyAccess.cpp: (JSC::JIT::patchGetByIdSelf): (JSC::JIT::patchPutByIdReplace):
  • wtf/Platform.h:
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef X86Assembler_h
27#define X86Assembler_h
28
29#include <wtf/Platform.h>
30
31#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33#include "AssemblerBuffer.h"
34#include <stdint.h>
35#include <wtf/Assertions.h>
36#include <wtf/Vector.h>
37
38namespace JSC {
39
40inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
41#if PLATFORM(X86_64)
42inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
43inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
44#endif
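// (CAN_SIGN_EXTEND_8_32 drives immediate-size selection in the methods below: for example,
// CAN_SIGN_EXTEND_8_32(8) is true, so addl_ir(8, dst) emits the short one-byte immediate
// form, while a value such as 0x12345 falls back to the 32-bit immediate form.)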
45
46namespace X86 {
47 typedef enum {
48 eax,
49 ecx,
50 edx,
51 ebx,
52 esp,
53 ebp,
54 esi,
55 edi,
56
57#if PLATFORM(X86_64)
58 r8,
59 r9,
60 r10,
61 r11,
62 r12,
63 r13,
64 r14,
65 r15,
66#endif
67 } RegisterID;
68
69 typedef enum {
70 xmm0,
71 xmm1,
72 xmm2,
73 xmm3,
74 xmm4,
75 xmm5,
76 xmm6,
77 xmm7,
78 } XMMRegisterID;
79}
80
81class X86Assembler {
82public:
83 typedef X86::RegisterID RegisterID;
84 typedef X86::XMMRegisterID XMMRegisterID;
85 typedef XMMRegisterID FPRegisterID;
86
87 typedef enum {
88 ConditionO,
89 ConditionNO,
90 ConditionB,
91 ConditionAE,
92 ConditionE,
93 ConditionNE,
94 ConditionBE,
95 ConditionA,
96 ConditionS,
97 ConditionNS,
98 ConditionP,
99 ConditionNP,
100 ConditionL,
101 ConditionGE,
102 ConditionLE,
103 ConditionG,
104
105 ConditionC = ConditionB,
106 ConditionNC = ConditionAE,
107 } Condition;
108
109private:
110 typedef enum {
111 OP_ADD_EvGv = 0x01,
112 OP_ADD_GvEv = 0x03,
113 OP_OR_EvGv = 0x09,
114 OP_OR_GvEv = 0x0B,
115 OP_2BYTE_ESCAPE = 0x0F,
116 OP_AND_EvGv = 0x21,
117 OP_SUB_EvGv = 0x29,
118 OP_SUB_GvEv = 0x2B,
119 PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
120 OP_XOR_EvGv = 0x31,
121 OP_CMP_EvGv = 0x39,
122 OP_CMP_GvEv = 0x3B,
123#if PLATFORM(X86_64)
124 PRE_REX = 0x40,
125#endif
126 OP_PUSH_EAX = 0x50,
127 OP_POP_EAX = 0x58,
128#if PLATFORM(X86_64)
129 OP_MOVSXD_GvEv = 0x63,
130#endif
131 PRE_OPERAND_SIZE = 0x66,
132 PRE_SSE_66 = 0x66,
133 OP_PUSH_Iz = 0x68,
134 OP_IMUL_GvEvIz = 0x69,
135 OP_GROUP1_EvIz = 0x81,
136 OP_GROUP1_EvIb = 0x83,
137 OP_TEST_EvGv = 0x85,
138 OP_XCHG_EvGv = 0x87,
139 OP_MOV_EvGv = 0x89,
140 OP_MOV_GvEv = 0x8B,
141 OP_LEA = 0x8D,
142 OP_GROUP1A_Ev = 0x8F,
143 OP_CDQ = 0x99,
144 OP_MOV_EAXOv = 0xA1,
145 OP_MOV_OvEAX = 0xA3,
146 OP_MOV_EAXIv = 0xB8,
147 OP_GROUP2_EvIb = 0xC1,
148 OP_RET = 0xC3,
149 OP_GROUP11_EvIz = 0xC7,
150 OP_INT3 = 0xCC,
151 OP_GROUP2_Ev1 = 0xD1,
152 OP_GROUP2_EvCL = 0xD3,
153 OP_CALL_rel32 = 0xE8,
154 OP_JMP_rel32 = 0xE9,
155 PRE_SSE_F2 = 0xF2,
156 OP_HLT = 0xF4,
157 OP_GROUP3_EbIb = 0xF6,
158 OP_GROUP3_Ev = 0xF7,
159 OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
160 OP_GROUP5_Ev = 0xFF,
161 } OneByteOpcodeID;
162
163 typedef enum {
164 OP2_MOVSD_VsdWsd = 0x10,
165 OP2_MOVSD_WsdVsd = 0x11,
166 OP2_CVTSI2SD_VsdEd = 0x2A,
167 OP2_CVTTSD2SI_GdWsd = 0x2C,
168 OP2_UCOMISD_VsdWsd = 0x2E,
169 OP2_ADDSD_VsdWsd = 0x58,
170 OP2_MULSD_VsdWsd = 0x59,
171 OP2_SUBSD_VsdWsd = 0x5C,
172 OP2_MOVD_VdEd = 0x6E,
173 OP2_MOVD_EdVd = 0x7E,
174 OP2_JCC_rel32 = 0x80,
175 OP_SETCC = 0x90,
176 OP2_IMUL_GvEv = 0xAF,
177 OP2_MOVZX_GvEb = 0xB6,
178 OP2_MOVZX_GvEw = 0xB7,
179 OP2_PEXTRW_GdUdIb = 0xC5,
180 } TwoByteOpcodeID;
181
182 TwoByteOpcodeID jccRel32(Condition cond)
183 {
184 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
185 }
186
187 TwoByteOpcodeID setccOpcode(Condition cond)
188 {
189 return (TwoByteOpcodeID)(OP_SETCC + cond);
190 }
191
192 typedef enum {
193 GROUP1_OP_ADD = 0,
194 GROUP1_OP_OR = 1,
195 GROUP1_OP_ADC = 2,
196 GROUP1_OP_AND = 4,
197 GROUP1_OP_SUB = 5,
198 GROUP1_OP_XOR = 6,
199 GROUP1_OP_CMP = 7,
200
201 GROUP1A_OP_POP = 0,
202
203 GROUP2_OP_SHL = 4,
204 GROUP2_OP_SAR = 7,
205
206 GROUP3_OP_TEST = 0,
207 GROUP3_OP_NOT = 2,
208 GROUP3_OP_IDIV = 7,
209
210 GROUP5_OP_CALLN = 2,
211 GROUP5_OP_JMPN = 4,
212 GROUP5_OP_PUSH = 6,
213
214 GROUP11_MOV = 0,
215 } GroupOpcodeID;
216
217 class X86InstructionFormatter;
218public:
219
220 class JmpSrc {
221 friend class X86Assembler;
222 friend class X86InstructionFormatter;
223 public:
224 JmpSrc()
225 : m_offset(-1)
226 {
227 }
228
229 private:
230 JmpSrc(int offset)
231 : m_offset(offset)
232 {
233 }
234
235 int m_offset;
236 };
237
238 class JmpDst {
239 friend class X86Assembler;
240 friend class X86InstructionFormatter;
241 public:
242 JmpDst()
243 : m_offset(-1)
244 , m_used(false)
245 {
246 }
247
248 bool isUsed() const { return m_used; }
249 void used() { m_used = true; }
250 private:
251 JmpDst(int offset)
252 : m_offset(offset)
253 , m_used(false)
254 {
255 ASSERT(m_offset == offset);
256 }
257
258 int m_offset : 31;
259 bool m_used : 1;
260 };
261
262 X86Assembler()
263 {
264 }
265
266 size_t size() const { return m_formatter.size(); }
267
268 // Stack operations:
269
270 void push_r(RegisterID reg)
271 {
272 m_formatter.oneByteOp(OP_PUSH_EAX, reg);
273 }
274
275 void pop_r(RegisterID reg)
276 {
277 m_formatter.oneByteOp(OP_POP_EAX, reg);
278 }
279
280 void push_i32(int imm)
281 {
282 m_formatter.oneByteOp(OP_PUSH_Iz);
283 m_formatter.immediate32(imm);
284 }
285
286 void push_m(int offset, RegisterID base)
287 {
288 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
289 }
290
291 void pop_m(int offset, RegisterID base)
292 {
293 m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
294 }
295
296 // Arithmetic operations:
297
298#if !PLATFORM(X86_64)
299 void adcl_im(int imm, void* addr)
300 {
301 if (CAN_SIGN_EXTEND_8_32(imm)) {
302 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
303 m_formatter.immediate8(imm);
304 } else {
305 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
306 m_formatter.immediate32(imm);
307 }
308 }
309#endif
310
311 void addl_rr(RegisterID src, RegisterID dst)
312 {
313 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
314 }
315
316 void addl_mr(int offset, RegisterID base, RegisterID dst)
317 {
318 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
319 }
320
321 void addl_ir(int imm, RegisterID dst)
322 {
323 if (CAN_SIGN_EXTEND_8_32(imm)) {
324 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
325 m_formatter.immediate8(imm);
326 } else {
327 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
328 m_formatter.immediate32(imm);
329 }
330 }
331
332 void addl_im(int imm, int offset, RegisterID base)
333 {
334 if (CAN_SIGN_EXTEND_8_32(imm)) {
335 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
336 m_formatter.immediate8(imm);
337 } else {
338 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
339 m_formatter.immediate32(imm);
340 }
341 }
342
343#if PLATFORM(X86_64)
344 void addq_rr(RegisterID src, RegisterID dst)
345 {
346 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
347 }
348
349 void addq_ir(int imm, RegisterID dst)
350 {
351 if (CAN_SIGN_EXTEND_8_32(imm)) {
352 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
353 m_formatter.immediate8(imm);
354 } else {
355 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
356 m_formatter.immediate32(imm);
357 }
358 }
359
360 void addq_im(int imm, int offset, RegisterID base)
361 {
362 if (CAN_SIGN_EXTEND_8_32(imm)) {
363 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
364 m_formatter.immediate8(imm);
365 } else {
366 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
367 m_formatter.immediate32(imm);
368 }
369 }
370#else
371 void addl_im(int imm, void* addr)
372 {
373 if (CAN_SIGN_EXTEND_8_32(imm)) {
374 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
375 m_formatter.immediate8(imm);
376 } else {
377 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
378 m_formatter.immediate32(imm);
379 }
380 }
381#endif
382
383 void andl_rr(RegisterID src, RegisterID dst)
384 {
385 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
386 }
387
388 void andl_ir(int imm, RegisterID dst)
389 {
390 if (CAN_SIGN_EXTEND_8_32(imm)) {
391 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
392 m_formatter.immediate8(imm);
393 } else {
394 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
395 m_formatter.immediate32(imm);
396 }
397 }
398
399 void andl_im(int imm, int offset, RegisterID base)
400 {
401 if (CAN_SIGN_EXTEND_8_32(imm)) {
402 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
403 m_formatter.immediate8(imm);
404 } else {
405 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
406 m_formatter.immediate32(imm);
407 }
408 }
409
410#if PLATFORM(X86_64)
411 void andq_rr(RegisterID src, RegisterID dst)
412 {
413 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
414 }
415
416 void andq_ir(int imm, RegisterID dst)
417 {
418 if (CAN_SIGN_EXTEND_8_32(imm)) {
419 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
420 m_formatter.immediate8(imm);
421 } else {
422 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
423 m_formatter.immediate32(imm);
424 }
425 }
426#else
427 void andl_im(int imm, void* addr)
428 {
429 if (CAN_SIGN_EXTEND_8_32(imm)) {
430 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
431 m_formatter.immediate8(imm);
432 } else {
433 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
434 m_formatter.immediate32(imm);
435 }
436 }
437#endif
438
439 void notl_r(RegisterID dst)
440 {
441 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
442 }
443
444 void orl_rr(RegisterID src, RegisterID dst)
445 {
446 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
447 }
448
449 void orl_mr(int offset, RegisterID base, RegisterID dst)
450 {
451 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
452 }
453
454 void orl_ir(int imm, RegisterID dst)
455 {
456 if (CAN_SIGN_EXTEND_8_32(imm)) {
457 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
458 m_formatter.immediate8(imm);
459 } else {
460 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
461 m_formatter.immediate32(imm);
462 }
463 }
464
465 void orl_im(int imm, int offset, RegisterID base)
466 {
467 if (CAN_SIGN_EXTEND_8_32(imm)) {
468 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
469 m_formatter.immediate8(imm);
470 } else {
471 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
472 m_formatter.immediate32(imm);
473 }
474 }
475
476#if PLATFORM(X86_64)
477 void orq_rr(RegisterID src, RegisterID dst)
478 {
479 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
480 }
481
482 void orq_ir(int imm, RegisterID dst)
483 {
484 if (CAN_SIGN_EXTEND_8_32(imm)) {
485 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
486 m_formatter.immediate8(imm);
487 } else {
488 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
489 m_formatter.immediate32(imm);
490 }
491 }
492#else
493 void orl_im(int imm, void* addr)
494 {
495 if (CAN_SIGN_EXTEND_8_32(imm)) {
496 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
497 m_formatter.immediate8(imm);
498 } else {
499 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
500 m_formatter.immediate32(imm);
501 }
502 }
503#endif
504
505 void subl_rr(RegisterID src, RegisterID dst)
506 {
507 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
508 }
509
510 void subl_mr(int offset, RegisterID base, RegisterID dst)
511 {
512 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
513 }
514
515 void subl_ir(int imm, RegisterID dst)
516 {
517 if (CAN_SIGN_EXTEND_8_32(imm)) {
518 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
519 m_formatter.immediate8(imm);
520 } else {
521 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
522 m_formatter.immediate32(imm);
523 }
524 }
525
526 void subl_im(int imm, int offset, RegisterID base)
527 {
528 if (CAN_SIGN_EXTEND_8_32(imm)) {
529 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
530 m_formatter.immediate8(imm);
531 } else {
532 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
533 m_formatter.immediate32(imm);
534 }
535 }
536
537#if PLATFORM(X86_64)
538 void subq_rr(RegisterID src, RegisterID dst)
539 {
540 m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
541 }
542
543 void subq_ir(int imm, RegisterID dst)
544 {
545 if (CAN_SIGN_EXTEND_8_32(imm)) {
546 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
547 m_formatter.immediate8(imm);
548 } else {
549 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
550 m_formatter.immediate32(imm);
551 }
552 }
553#else
554 void subl_im(int imm, void* addr)
555 {
556 if (CAN_SIGN_EXTEND_8_32(imm)) {
557 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
558 m_formatter.immediate8(imm);
559 } else {
560 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
561 m_formatter.immediate32(imm);
562 }
563 }
564#endif
565
566 void xorl_rr(RegisterID src, RegisterID dst)
567 {
568 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
569 }
570
571 void xorl_ir(int imm, RegisterID dst)
572 {
573 if (CAN_SIGN_EXTEND_8_32(imm)) {
574 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
575 m_formatter.immediate8(imm);
576 } else {
577 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
578 m_formatter.immediate32(imm);
579 }
580 }
581
582#if PLATFORM(X86_64)
583 void xorq_rr(RegisterID src, RegisterID dst)
584 {
585 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
586 }
587
588 void xorq_ir(int imm, RegisterID dst)
589 {
590 if (CAN_SIGN_EXTEND_8_32(imm)) {
591 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
592 m_formatter.immediate8(imm);
593 } else {
594 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
595 m_formatter.immediate32(imm);
596 }
597 }
598#endif
599
600 void sarl_i8r(int imm, RegisterID dst)
601 {
602 if (imm == 1)
603 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
604 else {
605 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
606 m_formatter.immediate8(imm);
607 }
608 }
609
610 void sarl_CLr(RegisterID dst)
611 {
612 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
613 }
614
615 void shll_i8r(int imm, RegisterID dst)
616 {
617 if (imm == 1)
618 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
619 else {
620 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
621 m_formatter.immediate8(imm);
622 }
623 }
624
625 void shll_CLr(RegisterID dst)
626 {
627 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
628 }
629
630#if PLATFORM(X86_64)
631 void sarq_CLr(RegisterID dst)
632 {
633 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
634 }
635
636 void sarq_i8r(int imm, RegisterID dst)
637 {
638 if (imm == 1)
639 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
640 else {
641 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
642 m_formatter.immediate8(imm);
643 }
644 }
645#endif
646
647 void imull_rr(RegisterID src, RegisterID dst)
648 {
649 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
650 }
651
652 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
653 {
654 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
655 m_formatter.immediate32(value);
656 }
657
658 void idivl_r(RegisterID dst)
659 {
660 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
661 }
662
663 // Comparisons:
664
665 void cmpl_rr(RegisterID src, RegisterID dst)
666 {
667 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
668 }
669
670 void cmpl_rm(RegisterID src, int offset, RegisterID base)
671 {
672 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
673 }
674
675 void cmpl_mr(int offset, RegisterID base, RegisterID src)
676 {
677 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
678 }
679
680 void cmpl_ir(int imm, RegisterID dst)
681 {
682 if (CAN_SIGN_EXTEND_8_32(imm)) {
683 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
684 m_formatter.immediate8(imm);
685 } else {
686 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
687 m_formatter.immediate32(imm);
688 }
689 }
690
691 void cmpl_ir_force32(int imm, RegisterID dst)
692 {
693 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
694 m_formatter.immediate32(imm);
695 }
696
697 void cmpl_im(int imm, int offset, RegisterID base)
698 {
699 if (CAN_SIGN_EXTEND_8_32(imm)) {
700 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
701 m_formatter.immediate8(imm);
702 } else {
703 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
704 m_formatter.immediate32(imm);
705 }
706 }
707
708 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
709 {
710 if (CAN_SIGN_EXTEND_8_32(imm)) {
711 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
712 m_formatter.immediate8(imm);
713 } else {
714 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
715 m_formatter.immediate32(imm);
716 }
717 }
718
719 void cmpl_im_force32(int imm, int offset, RegisterID base)
720 {
721 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
722 m_formatter.immediate32(imm);
723 }
724
725#if PLATFORM(X86_64)
726 void cmpq_rr(RegisterID src, RegisterID dst)
727 {
728 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
729 }
730
731 void cmpq_rm(RegisterID src, int offset, RegisterID base)
732 {
733 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
734 }
735
736 void cmpq_mr(int offset, RegisterID base, RegisterID src)
737 {
738 m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
739 }
740
741 void cmpq_ir(int imm, RegisterID dst)
742 {
743 if (CAN_SIGN_EXTEND_8_32(imm)) {
744 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
745 m_formatter.immediate8(imm);
746 } else {
747 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
748 m_formatter.immediate32(imm);
749 }
750 }
751
752 void cmpq_im(int imm, int offset, RegisterID base)
753 {
754 if (CAN_SIGN_EXTEND_8_32(imm)) {
755 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
756 m_formatter.immediate8(imm);
757 } else {
758 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
759 m_formatter.immediate32(imm);
760 }
761 }
762
763 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
764 {
765 if (CAN_SIGN_EXTEND_8_32(imm)) {
766 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
767 m_formatter.immediate8(imm);
768 } else {
769 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
770 m_formatter.immediate32(imm);
771 }
772 }
773#else
774 void cmpl_rm(RegisterID reg, void* addr)
775 {
776 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
777 }
778
779 void cmpl_im(int imm, void* addr)
780 {
781 if (CAN_SIGN_EXTEND_8_32(imm)) {
782 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
783 m_formatter.immediate8(imm);
784 } else {
785 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
786 m_formatter.immediate32(imm);
787 }
788 }
789#endif
790
791 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
792 {
793 m_formatter.prefix(PRE_OPERAND_SIZE);
794 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
795 }
796
797 void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
798 {
799 if (CAN_SIGN_EXTEND_8_32(imm)) {
800 m_formatter.prefix(PRE_OPERAND_SIZE);
801 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
802 m_formatter.immediate8(imm);
803 } else {
804 m_formatter.prefix(PRE_OPERAND_SIZE);
805 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
806 m_formatter.immediate16(imm);
807 }
808 }
809
810 void testl_rr(RegisterID src, RegisterID dst)
811 {
812 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
813 }
814
815 void testl_i32r(int imm, RegisterID dst)
816 {
817 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
818 m_formatter.immediate32(imm);
819 }
820
821 void testl_i32m(int imm, int offset, RegisterID base)
822 {
823 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
824 m_formatter.immediate32(imm);
825 }
826
827 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
828 {
829 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
830 m_formatter.immediate32(imm);
831 }
832
833#if PLATFORM(X86_64)
834 void testq_rr(RegisterID src, RegisterID dst)
835 {
836 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
837 }
838
839 void testq_i32r(int imm, RegisterID dst)
840 {
841 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
842 m_formatter.immediate32(imm);
843 }
844
845 void testq_i32m(int imm, int offset, RegisterID base)
846 {
847 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
848 m_formatter.immediate32(imm);
849 }
850
851 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
852 {
853 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
854 m_formatter.immediate32(imm);
855 }
856#endif
857
858 void testw_rr(RegisterID src, RegisterID dst)
859 {
860 m_formatter.prefix(PRE_OPERAND_SIZE);
861 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
862 }
863
864 void testb_i8r(int imm, RegisterID dst)
865 {
866 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
867 m_formatter.immediate8(imm);
868 }
869
870 void setCC_r(Condition cond, RegisterID dst)
871 {
872 m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
873 }
874
875 void sete_r(RegisterID dst)
876 {
877 m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
878 }
879
880 void setz_r(RegisterID dst)
881 {
882 sete_r(dst);
883 }
884
885 void setne_r(RegisterID dst)
886 {
887 m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
888 }
889
890 void setnz_r(RegisterID dst)
891 {
892 setne_r(dst);
893 }
894
895 // Various move ops:
896
897 void cdq()
898 {
899 m_formatter.oneByteOp(OP_CDQ);
900 }
901
902 void xchgl_rr(RegisterID src, RegisterID dst)
903 {
904 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
905 }
906
907#if PLATFORM(X86_64)
908 void xchgq_rr(RegisterID src, RegisterID dst)
909 {
910 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
911 }
912#endif
913
914 void movl_rr(RegisterID src, RegisterID dst)
915 {
916 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
917 }
918
919 void movl_rm(RegisterID src, int offset, RegisterID base)
920 {
921 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
922 }
923
924 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
925 {
926 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
927 }
928
929 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
930 {
931 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
932 }
933
934 void movl_mEAX(void* addr)
935 {
936 m_formatter.oneByteOp(OP_MOV_EAXOv);
937#if PLATFORM(X86_64)
938 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
939#else
940 m_formatter.immediate32(reinterpret_cast<int>(addr));
941#endif
942 }
943
944 void movl_mr(int offset, RegisterID base, RegisterID dst)
945 {
946 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
947 }
948
949 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
950 {
951 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
952 }
953
954 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
955 {
956 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
957 }
958
959 void movl_i32r(int imm, RegisterID dst)
960 {
961 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
962 m_formatter.immediate32(imm);
963 }
964
965 void movl_i32m(int imm, int offset, RegisterID base)
966 {
967 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
968 m_formatter.immediate32(imm);
969 }
970
971 void movl_EAXm(void* addr)
972 {
973 m_formatter.oneByteOp(OP_MOV_OvEAX);
974#if PLATFORM(X86_64)
975 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
976#else
977 m_formatter.immediate32(reinterpret_cast<int>(addr));
978#endif
979 }
980
981#if PLATFORM(X86_64)
982 void movq_rr(RegisterID src, RegisterID dst)
983 {
984 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
985 }
986
987 void movq_rm(RegisterID src, int offset, RegisterID base)
988 {
989 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
990 }
991
992 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
993 {
994 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
995 }
996
997 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
998 {
999 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
1000 }
1001
1002 void movq_mEAX(void* addr)
1003 {
1004 m_formatter.oneByteOp64(OP_MOV_EAXOv);
1005 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1006 }
1007
1008 void movq_EAXm(void* addr)
1009 {
1010 m_formatter.oneByteOp64(OP_MOV_OvEAX);
1011 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1012 }
1013
1014 void movq_mr(int offset, RegisterID base, RegisterID dst)
1015 {
1016 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
1017 }
1018
1019 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
1020 {
1021 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
1022 }
1023
1024 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1025 {
1026 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
1027 }
1028
1029 void movq_i32m(int imm, int offset, RegisterID base)
1030 {
1031 m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1032 m_formatter.immediate32(imm);
1033 }
1034
1035 void movq_i64r(int64_t imm, RegisterID dst)
1036 {
1037 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
1038 m_formatter.immediate64(imm);
1039 }
1040
1041 void movsxd_rr(RegisterID src, RegisterID dst)
1042 {
1043 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
1044 }
1045
1046
1047#else
1048 void movl_rm(RegisterID src, void* addr)
1049 {
1050 if (src == X86::eax)
1051 movl_EAXm(addr);
1052 else
1053 m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
1054 }
1055
1056 void movl_mr(void* addr, RegisterID dst)
1057 {
1058 if (dst == X86::eax)
1059 movl_mEAX(addr);
1060 else
1061 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
1062 }
1063
1064 void movl_i32m(int imm, void* addr)
1065 {
1066 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
1067 m_formatter.immediate32(imm);
1068 }
1069#endif
1070
1071 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
1072 {
1073 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
1074 }
1075
1076 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1077 {
1078 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
1079 }
1080
1081 void movzbl_rr(RegisterID src, RegisterID dst)
1082 {
1083 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1084 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1085 // REX prefixes are defined to be silently ignored by the processor.
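// (For example, on x86-64 movzbl_rr(X86::eax, X86::edi) emits 0x40 0x0F 0xB6 0xF8; the
// leading 0x40 REX carries no information and is simply ignored by the processor.)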
1086 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
1087 }
1088
1089 void leal_mr(int offset, RegisterID base, RegisterID dst)
1090 {
1091 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
1092 }
1093#if PLATFORM(X86_64)
1094 void leaq_mr(int offset, RegisterID base, RegisterID dst)
1095 {
1096 m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
1097 }
1098#endif
1099
1100 // Flow control:
1101
1102 JmpSrc call()
1103 {
1104 m_formatter.oneByteOp(OP_CALL_rel32);
1105 return m_formatter.immediateRel32();
1106 }
1107
1108 JmpSrc call(RegisterID dst)
1109 {
1110 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
1111 return JmpSrc(m_formatter.size());
1112 }
1113
1114 void call_m(int offset, RegisterID base)
1115 {
1116 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
1117 }
1118
1119 JmpSrc jmp()
1120 {
1121 m_formatter.oneByteOp(OP_JMP_rel32);
1122 return m_formatter.immediateRel32();
1123 }
1124
1125 // Return a JmpSrc so we have a label to the jump, so we can use this
1126 // to make a tail recursive call on x86-64. The MacroAssembler
1127 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1128 JmpSrc jmp_r(RegisterID dst)
1129 {
1130 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
1131 return JmpSrc(m_formatter.size());
1132 }
1133
1134 void jmp_m(int offset, RegisterID base)
1135 {
1136 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
1137 }
1138
1139 JmpSrc jne()
1140 {
1141 m_formatter.twoByteOp(jccRel32(ConditionNE));
1142 return m_formatter.immediateRel32();
1143 }
1144
1145 JmpSrc jnz()
1146 {
1147 return jne();
1148 }
1149
1150 JmpSrc je()
1151 {
1152 m_formatter.twoByteOp(jccRel32(ConditionE));
1153 return m_formatter.immediateRel32();
1154 }
1155
1156 JmpSrc jl()
1157 {
1158 m_formatter.twoByteOp(jccRel32(ConditionL));
1159 return m_formatter.immediateRel32();
1160 }
1161
1162 JmpSrc jb()
1163 {
1164 m_formatter.twoByteOp(jccRel32(ConditionB));
1165 return m_formatter.immediateRel32();
1166 }
1167
1168 JmpSrc jle()
1169 {
1170 m_formatter.twoByteOp(jccRel32(ConditionLE));
1171 return m_formatter.immediateRel32();
1172 }
1173
1174 JmpSrc jbe()
1175 {
1176 m_formatter.twoByteOp(jccRel32(ConditionBE));
1177 return m_formatter.immediateRel32();
1178 }
1179
1180 JmpSrc jge()
1181 {
1182 m_formatter.twoByteOp(jccRel32(ConditionGE));
1183 return m_formatter.immediateRel32();
1184 }
1185
1186 JmpSrc jg()
1187 {
1188 m_formatter.twoByteOp(jccRel32(ConditionG));
1189 return m_formatter.immediateRel32();
1190 }
1191
1192 JmpSrc ja()
1193 {
1194 m_formatter.twoByteOp(jccRel32(ConditionA));
1195 return m_formatter.immediateRel32();
1196 }
1197
1198 JmpSrc jae()
1199 {
1200 m_formatter.twoByteOp(jccRel32(ConditionAE));
1201 return m_formatter.immediateRel32();
1202 }
1203
1204 JmpSrc jo()
1205 {
1206 m_formatter.twoByteOp(jccRel32(ConditionO));
1207 return m_formatter.immediateRel32();
1208 }
1209
1210 JmpSrc jp()
1211 {
1212 m_formatter.twoByteOp(jccRel32(ConditionP));
1213 return m_formatter.immediateRel32();
1214 }
1215
1216 JmpSrc js()
1217 {
1218 m_formatter.twoByteOp(jccRel32(ConditionS));
1219 return m_formatter.immediateRel32();
1220 }
1221
1222 JmpSrc jCC(Condition cond)
1223 {
1224 m_formatter.twoByteOp(jccRel32(cond));
1225 return m_formatter.immediateRel32();
1226 }
1227
1228 // SSE operations:
1229
1230 void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
1231 {
1232 m_formatter.prefix(PRE_SSE_F2);
1233 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1234 }
1235
1236 void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1237 {
1238 m_formatter.prefix(PRE_SSE_F2);
1239 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
1240 }
1241
1242 void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
1243 {
1244 m_formatter.prefix(PRE_SSE_F2);
1245 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1246 }
1247
1248 void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
1249 {
1250 m_formatter.prefix(PRE_SSE_F2);
1251 m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1252 }
1253
1254 void movd_rr(XMMRegisterID src, RegisterID dst)
1255 {
1256 m_formatter.prefix(PRE_SSE_66);
1257 m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
1258 }
1259
1260#if PLATFORM(X86_64)
1261 void movq_rr(XMMRegisterID src, RegisterID dst)
1262 {
1263 m_formatter.prefix(PRE_SSE_66);
1264 m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
1265 }
1266
1267 void movq_rr(RegisterID src, XMMRegisterID dst)
1268 {
1269 m_formatter.prefix(PRE_SSE_66);
1270 m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
1271 }
1272#endif
1273
1274 void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
1275 {
1276 m_formatter.prefix(PRE_SSE_F2);
1277 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
1278 }
1279
1280 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1281 {
1282 m_formatter.prefix(PRE_SSE_F2);
1283 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1284 }
1285
1286 void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
1287 {
1288 m_formatter.prefix(PRE_SSE_F2);
1289 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1290 }
1291
1292 void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1293 {
1294 m_formatter.prefix(PRE_SSE_F2);
1295 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
1296 }
1297
1298 void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
1299 {
1300 m_formatter.prefix(PRE_SSE_66);
1301 m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
1302 m_formatter.immediate8(whichWord);
1303 }
1304
1305 void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
1306 {
1307 m_formatter.prefix(PRE_SSE_F2);
1308 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1309 }
1310
1311 void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1312 {
1313 m_formatter.prefix(PRE_SSE_F2);
1314 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
1315 }
1316
1317 void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
1318 {
1319 m_formatter.prefix(PRE_SSE_66);
1320 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1321 }
1322
1323 // Misc instructions:
1324
1325 void int3()
1326 {
1327 m_formatter.oneByteOp(OP_INT3);
1328 }
1329
1330 void ret()
1331 {
1332 m_formatter.oneByteOp(OP_RET);
1333 }
1334
1335 void predictNotTaken()
1336 {
1337 m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
1338 }
1339
1340 // Assembler admin methods:
1341
1342 JmpDst label()
1343 {
1344 return JmpDst(m_formatter.size());
1345 }
1346
1347 JmpDst align(int alignment)
1348 {
1349 while (!m_formatter.isAligned(alignment))
1350 m_formatter.oneByteOp(OP_HLT);
1351
1352 return label();
1353 }
1354
1355 // Linking & patching:
1356 //
1357 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1358 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1359 // code has been finalized it is (platform support permitting) within a non-
1360 // writable region of memory; to modify the code in an execute-only executable
1361 // pool, the 'repatch' and 'relink' methods should be used.
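// For example (illustrative usage only): while the code is still in the writable
// AssemblerBuffer a branch is resolved with linkJump(),
//     JmpSrc branch = jne();
//     ...
//     linkJump(branch, label());
// whereas a branch in finalized, no-longer-writable code is retargeted with
// X86Assembler::relinkJump(from, to), which briefly makes the 32-bit relative
// offset writable again before patching it.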
1362
1363 void linkJump(JmpSrc from, JmpDst to)
1364 {
1365 ASSERT(from.m_offset != -1);
1366 ASSERT(to.m_offset != -1);
1367
1368 char* code = reinterpret_cast<char*>(m_formatter.data());
1369 patchRel32(code + from.m_offset, code + to.m_offset);
1370 }
1371
1372 void linkCall(JmpSrc from, JmpDst to)
1373 {
1374 ASSERT(from.m_offset != -1);
1375 ASSERT(to.m_offset != -1);
1376
1377 char* code = reinterpret_cast<char*>(m_formatter.data());
1378 patchRel32(code + from.m_offset, code + to.m_offset);
1379 }
1380
1381 static void linkJump(void* code, JmpSrc from, void* to)
1382 {
1383 ASSERT(from.m_offset != -1);
1384
1385 patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
1386 }
1387
1388 static void linkCall(void* code, JmpSrc from, void* to)
1389 {
1390 ASSERT(from.m_offset != -1);
1391
1392 patchRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
1393 }
1394
1395 static void patchPointer(void* where, void* value)
1396 {
1397 reinterpret_cast<void**>(where)[-1] = value;
1398 }
1399
1400 static void patchPointer(void* code, JmpDst where, void* value)
1401 {
1402 ASSERT(where.m_offset != -1);
1403
1404 patchPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
1405 }
1406
1407 static void relinkJump(void* from, void* to)
1408 {
1409 ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
1410 patchRel32(from, to);
1411 }
1412
1413 static void relinkCall(void* from, void* to)
1414 {
1415 ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(from) - sizeof(int32_t), sizeof(int32_t));
1416 patchRel32(from, to);
1417 }
1418
1419 static void repatchInt32(void* where, int32_t value)
1420 {
1421 ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(int32_t), sizeof(int32_t));
1422 patchInt32(where, value);
1423 }
1424
1425 static void repatchPointer(void* where, void* value)
1426 {
1427 ExecutableAllocator::MakeWritable unprotect(reinterpret_cast<char*>(where) - sizeof(void*), sizeof(void*));
1428 patchPointer(where, value);
1429 }
1430
1431 static void repatchLoadToLEA(void* where)
1432 {
1433 ExecutableAllocator::MakeWritable unprotect(where, 1);
1434 *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
1435 }
1436
1437 static unsigned getCallReturnOffset(JmpSrc call)
1438 {
1439 ASSERT(call.m_offset >= 0);
1440 return call.m_offset;
1441 }
1442
1443 static void* getRelocatedAddress(void* code, JmpSrc jump)
1444 {
1445 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
1446 }
1447
1448 static void* getRelocatedAddress(void* code, JmpDst destination)
1449 {
1450 ASSERT(destination.m_offset != -1);
1451
1452 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
1453 }
1454
1455 static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
1456 {
1457 return dst.m_offset - src.m_offset;
1458 }
1459
1460 static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
1461 {
1462 return dst.m_offset - src.m_offset;
1463 }
1464
1465 static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
1466 {
1467 return dst.m_offset - src.m_offset;
1468 }
1469
1470 void* executableCopy(ExecutablePool* allocator)
1471 {
1472 void* copy = m_formatter.executableCopy(allocator);
1473 ASSERT(copy);
1474 return copy;
1475 }
1476
1477private:
1478
1479 static void patchInt32(void* where, int32_t value)
1480 {
1481 reinterpret_cast<int32_t*>(where)[-1] = value;
1482 }
1483
1484 static void patchRel32(void* from, void* to)
1485 {
1486 intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
1487 ASSERT(offset == static_cast<int32_t>(offset));
1488
1489 patchInt32(from, offset);
1490 }
1491
1492 class X86InstructionFormatter {
1493
1494 static const int maxInstructionSize = 16;
1495
1496 public:
1497
1498 // Legacy prefix bytes:
1499 //
1500 // These are emitted prior to the instruction.
1501
1502 void prefix(OneByteOpcodeID pre)
1503 {
1504 m_buffer.putByte(pre);
1505 }
1506
1507 // Word-sized operands / no operand instruction formatters.
1508 //
1509 // In addition to the opcode, the following operand permutations are supported:
1510 // * None - instruction takes no operands.
1511 // * One register - the low three bits of the RegisterID are added into the opcode.
1512 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1513 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1514 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1515 //
1516 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1517 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1518 //
1519 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
1520
1521 void oneByteOp(OneByteOpcodeID opcode)
1522 {
1523 m_buffer.ensureSpace(maxInstructionSize);
1524 m_buffer.putByteUnchecked(opcode);
1525 }
1526
1527 void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
1528 {
1529 m_buffer.ensureSpace(maxInstructionSize);
1530 emitRexIfNeeded(0, 0, reg);
1531 m_buffer.putByteUnchecked(opcode + (reg & 7));
1532 }
1533
1534 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
1535 {
1536 m_buffer.ensureSpace(maxInstructionSize);
1537 emitRexIfNeeded(reg, 0, rm);
1538 m_buffer.putByteUnchecked(opcode);
1539 registerModRM(reg, rm);
1540 }
1541
1542 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1543 {
1544 m_buffer.ensureSpace(maxInstructionSize);
1545 emitRexIfNeeded(reg, 0, base);
1546 m_buffer.putByteUnchecked(opcode);
1547 memoryModRM(reg, base, offset);
1548 }
1549
1550 void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1551 {
1552 m_buffer.ensureSpace(maxInstructionSize);
1553 emitRexIfNeeded(reg, 0, base);
1554 m_buffer.putByteUnchecked(opcode);
1555 memoryModRM_disp32(reg, base, offset);
1556 }
1557
1558 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1559 {
1560 m_buffer.ensureSpace(maxInstructionSize);
1561 emitRexIfNeeded(reg, index, base);
1562 m_buffer.putByteUnchecked(opcode);
1563 memoryModRM(reg, base, index, scale, offset);
1564 }
1565
1566#if !PLATFORM(X86_64)
1567 void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
1568 {
1569 m_buffer.ensureSpace(maxInstructionSize);
1570 m_buffer.putByteUnchecked(opcode);
1571 memoryModRM(reg, address);
1572 }
1573#endif
1574
1575 void twoByteOp(TwoByteOpcodeID opcode)
1576 {
1577 m_buffer.ensureSpace(maxInstructionSize);
1578 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1579 m_buffer.putByteUnchecked(opcode);
1580 }
1581
1582 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1583 {
1584 m_buffer.ensureSpace(maxInstructionSize);
1585 emitRexIfNeeded(reg, 0, rm);
1586 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1587 m_buffer.putByteUnchecked(opcode);
1588 registerModRM(reg, rm);
1589 }
1590
1591 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
1592 {
1593 m_buffer.ensureSpace(maxInstructionSize);
1594 emitRexIfNeeded(reg, 0, base);
1595 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1596 m_buffer.putByteUnchecked(opcode);
1597 memoryModRM(reg, base, offset);
1598 }
1599
1600 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1601 {
1602 m_buffer.ensureSpace(maxInstructionSize);
1603 emitRexIfNeeded(reg, index, base);
1604 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1605 m_buffer.putByteUnchecked(opcode);
1606 memoryModRM(reg, base, index, scale, offset);
1607 }
1608
1609#if PLATFORM(X86_64)
1610 // Quad-word-sized operands:
1611 //
1612 // Used to format 64-bit operations, planting a REX.w prefix.
1613 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1614 // the normal (non-'64'-postfixed) formatters should be used.
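// For example (illustrative): movq_rr(X86::eax, X86::ecx), i.e. movq %rax, %rcx, emits
// 0x48 0x89 0xC1 - the REX.W prefix followed by the same opcode and ModRM bytes the
// 32-bit movl_rr form would produce.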
1615
1616 void oneByteOp64(OneByteOpcodeID opcode)
1617 {
1618 m_buffer.ensureSpace(maxInstructionSize);
1619 emitRexW(0, 0, 0);
1620 m_buffer.putByteUnchecked(opcode);
1621 }
1622
1623 void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
1624 {
1625 m_buffer.ensureSpace(maxInstructionSize);
1626 emitRexW(0, 0, reg);
1627 m_buffer.putByteUnchecked(opcode + (reg & 7));
1628 }
1629
1630 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
1631 {
1632 m_buffer.ensureSpace(maxInstructionSize);
1633 emitRexW(reg, 0, rm);
1634 m_buffer.putByteUnchecked(opcode);
1635 registerModRM(reg, rm);
1636 }
1637
1638 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1639 {
1640 m_buffer.ensureSpace(maxInstructionSize);
1641 emitRexW(reg, 0, base);
1642 m_buffer.putByteUnchecked(opcode);
1643 memoryModRM(reg, base, offset);
1644 }
1645
1646 void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1647 {
1648 m_buffer.ensureSpace(maxInstructionSize);
1649 emitRexW(reg, 0, base);
1650 m_buffer.putByteUnchecked(opcode);
1651 memoryModRM_disp32(reg, base, offset);
1652 }
1653
1654 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1655 {
1656 m_buffer.ensureSpace(maxInstructionSize);
1657 emitRexW(reg, index, base);
1658 m_buffer.putByteUnchecked(opcode);
1659 memoryModRM(reg, base, index, scale, offset);
1660 }
1661
1662 void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1663 {
1664 m_buffer.ensureSpace(maxInstructionSize);
1665 emitRexW(reg, 0, rm);
1666 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1667 m_buffer.putByteUnchecked(opcode);
1668 registerModRM(reg, rm);
1669 }
1670#endif
1671
1672 // Byte-operands:
1673 //
1674 // These methods format byte operations. Byte operations differ from the normal
1675 // formatters in the circumstances under which they will decide to emit REX prefixes.
1676 // These should be used where any register operand signifies a byte register.
1677 //
1678 // The distinction is due to the handling of register numbers in the range 4..7 on
1679 // x86-64. These register numbers may either represent the second byte of the first
1680 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1681 //
1682 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1683 // be accessed where a REX prefix is present), these are likely best treated as
1684 // deprecated. In order to ensure the correct registers spl..dil are selected a
1685 // REX prefix will be emitted for any byte register operand in the range 4..15.
1686 //
1687 // These formatters may be used in instructions that mix operand sizes, in which
1688 // case an unnecessary REX will be emitted, for example:
1689 // movzbl %al, %edi
1690 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1691 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1692 // be silently ignored by the processor.
1693 //
1694 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1695 // is provided to check byte register operands.
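// For example (illustrative, x86-64): setne_r(X86::edi) goes through twoByteOp8 and so
// plants a REX prefix, emitting 0x40 0x0F 0x95 0xC7; the bare 0x40 REX is what makes
// register number 7 select %dil rather than %bh.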
1696
1697 void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
1698 {
1699 m_buffer.ensureSpace(maxInstructionSize);
1700 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
1701 m_buffer.putByteUnchecked(opcode);
1702 registerModRM(groupOp, rm);
1703 }
1704
1705 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
1706 {
1707 m_buffer.ensureSpace(maxInstructionSize);
1708 emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
1709 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1710 m_buffer.putByteUnchecked(opcode);
1711 registerModRM(reg, rm);
1712 }
1713
1714 void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
1715 {
1716 m_buffer.ensureSpace(maxInstructionSize);
1717 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
1718 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1719 m_buffer.putByteUnchecked(opcode);
1720 registerModRM(groupOp, rm);
1721 }
1722
1723 // Immediates:
1724 //
1725 // An immediate should be appended where appropriate after an op has been emitted.
1726 // The writes are unchecked since the opcode formatters above will have ensured space.
1727
1728 void immediate8(int imm)
1729 {
1730 m_buffer.putByteUnchecked(imm);
1731 }
1732
1733 void immediate16(int imm)
1734 {
1735 m_buffer.putShortUnchecked(imm);
1736 }
1737
1738 void immediate32(int imm)
1739 {
1740 m_buffer.putIntUnchecked(imm);
1741 }
1742
1743 void immediate64(int64_t imm)
1744 {
1745 m_buffer.putInt64Unchecked(imm);
1746 }
1747
1748 JmpSrc immediateRel32()
1749 {
1750 m_buffer.putIntUnchecked(0);
1751 return JmpSrc(m_buffer.size());
1752 }
1753
1754 // Administrative methods:
1755
1756 size_t size() const { return m_buffer.size(); }
1757 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
1758 void* data() const { return m_buffer.data(); }
1759 void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
1760
1761 private:
1762
1763 // Internals; ModRm and REX formatters.
1764
1765 static const RegisterID noBase = X86::ebp;
1766 static const RegisterID hasSib = X86::esp;
1767 static const RegisterID noIndex = X86::esp;
1768#if PLATFORM(X86_64)
1769 static const RegisterID noBase2 = X86::r13;
1770 static const RegisterID hasSib2 = X86::r12;
1771
1772 // Registers r8 & above require a REX prefix.
1773 inline bool regRequiresRex(int reg)
1774 {
1775 return (reg >= X86::r8);
1776 }
1777
1778 // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers being accessed).
1779 inline bool byteRegRequiresRex(int reg)
1780 {
1781 return (reg >= X86::esp);
1782 }
1783
1784 // Format a REX prefix byte.
1785 inline void emitRex(bool w, int r, int x, int b)
1786 {
1787 m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
1788 }
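// (For example, emitRex(true, 0, 0, X86::r8) emits 0x49: the fixed 0100 pattern with
// the W and B bits set.)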
1789
1790 // Used to plant a REX byte with REX.w set (for 64-bit operations).
1791 inline void emitRexW(int r, int x, int b)
1792 {
1793 emitRex(true, r, x, b);
1794 }
1795
1796 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
1797 // regRequiresRex() to check other registers (i.e. address base & index).
1798 inline void emitRexIf(bool condition, int r, int x, int b)
1799 {
1800 if (condition) emitRex(false, r, x, b);
1801 }
1802
1803 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
1804 inline void emitRexIfNeeded(int r, int x, int b)
1805 {
1806 emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
1807 }
1808#else
1809 // No REX prefix bytes on 32-bit x86.
1810 inline bool regRequiresRex(int) { return false; }
1811 inline bool byteRegRequiresRex(int) { return false; }
1812 inline void emitRexIf(bool, int, int, int) {}
1813 inline void emitRexIfNeeded(int, int, int) {}
1814#endif
1815
1816 enum ModRmMode {
1817 ModRmMemoryNoDisp,
1818 ModRmMemoryDisp8,
1819 ModRmMemoryDisp32,
1820 ModRmRegister,
1821 };
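// (The enumerator values line up with the ModRM 'mod' field emitted by putModRm() below:
// 00 = indirect with no displacement, 01 = disp8, 10 = disp32, 11 = register direct.)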
1822
1823 void putModRm(ModRmMode mode, int reg, RegisterID rm)
1824 {
1825 m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
1826 }
1827
1828 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
1829 {
1830 ASSERT(mode != ModRmRegister);
1831
1832 putModRm(mode, reg, hasSib);
1833 m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
1834 }
1835
1836 void registerModRM(int reg, RegisterID rm)
1837 {
1838 putModRm(ModRmRegister, reg, rm);
1839 }
1840
1841 void memoryModRM(int reg, RegisterID base, int offset)
1842 {
1843 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
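// (For example, movl_rm(X86::eax, 0, X86::esp) emits 0x89 0x04 0x24: the ModRM byte 0x04
// requests a SIB byte, and the SIB byte 0x24 encodes no index with base = esp.)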
1844#if PLATFORM(X86_64)
1845 if ((base == hasSib) || (base == hasSib2)) {
1846#else
1847 if (base == hasSib) {
1848#endif
1849 if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
1850 putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
1851 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1852 putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
1853 m_buffer.putByteUnchecked(offset);
1854 } else {
1855 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
1856 m_buffer.putIntUnchecked(offset);
1857 }
1858 } else {
1859#if PLATFORM(X86_64)
1860 if (!offset && (base != noBase) && (base != noBase2))
1861#else
1862 if (!offset && (base != noBase))
1863#endif
1864 putModRm(ModRmMemoryNoDisp, reg, base);
1865 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1866 putModRm(ModRmMemoryDisp8, reg, base);
1867 m_buffer.putByteUnchecked(offset);
1868 } else {
1869 putModRm(ModRmMemoryDisp32, reg, base);
1870 m_buffer.putIntUnchecked(offset);
1871 }
1872 }
1873 }
1874
1875 void memoryModRM_disp32(int reg, RegisterID base, int offset)
1876 {
1877 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
1878#if PLATFORM(X86_64)
1879 if ((base == hasSib) || (base == hasSib2)) {
1880#else
1881 if (base == hasSib) {
1882#endif
1883 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
1884 m_buffer.putIntUnchecked(offset);
1885 } else {
1886 putModRm(ModRmMemoryDisp32, reg, base);
1887 m_buffer.putIntUnchecked(offset);
1888 }
1889 }
1890
1891 void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
1892 {
1893 ASSERT(index != noIndex);
1894
1895#if PLATFORM(X86_64)
1896 if (!offset && (base != noBase) && (base != noBase2))
1897#else
1898 if (!offset && (base != noBase))
1899#endif
1900 putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
1901 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1902 putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
1903 m_buffer.putByteUnchecked(offset);
1904 } else {
1905 putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
1906 m_buffer.putIntUnchecked(offset);
1907 }
1908 }
1909
1910#if !PLATFORM(X86_64)
1911 void memoryModRM(int reg, void* address)
1912 {
1913 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
1914 putModRm(ModRmMemoryNoDisp, reg, noBase);
1915 m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
1916 }
1917#endif
1918
1919 AssemblerBuffer m_buffer;
1920 } m_formatter;
1921};
1922
1923} // namespace JSC
1924
1925#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
1926
1927#endif // X86Assembler_h