source: webkit/trunk/JavaScriptCore/assembler/X86Assembler.h@ 39738

Last change on this file since 39738 was 39738, checked in by [email protected], 16 years ago

2009-01-08 Gavin Barraclough <[email protected]>

Reviewed by Oliver Hunt.

Encode immediates in the low word of JSValuePtrs, on x86-64.

On 32-bit platforms a JSValuePtr may represent a 31-bit signed integer.
On 64-bit platforms, if USE(ALTERNATE_JSIMMEDIATE) is defined, a full
32-bit integer may be stored in an immediate.


Presently USE(ALTERNATE_JSIMMEDIATE) uses the same encoding as the default
immediate format - the value is left-shifted by one, so a one-bit tag can
be added to indicate the value is an immediate. However this means that
values must commonly be detagged (by right-shifting by one) before
arithmetic operations can be performed on immediates. This patch modifies
the formatting so that the high bits of the immediate mark values as being
integers.
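
As an illustrative sketch only (the real tag constants and helpers live in
runtime/JSImmediate.h; kAltIntegerTag below is a hypothetical placeholder),
the two integer layouts described above look roughly like this:

    #include <stdint.h>

    // Default format: value left-shifted by one, low bit set as the tag.
    // Arithmetic requires de-tagging (an arithmetic right shift by one) first.
    static inline int64_t encodeDefault(int32_t value) { return (static_cast<int64_t>(value) << 1) | 1; }
    static inline int32_t decodeDefault(int64_t imm)   { return static_cast<int32_t>(imm >> 1); }

    // USE(ALTERNATE_JSIMMEDIATE) format after this patch: the full 32-bit value
    // occupies the low word, and high bits mark the value as an integer, so the
    // low 32 bits can feed arithmetic directly.
    static const uint64_t kAltIntegerTag = 0xFFFF000000000000ull; // hypothetical placeholder tag
    static inline int64_t encodeAlternate(int32_t value) { return static_cast<int64_t>(kAltIntegerTag | static_cast<uint32_t>(value)); }
    static inline int32_t decodeAlternate(int64_t imm)   { return static_cast<int32_t>(imm); }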

  • assembler/MacroAssembler.h: (JSC::MacroAssembler::not32): (JSC::MacroAssembler::orPtr): (JSC::MacroAssembler::zeroExtend32ToPtr): (JSC::MacroAssembler::jaePtr): (JSC::MacroAssembler::jbPtr): (JSC::MacroAssembler::jnzPtr): (JSC::MacroAssembler::jzPtr):
  • assembler/X86Assembler.h: (JSC::X86Assembler::): (JSC::X86Assembler::notl_r): (JSC::X86Assembler::testq_i32r):
  • jit/JIT.cpp: (JSC::JIT::privateCompileMainPass): (JSC::JIT::privateCompileSlowCases): (JSC::JIT::privateCompileCTIMachineTrampolines):
  • jit/JIT.h:
  • jit/JITArithmetic.cpp: (JSC::JIT::compileFastArith_op_lshift): (JSC::JIT::compileFastArith_op_rshift): (JSC::JIT::compileFastArith_op_bitand): (JSC::JIT::compileFastArithSlow_op_bitand): (JSC::JIT::compileFastArith_op_mod): (JSC::JIT::compileFastArithSlow_op_mod): (JSC::JIT::compileFastArith_op_add): (JSC::JIT::compileFastArith_op_mul): (JSC::JIT::compileFastArith_op_post_inc): (JSC::JIT::compileFastArith_op_post_dec): (JSC::JIT::compileFastArith_op_pre_inc): (JSC::JIT::compileFastArith_op_pre_dec): (JSC::JIT::putDoubleResultToJSNumberCellOrJSImmediate): (JSC::JIT::compileBinaryArithOp):
  • jit/JITCall.cpp: (JSC::JIT::compileOpCallSlowCase):
  • jit/JITInlineMethods.h: (JSC::JIT::emitJumpIfJSCell): (JSC::JIT::emitJumpIfNotJSCell): (JSC::JIT::emitJumpIfImmNum): (JSC::JIT::emitJumpSlowCaseIfNotImmNum): (JSC::JIT::emitJumpSlowCaseIfNotImmNums): (JSC::JIT::emitFastArithDeTagImmediate): (JSC::JIT::emitFastArithDeTagImmediateJumpIfZero): (JSC::JIT::emitFastArithReTagImmediate): (JSC::JIT::emitFastArithImmToInt): (JSC::JIT::emitFastArithIntToImmNoCheck): (JSC::JIT::emitTagAsBoolImmediate):
  • jit/JITPropertyAccess.cpp: (JSC::resizePropertyStorage): (JSC::JIT::privateCompilePutByIdTransition): (JSC::JIT::privateCompilePatchGetArrayLength): (JSC::JIT::privateCompileGetByIdSelf): (JSC::JIT::privateCompileGetByIdProto): (JSC::JIT::privateCompileGetByIdChain): (JSC::JIT::privateCompilePutByIdReplace):
  • runtime/JSImmediate.h: (JSC::JSImmediate::isNumber): (JSC::JSImmediate::isPositiveNumber): (JSC::JSImmediate::areBothImmediateNumbers): (JSC::JSImmediate::xorImmediateNumbers): (JSC::JSImmediate::rightShiftImmediateNumbers): (JSC::JSImmediate::canDoFastAdditiveOperations): (JSC::JSImmediate::addImmediateNumbers): (JSC::JSImmediate::subImmediateNumbers): (JSC::JSImmediate::makeInt): (JSC::JSImmediate::toBoolean):
  • wtf/Platform.h:
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef X86Assembler_h
27#define X86Assembler_h
28
29#include <wtf/Platform.h>
30
31#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33#include "AssemblerBuffer.h"
34#include <stdint.h>
35#include <wtf/Assertions.h>
36#include <wtf/Vector.h>
37
38namespace JSC {
39
40inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
41#if PLATFORM(X86_64)
42inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
43inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
44#endif
45
46namespace X86 {
47 typedef enum {
48 eax,
49 ecx,
50 edx,
51 ebx,
52 esp,
53 ebp,
54 esi,
55 edi,
56
57#if PLATFORM(X86_64)
58 r8,
59 r9,
60 r10,
61 r11,
62 r12,
63 r13,
64 r14,
65 r15,
66#endif
67 } RegisterID;
68
69 typedef enum {
70 xmm0,
71 xmm1,
72 xmm2,
73 xmm3,
74 xmm4,
75 xmm5,
76 xmm6,
77 xmm7,
78 } XMMRegisterID;
79}
80
81class X86Assembler {
82public:
83 typedef X86::RegisterID RegisterID;
84 typedef X86::XMMRegisterID XMMRegisterID;
85
86 typedef enum {
87 OP_ADD_EvGv = 0x01,
88 OP_ADD_GvEv = 0x03,
89 OP_OR_EvGv = 0x09,
90 OP_OR_GvEv = 0x0B,
91 OP_2BYTE_ESCAPE = 0x0F,
92 OP_AND_EvGv = 0x21,
93 OP_SUB_EvGv = 0x29,
94 OP_SUB_GvEv = 0x2B,
95 PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
96 OP_XOR_EvGv = 0x31,
97 OP_CMP_EvGv = 0x39,
98 OP_CMP_GvEv = 0x3B,
99#if PLATFORM(X86_64)
100 PRE_REX = 0x40,
101#endif
102 OP_PUSH_EAX = 0x50,
103 OP_POP_EAX = 0x58,
104#if PLATFORM(X86_64)
105 OP_MOVSXD_GvEv = 0x63,
106#endif
107 PRE_OPERAND_SIZE = 0x66,
108 PRE_SSE_66 = 0x66,
109 OP_PUSH_Iz = 0x68,
110 OP_IMUL_GvEvIz = 0x69,
111 OP_GROUP1_EvIz = 0x81,
112 OP_GROUP1_EvIb = 0x83,
113 OP_TEST_EvGv = 0x85,
114 OP_XCHG_EvGv = 0x87,
115 OP_MOV_EvGv = 0x89,
116 OP_MOV_GvEv = 0x8B,
117 OP_LEA = 0x8D,
118 OP_GROUP1A_Ev = 0x8F,
119 OP_CDQ = 0x99,
120 OP_MOV_EAXOv = 0xA1,
121 OP_MOV_OvEAX = 0xA3,
122 OP_MOV_EAXIv = 0xB8,
123 OP_GROUP2_EvIb = 0xC1,
124 OP_RET = 0xC3,
125 OP_GROUP11_EvIz = 0xC7,
126 OP_INT3 = 0xCC,
127 OP_GROUP2_Ev1 = 0xD1,
128 OP_GROUP2_EvCL = 0xD3,
129 OP_CALL_rel32 = 0xE8,
130 OP_JMP_rel32 = 0xE9,
131 PRE_SSE_F2 = 0xF2,
132 OP_HLT = 0xF4,
133 OP_GROUP3_EbIb = 0xF6,
134 OP_GROUP3_Ev = 0xF7,
 135        OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
136 OP_GROUP5_Ev = 0xFF,
137 } OneByteOpcodeID;
138
139 typedef enum {
140 OP2_MOVSD_VsdWsd = 0x10,
141 OP2_MOVSD_WsdVsd = 0x11,
142 OP2_CVTSI2SD_VsdEd = 0x2A,
143 OP2_CVTTSD2SI_GdWsd = 0x2C,
144 OP2_UCOMISD_VsdWsd = 0x2E,
145 OP2_ADDSD_VsdWsd = 0x58,
146 OP2_MULSD_VsdWsd = 0x59,
147 OP2_SUBSD_VsdWsd = 0x5C,
148 OP2_MOVD_EdVd = 0x7E,
149 OP2_JO_rel32 = 0x80,
150 OP2_JB_rel32 = 0x82,
151 OP2_JAE_rel32 = 0x83,
152 OP2_JE_rel32 = 0x84,
153 OP2_JNE_rel32 = 0x85,
154 OP2_JBE_rel32 = 0x86,
155 OP2_JA_rel32 = 0x87,
156 OP2_JS_rel32 = 0x88,
157 OP2_JP_rel32 = 0x8A,
158 OP2_JL_rel32 = 0x8C,
159 OP2_JGE_rel32 = 0x8D,
160 OP2_JLE_rel32 = 0x8E,
161 OP2_JG_rel32 = 0x8F,
162 OP_SETE = 0x94,
163 OP_SETNE = 0x95,
164 OP2_IMUL_GvEv = 0xAF,
165 OP2_MOVZX_GvEb = 0xB6,
166 OP2_MOVZX_GvEw = 0xB7,
167 OP2_PEXTRW_GdUdIb = 0xC5,
168 } TwoByteOpcodeID;
169
170 typedef enum {
171 GROUP1_OP_ADD = 0,
172 GROUP1_OP_OR = 1,
173 GROUP1_OP_AND = 4,
174 GROUP1_OP_SUB = 5,
175 GROUP1_OP_XOR = 6,
176 GROUP1_OP_CMP = 7,
177
178 GROUP1A_OP_POP = 0,
179
180 GROUP2_OP_SHL = 4,
181 GROUP2_OP_SAR = 7,
182
183 GROUP3_OP_TEST = 0,
184 GROUP3_OP_NOT = 2,
185 GROUP3_OP_IDIV = 7,
186
187 GROUP5_OP_CALLN = 2,
188 GROUP5_OP_JMPN = 4,
189 GROUP5_OP_PUSH = 6,
190
191 GROUP11_MOV = 0,
192 } GroupOpcodeID;
193
194 // Opaque label types
195
196private:
197 class X86InstructionFormatter;
198public:
199
200 class JmpSrc {
201 friend class X86Assembler;
202 friend class X86InstructionFormatter;
203 public:
204 JmpSrc()
205 : m_offset(-1)
206 {
207 }
208
209 private:
210 JmpSrc(int offset)
211 : m_offset(offset)
212 {
213 }
214
215 int m_offset;
216 };
217
218 class JmpDst {
219 friend class X86Assembler;
220 friend class X86InstructionFormatter;
221 public:
222 JmpDst()
223 : m_offset(-1)
224 {
225 }
226
227 private:
228 JmpDst(int offset)
229 : m_offset(offset)
230 {
231 }
232
233 int m_offset;
234 };
235
236 X86Assembler()
237 {
238 }
239
240 size_t size() const { return m_formatter.size(); }
241
242 // Stack operations:
243
244 void push_r(RegisterID reg)
245 {
246 m_formatter.oneByteOp(OP_PUSH_EAX, reg);
247 }
248
249 void pop_r(RegisterID reg)
250 {
251 m_formatter.oneByteOp(OP_POP_EAX, reg);
252 }
253
254 void push_i32(int imm)
255 {
256 m_formatter.oneByteOp(OP_PUSH_Iz);
257 m_formatter.immediate32(imm);
258 }
259
260 void push_m(int offset, RegisterID base)
261 {
262 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
263 }
264
265 void pop_m(int offset, RegisterID base)
266 {
267 m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
268 }
269
270 // Arithmetic operations:
271
272 void addl_rr(RegisterID src, RegisterID dst)
273 {
274 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
275 }
276
277 void addl_mr(int offset, RegisterID base, RegisterID dst)
278 {
279 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
280 }
281
282 void addl_ir(int imm, RegisterID dst)
283 {
284 if (CAN_SIGN_EXTEND_8_32(imm)) {
285 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
286 m_formatter.immediate8(imm);
287 } else {
288 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
289 m_formatter.immediate32(imm);
290 }
291 }
292
293 void addl_im(int imm, int offset, RegisterID base)
294 {
295 if (CAN_SIGN_EXTEND_8_32(imm)) {
296 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
297 m_formatter.immediate8(imm);
298 } else {
299 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
300 m_formatter.immediate32(imm);
301 }
302 }
303
304#if PLATFORM(X86_64)
305 void addq_rr(RegisterID src, RegisterID dst)
306 {
307 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
308 }
309
310 void addq_ir(int imm, RegisterID dst)
311 {
312 if (CAN_SIGN_EXTEND_8_32(imm)) {
313 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
314 m_formatter.immediate8(imm);
315 } else {
316 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
317 m_formatter.immediate32(imm);
318 }
319 }
320#else
321 void addl_im(int imm, void* addr)
322 {
323 if (CAN_SIGN_EXTEND_8_32(imm)) {
324 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
325 m_formatter.immediate8(imm);
326 } else {
327 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
328 m_formatter.immediate32(imm);
329 }
330 }
331#endif
332
333 void andl_rr(RegisterID src, RegisterID dst)
334 {
335 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
336 }
337
338 void andl_ir(int imm, RegisterID dst)
339 {
340 if (CAN_SIGN_EXTEND_8_32(imm)) {
341 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
342 m_formatter.immediate8(imm);
343 } else {
344 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
345 m_formatter.immediate32(imm);
346 }
347 }
348
349#if PLATFORM(X86_64)
350 void andq_rr(RegisterID src, RegisterID dst)
351 {
352 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
353 }
354
355 void andq_ir(int imm, RegisterID dst)
356 {
357 if (CAN_SIGN_EXTEND_8_32(imm)) {
358 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
359 m_formatter.immediate8(imm);
360 } else {
361 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
362 m_formatter.immediate32(imm);
363 }
364 }
365#endif
366
367 void notl_r(RegisterID dst)
368 {
369 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
370 }
371
372 void orl_rr(RegisterID src, RegisterID dst)
373 {
374 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
375 }
376
377 void orl_mr(int offset, RegisterID base, RegisterID dst)
378 {
379 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
380 }
381
382 void orl_ir(int imm, RegisterID dst)
383 {
384 if (CAN_SIGN_EXTEND_8_32(imm)) {
385 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
386 m_formatter.immediate8(imm);
387 } else {
388 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
389 m_formatter.immediate32(imm);
390 }
391 }
392
393#if PLATFORM(X86_64)
394 void orq_rr(RegisterID src, RegisterID dst)
395 {
396 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
397 }
398
399 void orq_ir(int imm, RegisterID dst)
400 {
401 if (CAN_SIGN_EXTEND_8_32(imm)) {
402 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
403 m_formatter.immediate8(imm);
404 } else {
405 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
406 m_formatter.immediate32(imm);
407 }
408 }
409#endif
410
411 void subl_rr(RegisterID src, RegisterID dst)
412 {
413 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
414 }
415
416 void subl_mr(int offset, RegisterID base, RegisterID dst)
417 {
418 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
419 }
420
421 void subl_ir(int imm, RegisterID dst)
422 {
423 if (CAN_SIGN_EXTEND_8_32(imm)) {
424 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
425 m_formatter.immediate8(imm);
426 } else {
427 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
428 m_formatter.immediate32(imm);
429 }
430 }
431
432 void subl_im(int imm, int offset, RegisterID base)
433 {
434 if (CAN_SIGN_EXTEND_8_32(imm)) {
435 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
436 m_formatter.immediate8(imm);
437 } else {
438 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
439 m_formatter.immediate32(imm);
440 }
441 }
442
443#if PLATFORM(X86_64)
444 void subq_ir(int imm, RegisterID dst)
445 {
446 if (CAN_SIGN_EXTEND_8_32(imm)) {
447 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
448 m_formatter.immediate8(imm);
449 } else {
450 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
451 m_formatter.immediate32(imm);
452 }
453 }
454#else
455 void subl_im(int imm, void* addr)
456 {
457 if (CAN_SIGN_EXTEND_8_32(imm)) {
458 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
459 m_formatter.immediate8(imm);
460 } else {
461 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
462 m_formatter.immediate32(imm);
463 }
464 }
465#endif
466
467 void xorl_rr(RegisterID src, RegisterID dst)
468 {
469 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
470 }
471
472 void xorl_ir(int imm, RegisterID dst)
473 {
474 if (CAN_SIGN_EXTEND_8_32(imm)) {
475 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
476 m_formatter.immediate8(imm);
477 } else {
478 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
479 m_formatter.immediate32(imm);
480 }
481 }
482
483#if PLATFORM(X86_64)
484 void xorq_rr(RegisterID src, RegisterID dst)
485 {
486 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
487 }
488
489 void xorq_ir(int imm, RegisterID dst)
490 {
491 if (CAN_SIGN_EXTEND_8_32(imm)) {
492 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
493 m_formatter.immediate8(imm);
494 } else {
495 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
496 m_formatter.immediate32(imm);
497 }
498 }
499#endif
500
501 void sarl_i8r(int imm, RegisterID dst)
502 {
503 if (imm == 1)
504 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
505 else {
506 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
507 m_formatter.immediate8(imm);
508 }
509 }
510
511 void sarl_CLr(RegisterID dst)
512 {
513 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
514 }
515
516 void shll_i8r(int imm, RegisterID dst)
517 {
518 if (imm == 1)
519 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
520 else {
521 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
522 m_formatter.immediate8(imm);
523 }
524 }
525
526 void shll_CLr(RegisterID dst)
527 {
528 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
529 }
530
531#if PLATFORM(X86_64)
532 void sarq_CLr(RegisterID dst)
533 {
534 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
535 }
536
537 void sarq_i8r(int imm, RegisterID dst)
538 {
539 if (imm == 1)
540 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
541 else {
542 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
543 m_formatter.immediate8(imm);
544 }
545 }
546#endif
547
548 void imull_rr(RegisterID src, RegisterID dst)
549 {
550 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
551 }
552
553 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
554 {
555 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
556 m_formatter.immediate32(value);
557 }
558
559 void idivl_r(RegisterID dst)
560 {
561 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
562 }
563
564 // Comparisons:
565
566 void cmpl_rr(RegisterID src, RegisterID dst)
567 {
568 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
569 }
570
571 void cmpl_rm(RegisterID src, int offset, RegisterID base)
572 {
573 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
574 }
575
576 void cmpl_mr(int offset, RegisterID base, RegisterID src)
577 {
578 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
579 }
580
581 void cmpl_ir(int imm, RegisterID dst)
582 {
583 if (CAN_SIGN_EXTEND_8_32(imm)) {
584 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
585 m_formatter.immediate8(imm);
586 } else {
587 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
588 m_formatter.immediate32(imm);
589 }
590 }
591
592 void cmpl_ir_force32(int imm, RegisterID dst)
593 {
594 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
595 m_formatter.immediate32(imm);
596 }
597
598 void cmpl_im(int imm, int offset, RegisterID base)
599 {
600 if (CAN_SIGN_EXTEND_8_32(imm)) {
601 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
602 m_formatter.immediate8(imm);
603 } else {
604 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
605 m_formatter.immediate32(imm);
606 }
607 }
608
609 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
610 {
611 if (CAN_SIGN_EXTEND_8_32(imm)) {
612 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
613 m_formatter.immediate8(imm);
614 } else {
615 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
616 m_formatter.immediate32(imm);
617 }
618 }
619
620 void cmpl_im_force32(int imm, int offset, RegisterID base)
621 {
622 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
623 m_formatter.immediate32(imm);
624 }
625
626#if PLATFORM(X86_64)
627 void cmpq_rr(RegisterID src, RegisterID dst)
628 {
629 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
630 }
631
632 void cmpq_rm(RegisterID src, int offset, RegisterID base)
633 {
634 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
635 }
636
637 void cmpq_ir(int imm, RegisterID dst)
638 {
639 if (CAN_SIGN_EXTEND_8_32(imm)) {
640 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
641 m_formatter.immediate8(imm);
642 } else {
643 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
644 m_formatter.immediate32(imm);
645 }
646 }
647
648 void cmpq_im(int imm, int offset, RegisterID base)
649 {
650 if (CAN_SIGN_EXTEND_8_32(imm)) {
651 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
652 m_formatter.immediate8(imm);
653 } else {
654 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
655 m_formatter.immediate32(imm);
656 }
657 }
658
659 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
660 {
661 if (CAN_SIGN_EXTEND_8_32(imm)) {
662 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
663 m_formatter.immediate8(imm);
664 } else {
665 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
666 m_formatter.immediate32(imm);
667 }
668 }
669#else
670 void cmpl_rm(RegisterID reg, void* addr)
671 {
672 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
673 }
674
675 void cmpl_im(int imm, void* addr)
676 {
677 if (CAN_SIGN_EXTEND_8_32(imm)) {
678 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
679 m_formatter.immediate8(imm);
680 } else {
681 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
682 m_formatter.immediate32(imm);
683 }
684 }
685#endif
686
687 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
688 {
689 m_formatter.prefix(PRE_OPERAND_SIZE);
690 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
691 }
692
693 void testl_rr(RegisterID src, RegisterID dst)
694 {
695 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
696 }
697
698 void testl_i32r(int imm, RegisterID dst)
699 {
700 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
701 m_formatter.immediate32(imm);
702 }
703
704 void testl_i32m(int imm, int offset, RegisterID base)
705 {
706 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
707 m_formatter.immediate32(imm);
708 }
709
710 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
711 {
712 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
713 m_formatter.immediate32(imm);
714 }
715
716#if PLATFORM(X86_64)
717 void testq_rr(RegisterID src, RegisterID dst)
718 {
719 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
720 }
721
722 void testq_i32r(int imm, RegisterID dst)
723 {
724 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
725 m_formatter.immediate32(imm);
726 }
727
728 void testq_i32m(int imm, int offset, RegisterID base)
729 {
730 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
731 m_formatter.immediate32(imm);
732 }
733
734 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
735 {
736 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
737 m_formatter.immediate32(imm);
738 }
739#endif
740
741 void testb_i8r(int imm, RegisterID dst)
742 {
743 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
744 m_formatter.immediate8(imm);
745 }
746
747 void sete_r(RegisterID dst)
748 {
749 m_formatter.twoByteOp8(OP_SETE, (GroupOpcodeID)0, dst);
750 }
751
752 void setz_r(RegisterID dst)
753 {
754 sete_r(dst);
755 }
756
757 void setne_r(RegisterID dst)
758 {
759 m_formatter.twoByteOp8(OP_SETNE, (GroupOpcodeID)0, dst);
760 }
761
762 void setnz_r(RegisterID dst)
763 {
764 setne_r(dst);
765 }
766
767 // Various move ops:
768
769 void cdq()
770 {
771 m_formatter.oneByteOp(OP_CDQ);
772 }
773
774 void xchgl_rr(RegisterID src, RegisterID dst)
775 {
776 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
777 }
778
779#if PLATFORM(X86_64)
780 void xchgq_rr(RegisterID src, RegisterID dst)
781 {
782 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
783 }
784#endif
785
786 void movl_rr(RegisterID src, RegisterID dst)
787 {
788 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
789 }
790
791 void movl_rm(RegisterID src, int offset, RegisterID base)
792 {
793 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
794 }
795
796 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
797 {
798 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
799 }
800
801 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
802 {
803 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
804 }
805
806 void movl_mEAX(void* addr)
807 {
808 m_formatter.oneByteOp(OP_MOV_EAXOv);
809#if PLATFORM(X86_64)
810 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
811#else
812 m_formatter.immediate32(reinterpret_cast<int>(addr));
813#endif
814 }
815
816 void movl_mr(int offset, RegisterID base, RegisterID dst)
817 {
818 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
819 }
820
821 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
822 {
823 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
824 }
825
826 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
827 {
828 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
829 }
830
831 void movl_i32r(int imm, RegisterID dst)
832 {
833 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
834 m_formatter.immediate32(imm);
835 }
836
837 void movl_i32m(int imm, int offset, RegisterID base)
838 {
839 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
840 m_formatter.immediate32(imm);
841 }
842
843 void movl_EAXm(void* addr)
844 {
845 m_formatter.oneByteOp(OP_MOV_OvEAX);
846#if PLATFORM(X86_64)
847 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
848#else
849 m_formatter.immediate32(reinterpret_cast<int>(addr));
850#endif
851 }
852
853#if PLATFORM(X86_64)
854 void movq_rr(RegisterID src, RegisterID dst)
855 {
856 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
857 }
858
859 void movq_rm(RegisterID src, int offset, RegisterID base)
860 {
861 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
862 }
863
864 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
865 {
866 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
867 }
868
869 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
870 {
871 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
872 }
873
874 void movq_mEAX(void* addr)
875 {
876 m_formatter.oneByteOp64(OP_MOV_EAXOv);
877 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
878 }
879
880 void movq_mr(int offset, RegisterID base, RegisterID dst)
881 {
882 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
883 }
884
885 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
886 {
887 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
888 }
889
890 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
891 {
892 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
893 }
894
895 void movq_i64r(int64_t imm, RegisterID dst)
896 {
897 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
898 m_formatter.immediate64(imm);
899 }
900
901 void movsxd_rr(RegisterID src, RegisterID dst)
902 {
903 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
904 }
905
906
907#else
908 void movl_mr(void* addr, RegisterID dst)
909 {
910 if (dst == X86::eax)
911 movl_mEAX(addr);
912 else
913 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
914 }
915
916 void movl_i32m(int imm, void* addr)
917 {
918 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
919 m_formatter.immediate32(imm);
920 }
921#endif
922
923 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
924 {
925 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
926 }
927
928 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
929 {
930 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
931 }
932
933 void movzbl_rr(RegisterID src, RegisterID dst)
934 {
935 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
936 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
937 // REX prefixes are defined to be silently ignored by the processor.
938 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
939 }
940
941 void leal_mr(int offset, RegisterID base, RegisterID dst)
942 {
943 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
944 }
945
946 // Flow control:
947
948 JmpSrc call()
949 {
950 m_formatter.oneByteOp(OP_CALL_rel32);
951 return m_formatter.immediateRel32();
952 }
953
954 JmpSrc call(RegisterID dst)
955 {
956 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
957 return JmpSrc(m_formatter.size());
958 }
959
960 JmpSrc jmp()
961 {
962 m_formatter.oneByteOp(OP_JMP_rel32);
963 return m_formatter.immediateRel32();
964 }
965
966 void jmp_r(RegisterID dst)
967 {
968 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
969 }
970
971 void jmp_m(int offset, RegisterID base)
972 {
973 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
974 }
975
976 JmpSrc jne()
977 {
978 m_formatter.twoByteOp(OP2_JNE_rel32);
979 return m_formatter.immediateRel32();
980 }
981
982 JmpSrc jnz()
983 {
984 return jne();
985 }
986
987 JmpSrc je()
988 {
989 m_formatter.twoByteOp(OP2_JE_rel32);
990 return m_formatter.immediateRel32();
991 }
992
993 JmpSrc jl()
994 {
995 m_formatter.twoByteOp(OP2_JL_rel32);
996 return m_formatter.immediateRel32();
997 }
998
999 JmpSrc jb()
1000 {
1001 m_formatter.twoByteOp(OP2_JB_rel32);
1002 return m_formatter.immediateRel32();
1003 }
1004
1005 JmpSrc jle()
1006 {
1007 m_formatter.twoByteOp(OP2_JLE_rel32);
1008 return m_formatter.immediateRel32();
1009 }
1010
1011 JmpSrc jbe()
1012 {
1013 m_formatter.twoByteOp(OP2_JBE_rel32);
1014 return m_formatter.immediateRel32();
1015 }
1016
1017 JmpSrc jge()
1018 {
1019 m_formatter.twoByteOp(OP2_JGE_rel32);
1020 return m_formatter.immediateRel32();
1021 }
1022
1023 JmpSrc jg()
1024 {
1025 m_formatter.twoByteOp(OP2_JG_rel32);
1026 return m_formatter.immediateRel32();
1027 }
1028
1029 JmpSrc ja()
1030 {
1031 m_formatter.twoByteOp(OP2_JA_rel32);
1032 return m_formatter.immediateRel32();
1033 }
1034
1035 JmpSrc jae()
1036 {
1037 m_formatter.twoByteOp(OP2_JAE_rel32);
1038 return m_formatter.immediateRel32();
1039 }
1040
1041 JmpSrc jo()
1042 {
1043 m_formatter.twoByteOp(OP2_JO_rel32);
1044 return m_formatter.immediateRel32();
1045 }
1046
1047 JmpSrc jp()
1048 {
1049 m_formatter.twoByteOp(OP2_JP_rel32);
1050 return m_formatter.immediateRel32();
1051 }
1052
1053 JmpSrc js()
1054 {
1055 m_formatter.twoByteOp(OP2_JS_rel32);
1056 return m_formatter.immediateRel32();
1057 }
1058
1059 // SSE operations:
1060
1061 void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
1062 {
1063 m_formatter.prefix(PRE_SSE_F2);
1064 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1065 }
1066
1067 void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1068 {
1069 m_formatter.prefix(PRE_SSE_F2);
1070 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
1071 }
1072
1073 void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
1074 {
1075 m_formatter.prefix(PRE_SSE_F2);
1076 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1077 }
1078
1079 void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
1080 {
1081 m_formatter.prefix(PRE_SSE_F2);
1082 m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1083 }
1084
1085 void movd_rr(XMMRegisterID src, RegisterID dst)
1086 {
1087 m_formatter.prefix(PRE_SSE_66);
1088 m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
1089 }
1090
1091 void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
1092 {
1093 m_formatter.prefix(PRE_SSE_F2);
1094 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
1095 }
1096
1097 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1098 {
1099 m_formatter.prefix(PRE_SSE_F2);
1100 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1101 }
1102
1103 void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
1104 {
1105 m_formatter.prefix(PRE_SSE_F2);
1106 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1107 }
1108
1109 void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1110 {
1111 m_formatter.prefix(PRE_SSE_F2);
1112 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
1113 }
1114
1115 void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
1116 {
1117 m_formatter.prefix(PRE_SSE_66);
1118 m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
1119 m_formatter.immediate8(whichWord);
1120 }
1121
1122 void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
1123 {
1124 m_formatter.prefix(PRE_SSE_F2);
1125 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1126 }
1127
1128 void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1129 {
1130 m_formatter.prefix(PRE_SSE_F2);
1131 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
1132 }
1133
1134 void ucomis_rr(XMMRegisterID src, XMMRegisterID dst)
1135 {
1136 m_formatter.prefix(PRE_SSE_66);
1137 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1138 }
1139
1140 // Misc instructions:
1141
1142 void int3()
1143 {
1144 m_formatter.oneByteOp(OP_INT3);
1145 }
1146
1147 void ret()
1148 {
1149 m_formatter.oneByteOp(OP_RET);
1150 }
1151
1152 void predictNotTaken()
1153 {
1154 m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
1155 }
1156
1157 // Assembler admin methods:
1158
1159 JmpDst label()
1160 {
1161 return JmpDst(m_formatter.size());
1162 }
1163
1164 JmpDst align(int alignment)
1165 {
1166 while (!m_formatter.isAligned(alignment))
1167 m_formatter.oneByteOp(OP_HLT);
1168
1169 return label();
1170 }
1171
1172 // Linking & patching:
1173
1174 void link(JmpSrc from, JmpDst to)
1175 {
1176 ASSERT(to.m_offset != -1);
1177 ASSERT(from.m_offset != -1);
1178
1179 reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
1180 }
1181
1182 static void patchAddress(void* code, JmpDst position, void* value)
1183 {
1184 ASSERT(position.m_offset != -1);
1185
1186 reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
1187 }
1188
1189 static void link(void* code, JmpSrc from, void* to)
1190 {
1191 ASSERT(from.m_offset != -1);
1192
1193 reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
1194 }
1195
1196 static void* getRelocatedAddress(void* code, JmpSrc jump)
1197 {
1198 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
1199 }
1200
1201 static void* getRelocatedAddress(void* code, JmpDst destination)
1202 {
1203 ASSERT(destination.m_offset != -1);
1204
1205 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
1206 }
1207
1208 static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
1209 {
1210 return dst.m_offset - src.m_offset;
1211 }
1212
1213 static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
1214 {
1215 return dst.m_offset - src.m_offset;
1216 }
1217
1218 static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
1219 {
1220 return dst.m_offset - src.m_offset;
1221 }
1222
1223 static void patchImmediate(intptr_t where, int32_t value)
1224 {
1225 reinterpret_cast<int32_t*>(where)[-1] = value;
1226 }
1227
1228 static void patchPointer(intptr_t where, intptr_t value)
1229 {
1230 reinterpret_cast<intptr_t*>(where)[-1] = value;
1231 }
1232
1233 static void patchBranchOffset(intptr_t where, void* destination)
1234 {
1235 intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
1236 ASSERT(offset == static_cast<int32_t>(offset));
1237 reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
1238 }
1239
1240 void* executableCopy(ExecutablePool* allocator)
1241 {
1242 void* copy = m_formatter.executableCopy(allocator);
1243 ASSERT(copy);
1244 return copy;
1245 }
1246
1247private:
1248
1249 class X86InstructionFormatter {
1250
1251 static const int maxInstructionSize = 16;
1252
1253 public:
1254
1255 // Legacy prefix bytes:
1256 //
 1257        // These are emitted prior to the instruction.
1258
1259 void prefix(OneByteOpcodeID pre)
1260 {
1261 m_buffer.putByte(pre);
1262 }
1263
1264 // Word-sized operands / no operand instruction formatters.
1265 //
1266 // In addition to the opcode, the following operand permutations are supported:
1267 // * None - instruction takes no operands.
1268 // * One register - the low three bits of the RegisterID are added into the opcode.
1269 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1270 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1271 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1272 //
1273 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1274 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1275 //
 1276        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
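        // As a worked example (illustrative, not from the original source): movl_rm(X86::eax, 8, X86::ebx)
        // calls oneByteOp(OP_MOV_EvGv, eax, ebx, 8) and emits no REX (neither register is r8 or above),
        // then opcode 0x89, ModRM 0x43 (mod=01 disp8, reg=eax, rm=ebx), then the displacement byte 0x08 -
        // the sequence 89 43 08, i.e. movl %eax, 8(%ebx).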
1277
1278 void oneByteOp(OneByteOpcodeID opcode)
1279 {
1280 m_buffer.ensureSpace(maxInstructionSize);
1281 m_buffer.putByteUnchecked(opcode);
1282 }
1283
1284 void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
1285 {
1286 m_buffer.ensureSpace(maxInstructionSize);
1287 emitRexIfNeeded(0, 0, reg);
1288 m_buffer.putByteUnchecked(opcode + (reg & 7));
1289 }
1290
1291 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
1292 {
1293 m_buffer.ensureSpace(maxInstructionSize);
1294 emitRexIfNeeded(reg, 0, rm);
1295 m_buffer.putByteUnchecked(opcode);
1296 registerModRM(reg, rm);
1297 }
1298
1299 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1300 {
1301 m_buffer.ensureSpace(maxInstructionSize);
1302 emitRexIfNeeded(reg, 0, base);
1303 m_buffer.putByteUnchecked(opcode);
1304 memoryModRM(reg, base, offset);
1305 }
1306
1307 void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1308 {
1309 m_buffer.ensureSpace(maxInstructionSize);
1310 emitRexIfNeeded(reg, 0, base);
1311 m_buffer.putByteUnchecked(opcode);
1312 memoryModRM_disp32(reg, base, offset);
1313 }
1314
1315 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1316 {
1317 m_buffer.ensureSpace(maxInstructionSize);
1318 emitRexIfNeeded(reg, index, base);
1319 m_buffer.putByteUnchecked(opcode);
1320 memoryModRM(reg, base, index, scale, offset);
1321 }
1322
1323#if !PLATFORM(X86_64)
1324 void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
1325 {
1326 m_buffer.ensureSpace(maxInstructionSize);
1327 m_buffer.putByteUnchecked(opcode);
1328 memoryModRM(reg, address);
1329 }
1330#endif
1331
1332 void twoByteOp(TwoByteOpcodeID opcode)
1333 {
1334 m_buffer.ensureSpace(maxInstructionSize);
1335 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1336 m_buffer.putByteUnchecked(opcode);
1337 }
1338
1339 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1340 {
1341 m_buffer.ensureSpace(maxInstructionSize);
1342 emitRexIfNeeded(reg, 0, rm);
1343 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1344 m_buffer.putByteUnchecked(opcode);
1345 registerModRM(reg, rm);
1346 }
1347
1348 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
1349 {
1350 m_buffer.ensureSpace(maxInstructionSize);
1351 emitRexIfNeeded(reg, 0, base);
1352 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1353 m_buffer.putByteUnchecked(opcode);
1354 memoryModRM(reg, base, offset);
1355 }
1356
1357 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1358 {
1359 m_buffer.ensureSpace(maxInstructionSize);
1360 emitRexIfNeeded(reg, index, base);
1361 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1362 m_buffer.putByteUnchecked(opcode);
1363 memoryModRM(reg, base, index, scale, offset);
1364 }
1365
1366#if PLATFORM(X86_64)
1367 // Quad-word-sized operands:
1368 //
 1369        // Used to format 64-bit operations, planting a REX.w prefix.
1370 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1371 // the normal (non-'64'-postfixed) formatters should be used.
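        // For example (illustrative): movq_rr(X86::eax, X86::ecx) goes through oneByteOp64(OP_MOV_EvGv, ...),
        // which plants REX.W (0x48) before opcode 0x89 and ModRM 0xC1, giving 48 89 C1 (movq %rax, %rcx).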
1372
1373 void oneByteOp64(OneByteOpcodeID opcode)
1374 {
1375 m_buffer.ensureSpace(maxInstructionSize);
1376 emitRexW(0, 0, 0);
1377 m_buffer.putByteUnchecked(opcode);
1378 }
1379
1380 void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
1381 {
1382 m_buffer.ensureSpace(maxInstructionSize);
1383 emitRexW(0, 0, reg);
1384 m_buffer.putByteUnchecked(opcode + (reg & 7));
1385 }
1386
1387 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
1388 {
1389 m_buffer.ensureSpace(maxInstructionSize);
1390 emitRexW(reg, 0, rm);
1391 m_buffer.putByteUnchecked(opcode);
1392 registerModRM(reg, rm);
1393 }
1394
1395 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1396 {
1397 m_buffer.ensureSpace(maxInstructionSize);
1398 emitRexW(reg, 0, base);
1399 m_buffer.putByteUnchecked(opcode);
1400 memoryModRM(reg, base, offset);
1401 }
1402
1403 void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1404 {
1405 m_buffer.ensureSpace(maxInstructionSize);
1406 emitRexW(reg, 0, base);
1407 m_buffer.putByteUnchecked(opcode);
1408 memoryModRM_disp32(reg, base, offset);
1409 }
1410
1411 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1412 {
1413 m_buffer.ensureSpace(maxInstructionSize);
1414 emitRexW(reg, index, base);
1415 m_buffer.putByteUnchecked(opcode);
1416 memoryModRM(reg, base, index, scale, offset);
1417 }
1418#endif
1419
1420 // Byte-operands:
1421 //
1422 // These methods format byte operations. Byte operations differ from the normal
1423 // formatters in the circumstances under which they will decide to emit REX prefixes.
1424 // These should be used where any register operand signifies a byte register.
1425 //
 1426        // The distinction is due to the handling of register numbers in the range 4..7 on
1427 // x86-64. These register numbers may either represent the second byte of the first
1428 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1429 //
1430 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1431 // be accessed where a REX prefix is present), these are likely best treated as
1432 // deprecated. In order to ensure the correct registers spl..dil are selected a
1433 // REX prefix will be emitted for any byte register operand in the range 4..15.
1434 //
 1435        // These formatters may be used in instructions with a mix of operand sizes, in which
 1436        // case an unnecessary REX will be emitted, for example:
1437 // movzbl %al, %edi
1438 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1439 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1440 // be silently ignored by the processor.
1441 //
1442 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1443 // is provided to check byte register operands.
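        // Illustrative encodings (not from the original source): setne_r(X86::ebx) needs no REX and
        // emits 0F 95 C3 (setne %bl), whereas on x86-64 setne_r(X86::edi) plants a REX prefix so that
        // dil rather than bh is selected: 40 0F 95 C7 (setne %dil).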
1444
1445 void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
1446 {
1447 m_buffer.ensureSpace(maxInstructionSize);
1448 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
1449 m_buffer.putByteUnchecked(opcode);
1450 registerModRM(groupOp, rm);
1451 }
1452
1453 void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
1454 {
1455 m_buffer.ensureSpace(maxInstructionSize);
1456 emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
1457 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1458 m_buffer.putByteUnchecked(opcode);
1459 registerModRM(reg, rm);
1460 }
1461
1462 void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
1463 {
1464 m_buffer.ensureSpace(maxInstructionSize);
1465 emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
1466 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1467 m_buffer.putByteUnchecked(opcode);
1468 registerModRM(groupOp, rm);
1469 }
1470
1471 // Immediates:
1472 //
 1473        // An immediate should be appended where appropriate after an op has been emitted.
1474 // The writes are unchecked since the opcode formatters above will have ensured space.
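        // For example, addl_ir(100, dst) above emits OP_GROUP1_EvIb via oneByteOp() and then appends
        // the byte via immediate8(); both writes fit in the space reserved by ensureSpace(maxInstructionSize).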
1475
1476 void immediate8(int imm)
1477 {
1478 m_buffer.putByteUnchecked(imm);
1479 }
1480
1481 void immediate32(int imm)
1482 {
1483 m_buffer.putIntUnchecked(imm);
1484 }
1485
1486 void immediate64(int64_t imm)
1487 {
1488 m_buffer.putInt64Unchecked(imm);
1489 }
1490
1491 JmpSrc immediateRel32()
1492 {
1493 m_buffer.putIntUnchecked(0);
1494 return JmpSrc(m_buffer.size());
1495 }
1496
1497 // Administrative methods:
1498
1499 size_t size() const { return m_buffer.size(); }
1500 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
1501 void* data() const { return m_buffer.data(); }
1502 void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
1503
1504 private:
1505
1506 // Internals; ModRm and REX formatters.
1507
1508 static const RegisterID noBase = X86::ebp;
1509 static const RegisterID hasSib = X86::esp;
1510 static const RegisterID noIndex = X86::esp;
1511#if PLATFORM(X86_64)
1512 static const RegisterID noBase2 = X86::r13;
1513 static const RegisterID hasSib2 = X86::r12;
1514
 1515        // Registers r8 & above require a REX prefix.
1516 inline bool regRequiresRex(int reg)
1517 {
1518 return (reg >= X86::r8);
1519 }
1520
 1521        // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
1522 inline bool byteRegRequiresRex(int reg)
1523 {
1524 return (reg >= X86::esp);
1525 }
1526
1527 // Format a REX prefix byte.
1528 inline void emitRex(bool w, int r, int x, int b)
1529 {
1530 m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
1531 }
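        // For example (illustrative): addq_rr(X86::eax, X86::r8) reaches emitRexW(0, 0, r8), which
        // emits 0x49 (0100 | W=1, R=0, X=0, B=1); the full instruction is 49 01 C0 (addq %rax, %r8).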
1532
1533 // Used to plant a REX byte with REX.w set (for 64-bit operations).
1534 inline void emitRexW(int r, int x, int b)
1535 {
1536 emitRex(true, r, x, b);
1537 }
1538
1539 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
1540 // regRequiresRex() to check other registers (i.e. address base & index).
1541 inline void emitRexIf(bool condition, int r, int x, int b)
1542 {
1543 if (condition) emitRex(false, r, x, b);
1544 }
1545
1546 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
1547 inline void emitRexIfNeeded(int r, int x, int b)
1548 {
1549 emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
1550 }
1551#else
1552 // No REX prefix bytes on 32-bit x86.
1553 inline bool regRequiresRex(int) { return false; }
1554 inline bool byteRegRequiresRex(int) { return false; }
1555 inline void emitRexIf(bool, int, int, int) {}
1556 inline void emitRexIfNeeded(int, int, int) {}
1557#endif
1558
1559 enum ModRmMode {
1560 ModRmMemoryNoDisp,
1561 ModRmMemoryDisp8,
1562 ModRmMemoryDisp32,
1563 ModRmRegister,
1564 };
1565
1566 void putModRm(ModRmMode mode, int reg, RegisterID rm)
1567 {
1568 m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
1569 }
1570
1571 void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
1572 {
1573 ASSERT(mode != ModRmRegister);
1574
 1575            // Encode scale of (1,2,4,8) -> (0,1,2,3)
1576 int shift = 0;
1577 while (scale >>= 1)
1578 shift++;
1579
1580 putModRm(mode, reg, hasSib);
1581 m_buffer.putByteUnchecked((shift << 6) | ((index & 7) << 3) | (base & 7));
1582 }
1583
1584 void registerModRM(int reg, RegisterID rm)
1585 {
1586 putModRm(ModRmRegister, reg, rm);
1587 }
1588
1589 void memoryModRM(int reg, RegisterID base, int offset)
1590 {
1591 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
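            // For example (illustrative): movl_mr(4, X86::esp, X86::eax) takes this path and emits
            // ModRM 0x44, SIB 0x24 (no index, base=esp), disp8 0x04 - with opcode 0x8B: 8B 44 24 04,
            // i.e. movl 4(%esp), %eax.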
1592#if PLATFORM(X86_64)
1593 if ((base == hasSib) || (base == hasSib2)) {
1594#else
1595 if (base == hasSib) {
1596#endif
1597 if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
1598 putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
1599 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1600 putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
1601 m_buffer.putByteUnchecked(offset);
1602 } else {
1603 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
1604 m_buffer.putIntUnchecked(offset);
1605 }
1606 } else {
1607#if PLATFORM(X86_64)
1608 if (!offset && (base != noBase) && (base != noBase2))
1609#else
1610 if (!offset && (base != noBase))
1611#endif
1612 putModRm(ModRmMemoryNoDisp, reg, base);
1613 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1614 putModRm(ModRmMemoryDisp8, reg, base);
1615 m_buffer.putByteUnchecked(offset);
1616 } else {
1617 putModRm(ModRmMemoryDisp32, reg, base);
1618 m_buffer.putIntUnchecked(offset);
1619 }
1620 }
1621 }
1622
1623 void memoryModRM_disp32(int reg, RegisterID base, int offset)
1624 {
1625 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
1626#if PLATFORM(X86_64)
1627 if ((base == hasSib) || (base == hasSib2)) {
1628#else
1629 if (base == hasSib) {
1630#endif
1631 putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
1632 m_buffer.putIntUnchecked(offset);
1633 } else {
1634 putModRm(ModRmMemoryDisp32, reg, base);
1635 m_buffer.putIntUnchecked(offset);
1636 }
1637 }
1638
1639 void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
1640 {
1641 ASSERT(index != noIndex);
1642
1643#if PLATFORM(X86_64)
1644 if (!offset && (base != noBase) && (base != noBase2))
1645#else
1646 if (!offset && (base != noBase))
1647#endif
1648 putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
1649 else if (CAN_SIGN_EXTEND_8_32(offset)) {
1650 putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
1651 m_buffer.putByteUnchecked(offset);
1652 } else {
1653 putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
1654 m_buffer.putIntUnchecked(offset);
1655 }
1656 }
1657
1658#if !PLATFORM(X86_64)
1659 void memoryModRM(int reg, void* address)
1660 {
1661 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
1662 putModRm(ModRmMemoryNoDisp, reg, noBase);
1663 m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
1664 }
1665#endif
1666
1667 AssemblerBuffer m_buffer;
1668 } m_formatter;
1669};
1670
1671} // namespace JSC
1672
 1673#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
1674
1675#endif // X86Assembler_h