source: webkit/trunk/JavaScriptCore/assembler/ARMv7Assembler.h@46832

Last change on this file since 46832 was 46247, checked in by [email protected], 16 years ago

2009-07-22 Gavin Barraclough <[email protected]>

Reviewed by Sam Weinig.

With ENABLE(ASSEMBLER_WX_EXCLUSIVE), only change permissions once per repatch event.
( https://bugs.webkit.org/show_bug.cgi?id=27564 )

Currently we change permissions forwards and backwards for each instruction modified;
instead, we should only change permissions once per complete repatching event.

This is a 2.5% progression when running with ENABLE(ASSEMBLER_WX_EXCLUSIVE),
which recoups 1/3 of the penalty of running in this mode.

  • assembler/ARMAssembler.cpp: (JSC::ARMAssembler::linkBranch):
    • Replace usage of makeWritable with cacheFlush.
  • assembler/ARMAssembler.h: (JSC::ARMAssembler::patchPointerInternal): (JSC::ARMAssembler::repatchLoadPtrToLEA):
    • Replace usage of makeWritable with cacheFlush.
  • assembler/ARMv7Assembler.h: (JSC::ARMv7Assembler::relinkJump): (JSC::ARMv7Assembler::relinkCall): (JSC::ARMv7Assembler::repatchInt32): (JSC::ARMv7Assembler::repatchPointer): (JSC::ARMv7Assembler::repatchLoadPtrToLEA): (JSC::ARMv7Assembler::setInt32):
    • Replace usage of makeWritable with cacheFlush.
  • assembler/LinkBuffer.h: (JSC::LinkBuffer::performFinalization):
    • Make explicit call to cacheFlush.
  • assembler/MacroAssemblerCodeRef.h: (JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
    • Make size always available.
  • assembler/RepatchBuffer.h: (JSC::RepatchBuffer::RepatchBuffer): (JSC::RepatchBuffer::~RepatchBuffer):
    • Add calls to makeWritable & makeExecutable.
  • assembler/X86Assembler.h: (JSC::X86Assembler::relinkJump): (JSC::X86Assembler::relinkCall): (JSC::X86Assembler::repatchInt32): (JSC::X86Assembler::repatchPointer): (JSC::X86Assembler::repatchLoadPtrToLEA):
    • Remove usage of makeWritable.
  • bytecode/CodeBlock.h: (JSC::CodeBlock::getJITCode):
    • Provide access to CodeBlock's JITCode.
  • jit/ExecutableAllocator.h: (JSC::ExecutableAllocator::makeExecutable): (JSC::ExecutableAllocator::cacheFlush):
    • Remove makeWritable, make cacheFlush public.
  • jit/JIT.cpp: (JSC::ctiPatchNearCallByReturnAddress): (JSC::ctiPatchCallByReturnAddress): (JSC::JIT::privateCompile): (JSC::JIT::unlinkCall): (JSC::JIT::linkCall):
    • Add CodeBlock argument to RepatchBuffer.
  • jit/JIT.h:
    • Pass CodeBlock argument for use by RepatchBuffer.
  • jit/JITCode.h: (JSC::JITCode::start): (JSC::JITCode::size):
    • Provide access to code start & size.
  • jit/JITPropertyAccess.cpp: (JSC::JIT::privateCompilePutByIdTransition): (JSC::JIT::patchGetByIdSelf): (JSC::JIT::patchMethodCallProto): (JSC::JIT::patchPutByIdReplace): (JSC::JIT::privateCompilePatchGetArrayLength): (JSC::JIT::privateCompileGetByIdProto): (JSC::JIT::privateCompileGetByIdSelfList): (JSC::JIT::privateCompileGetByIdProtoList): (JSC::JIT::privateCompileGetByIdChainList): (JSC::JIT::privateCompileGetByIdChain):
    • Add CodeBlock argument to RepatchBuffer.
  • jit/JITStubs.cpp: (JSC::JITThunks::tryCachePutByID): (JSC::JITThunks::tryCacheGetByID): (JSC::JITStubs::DEFINE_STUB_FUNCTION):
    • Pass CodeBlock argument for use by RepatchBuffer.
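
As a rough sketch of the shape of the change (not the verbatim WebKit code; the exact
signatures of makeWritable/makeExecutable are assumed here), one repatching event is
bracketed by a single permission flip in each direction, using only names mentioned in
the entries above:

    // RepatchBuffer spans one complete repatching event: the code is made writable
    // once on construction and executable again once on destruction, while the
    // individual repatch*/relink* helpers only flush the instruction cache.
    class RepatchBuffer {
    public:
        RepatchBuffer(CodeBlock* codeBlock)
        {
            JITCode& code = codeBlock->getJITCode();
            m_start = code.start();
            m_size = code.size();
            ExecutableAllocator::makeWritable(m_start, m_size);
        }

        ~RepatchBuffer()
        {
            ExecutableAllocator::makeExecutable(m_start, m_size);
        }

    private:
        void* m_start;
        size_t m_size;
    };
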
1/*
2 * Copyright (C) 2009 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#ifndef ARMAssembler_h
27#define ARMAssembler_h
28
29#include <wtf/Platform.h>
30
31#if ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7)
32
33#include "AssemblerBuffer.h"
34#include <wtf/Assertions.h>
35#include <wtf/Vector.h>
36#include <stdint.h>
37
38namespace JSC {
39
40namespace ARM {
41 typedef enum {
42 r0,
43 r1,
44 r2,
45 r3,
46 r4,
47 r5,
48 r6,
49 r7, wr = r7, // thumb work register
50 r8,
51 r9, sb = r9, // static base
52 r10, sl = r10, // stack limit
53 r11, fp = r11, // frame pointer
54 r12, ip = r12,
55 r13, sp = r13,
56 r14, lr = r14,
57 r15, pc = r15,
58 } RegisterID;
59
60 // s0 == d0 == q0
61 // s4 == d2 == q1
62 // etc
63 typedef enum {
64 s0 = 0,
65 s1 = 1,
66 s2 = 2,
67 s3 = 3,
68 s4 = 4,
69 s5 = 5,
70 s6 = 6,
71 s7 = 7,
72 s8 = 8,
73 s9 = 9,
74 s10 = 10,
75 s11 = 11,
76 s12 = 12,
77 s13 = 13,
78 s14 = 14,
79 s15 = 15,
80 s16 = 16,
81 s17 = 17,
82 s18 = 18,
83 s19 = 19,
84 s20 = 20,
85 s21 = 21,
86 s22 = 22,
87 s23 = 23,
88 s24 = 24,
89 s25 = 25,
90 s26 = 26,
91 s27 = 27,
92 s28 = 28,
93 s29 = 29,
94 s30 = 30,
95 s31 = 31,
96 d0 = 0 << 1,
97 d1 = 1 << 1,
98 d2 = 2 << 1,
99 d3 = 3 << 1,
100 d4 = 4 << 1,
101 d5 = 5 << 1,
102 d6 = 6 << 1,
103 d7 = 7 << 1,
104 d8 = 8 << 1,
105 d9 = 9 << 1,
106 d10 = 10 << 1,
107 d11 = 11 << 1,
108 d12 = 12 << 1,
109 d13 = 13 << 1,
110 d14 = 14 << 1,
111 d15 = 15 << 1,
112 d16 = 16 << 1,
113 d17 = 17 << 1,
114 d18 = 18 << 1,
115 d19 = 19 << 1,
116 d20 = 20 << 1,
117 d21 = 21 << 1,
118 d22 = 22 << 1,
119 d23 = 23 << 1,
120 d24 = 24 << 1,
121 d25 = 25 << 1,
122 d26 = 26 << 1,
123 d27 = 27 << 1,
124 d28 = 28 << 1,
125 d29 = 29 << 1,
126 d30 = 30 << 1,
127 d31 = 31 << 1,
128 q0 = 0 << 2,
129 q1 = 1 << 2,
130 q2 = 2 << 2,
131 q3 = 3 << 2,
132 q4 = 4 << 2,
133 q5 = 5 << 2,
134 q6 = 6 << 2,
135 q7 = 7 << 2,
136 q8 = 8 << 2,
137 q9 = 9 << 2,
138 q10 = 10 << 2,
139 q11 = 11 << 2,
140 q12 = 12 << 2,
141 q13 = 13 << 2,
142 q14 = 14 << 2,
143 q15 = 15 << 2,
144 q16 = 16 << 2,
145 q17 = 17 << 2,
146 q18 = 18 << 2,
147 q19 = 19 << 2,
148 q20 = 20 << 2,
149 q21 = 21 << 2,
150 q22 = 22 << 2,
151 q23 = 23 << 2,
152 q24 = 24 << 2,
153 q25 = 25 << 2,
154 q26 = 26 << 2,
155 q27 = 27 << 2,
156 q28 = 28 << 2,
157 q29 = 29 << 2,
158 q30 = 30 << 2,
159 q31 = 31 << 2,
160 } FPRegisterID;
161}
162
163class ARMv7Assembler;
164class ARMThumbImmediate {
165 friend class ARMv7Assembler;
166
167 typedef uint8_t ThumbImmediateType;
168 static const ThumbImmediateType TypeInvalid = 0;
169 static const ThumbImmediateType TypeEncoded = 1;
170 static const ThumbImmediateType TypeUInt16 = 2;
171
172 typedef union {
173 int16_t asInt;
174 struct {
175 unsigned imm8 : 8;
176 unsigned imm3 : 3;
177 unsigned i : 1;
178 unsigned imm4 : 4;
179 };
180 // If this is an encoded immediate, then it may describe a shift, or a pattern.
181 struct {
182 unsigned shiftValue7 : 7;
183 unsigned shiftAmount : 5;
184 };
185 struct {
186 unsigned immediate : 8;
187 unsigned pattern : 4;
188 };
189 } ThumbImmediateValue;
190
191 // byte0 contains the least significant byte; not using an array to make client code endian agnostic.
192 typedef union {
193 int32_t asInt;
194 struct {
195 uint8_t byte0;
196 uint8_t byte1;
197 uint8_t byte2;
198 uint8_t byte3;
199 };
200 } PatternBytes;
201
202 ALWAYS_INLINE static void countLeadingZerosPartial(uint32_t& value, int32_t& zeros, const int N)
203 {
204 if (value & ~((1<<N)-1)) /* check for any of the top N bits (of 2N bits) are set */ \
205 value >>= N; /* if any were set, lose the bottom N */ \
206 else /* if none of the top N bits are set, */ \
207 zeros += N; /* then we have identified N leading zeros */
208 }
209
210 static int32_t countLeadingZeros(uint32_t value)
211 {
212 if (!value)
213 return 32;
214
215 int32_t zeros = 0;
216 countLeadingZerosPartial(value, zeros, 16);
217 countLeadingZerosPartial(value, zeros, 8);
218 countLeadingZerosPartial(value, zeros, 4);
219 countLeadingZerosPartial(value, zeros, 2);
220 countLeadingZerosPartial(value, zeros, 1);
221 return zeros;
222 }
223
224 ARMThumbImmediate()
225 : m_type(TypeInvalid)
226 {
227 m_value.asInt = 0;
228 }
229
230 ARMThumbImmediate(ThumbImmediateType type, ThumbImmediateValue value)
231 : m_type(type)
232 , m_value(value)
233 {
234 }
235
236 ARMThumbImmediate(ThumbImmediateType type, uint16_t value)
237 : m_type(TypeUInt16)
238 {
239 m_value.asInt = value;
240 }
241
242public:
243 static ARMThumbImmediate makeEncodedImm(uint32_t value)
244 {
245 ThumbImmediateValue encoding;
246 encoding.asInt = 0;
247
248 // okay, these are easy.
249 if (value < 256) {
250 encoding.immediate = value;
251 encoding.pattern = 0;
252 return ARMThumbImmediate(TypeEncoded, encoding);
253 }
254
255 int32_t leadingZeros = countLeadingZeros(value);
256 // if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
257 ASSERT(leadingZeros < 24);
258
259 // Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
260 // Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
261 // zero. count(B) == 8, so the count of bits to be checked is 24 - count(Z).
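        // As a concrete illustration of the shift case below: value == 0x00ab0000
        // gives leadingZeros == 8, so rightShiftAmount == 16; shiftValue7 receives
        // 0xab (with its implicit top bit dropped) and shiftAmount == 8 + 8 == 16,
        // i.e. "0xab rotated right by 16", which reproduces 0x00ab0000.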
262 int32_t rightShiftAmount = 24 - leadingZeros;
263 if (value == ((value >> rightShiftAmount) << rightShiftAmount)) {
264 // Shift the value down to the low byte position. The assign to
265 // shiftValue7 drops the implicit top bit.
266 encoding.shiftValue7 = value >> rightShiftAmount;
267 // The encoded shift amount is the magnitude of a right rotate.
268 encoding.shiftAmount = 8 + leadingZeros;
269 return ARMThumbImmediate(TypeEncoded, encoding);
270 }
271
272 PatternBytes bytes;
273 bytes.asInt = value;
274
275 if ((bytes.byte0 == bytes.byte1) && (bytes.byte0 == bytes.byte2) && (bytes.byte0 == bytes.byte3)) {
276 encoding.immediate = bytes.byte0;
277 encoding.pattern = 3;
278 return ARMThumbImmediate(TypeEncoded, encoding);
279 }
280
281 if ((bytes.byte0 == bytes.byte2) && !(bytes.byte1 | bytes.byte3)) {
282 encoding.immediate = bytes.byte0;
283 encoding.pattern = 1;
284 return ARMThumbImmediate(TypeEncoded, encoding);
285 }
286
287 if ((bytes.byte1 == bytes.byte3) && !(bytes.byte0 | bytes.byte2)) {
288 encoding.immediate = bytes.byte1;
289 encoding.pattern = 2;
290 return ARMThumbImmediate(TypeEncoded, encoding);
291 }
292
293 return ARMThumbImmediate();
294 }
295
296 static ARMThumbImmediate makeUInt12(int32_t value)
297 {
298 return (!(value & 0xfffff000))
299 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
300 : ARMThumbImmediate();
301 }
302
303 static ARMThumbImmediate makeUInt12OrEncodedImm(int32_t value)
304 {
305 // If this is not a 12-bit unsigned int, try making an encoded immediate.
306 return (!(value & 0xfffff000))
307 ? ARMThumbImmediate(TypeUInt16, (uint16_t)value)
308 : makeEncodedImm(value);
309 }
310
311 // The 'make' methods, above, return a !isValid() value if the argument
312 // cannot be represented as the requested type. This method is called
313 // 'get' since the argument can always be represented.
314 static ARMThumbImmediate makeUInt16(uint16_t value)
315 {
316 return ARMThumbImmediate(TypeUInt16, value);
317 }
318
319 bool isValid()
320 {
321 return m_type != TypeInvalid;
322 }
323
324 // These methods rely on the format of encoded byte values.
325 bool isUInt3() { return !(m_value.asInt & 0xfff8); }
326 bool isUInt4() { return !(m_value.asInt & 0xfff0); }
327 bool isUInt5() { return !(m_value.asInt & 0xffe0); }
328 bool isUInt6() { return !(m_value.asInt & 0xffc0); }
329 bool isUInt7() { return !(m_value.asInt & 0xff80); }
330 bool isUInt8() { return !(m_value.asInt & 0xff00); }
331 bool isUInt9() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfe00); }
332 bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
333 bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
334 bool isUInt16() { return m_type == TypeUInt16; }
335 uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
336 uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
337 uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
338 uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
339 uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
340 uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
341 uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
342 uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
343 uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
344 uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
345
346 bool isEncodedImm() { return m_type == TypeEncoded; }
347
348private:
349 ThumbImmediateType m_type;
350 ThumbImmediateValue m_value;
351};
352
353
354typedef enum {
355 SRType_LSL,
356 SRType_LSR,
357 SRType_ASR,
358 SRType_ROR,
359
360 SRType_RRX = SRType_ROR
361} ARMShiftType;
362
363class ARMv7Assembler;
364class ShiftTypeAndAmount {
365 friend class ARMv7Assembler;
366
367public:
368 ShiftTypeAndAmount()
369 {
370 m_u.type = (ARMShiftType)0;
371 m_u.amount = 0;
372 }
373
374 ShiftTypeAndAmount(ARMShiftType type, unsigned amount)
375 {
376 m_u.type = type;
377 m_u.amount = amount & 31;
378 }
379
380 unsigned lo4() { return m_u.lo4; }
381 unsigned hi4() { return m_u.hi4; }
382
383private:
384 union {
385 struct {
386 unsigned lo4 : 4;
387 unsigned hi4 : 4;
388 };
389 struct {
390 unsigned type : 2;
391 unsigned amount : 5;
392 };
393 } m_u;
394};
395
396
397/*
398Some features of the Thumb instruction set are deprecated in ARMv7. Deprecated features affecting
399instructions supported by ARMv7-M are as follows:
400• use of the PC as <Rd> or <Rm> in a 16-bit ADD (SP plus register) instruction
401• use of the SP as <Rm> in a 16-bit ADD (SP plus register) instruction
402• use of the SP as <Rm> in a 16-bit CMP (register) instruction
403• use of MOV (register) instructions in which <Rd> is the SP or PC and <Rm> is also the SP or PC.
404• use of <Rn> as the lowest-numbered register in the register list of a 16-bit STM instruction with base
405register writeback
406*/
407
408class ARMv7Assembler {
409public:
410 typedef ARM::RegisterID RegisterID;
411 typedef ARM::FPRegisterID FPRegisterID;
412
413 // (HS, LO, HI, LS) -> (AE, B, A, BE)
414 // (VS, VC) -> (O, NO)
415 typedef enum {
416 ConditionEQ,
417 ConditionNE,
418 ConditionHS,
419 ConditionLO,
420 ConditionMI,
421 ConditionPL,
422 ConditionVS,
423 ConditionVC,
424 ConditionHI,
425 ConditionLS,
426 ConditionGE,
427 ConditionLT,
428 ConditionGT,
429 ConditionLE,
430 ConditionAL,
431
432 ConditionCS = ConditionHS,
433 ConditionCC = ConditionLO,
434 } Condition;
435
436 class JmpSrc {
437 friend class ARMv7Assembler;
438 friend class ARMInstructionFormatter;
439 public:
440 JmpSrc()
441 : m_offset(-1)
442 {
443 }
444
445 void enableLatePatch() { }
446 private:
447 JmpSrc(int offset)
448 : m_offset(offset)
449 {
450 }
451
452 int m_offset;
453 };
454
455 class JmpDst {
456 friend class ARMv7Assembler;
457 friend class ARMInstructionFormatter;
458 public:
459 JmpDst()
460 : m_offset(-1)
461 , m_used(false)
462 {
463 }
464
465 bool isUsed() const { return m_used; }
466 void used() { m_used = true; }
467 private:
468 JmpDst(int offset)
469 : m_offset(offset)
470 , m_used(false)
471 {
472 ASSERT(m_offset == offset);
473 }
474
475 int m_offset : 31;
476 int m_used : 1;
477 };
478
479private:
480
481 // ARMv7, Appx-A.6.3
482 bool BadReg(RegisterID reg)
483 {
484 return (reg == ARM::sp) || (reg == ARM::pc);
485 }
486
487 bool isSingleRegister(FPRegisterID reg)
488 {
489 // Check that the high bit isn't set (q16+), and that the low bit isn't (s1, s3, etc).
490 return !(reg & ~31);
491 }
492
493 bool isDoubleRegister(FPRegisterID reg)
494 {
495 // Check that the high bit isn't set (q16+), and that the low bit isn't (s1, s3, etc).
496 return !(reg & ~(31 << 1));
497 }
498
499 bool isQuadRegister(FPRegisterID reg)
500 {
501 return !(reg & ~(31 << 2));
502 }
503
504 uint32_t singleRegisterNum(FPRegisterID reg)
505 {
506 ASSERT(isSingleRegister(reg));
507 return reg;
508 }
509
510 uint32_t doubleRegisterNum(FPRegisterID reg)
511 {
512 ASSERT(isDoubleRegister(reg));
513 return reg >> 1;
514 }
515
516 uint32_t quadRegisterNum(FPRegisterID reg)
517 {
518 ASSERT(isQuadRegister(reg));
519 return reg >> 2;
520 }
521
522 uint32_t singleRegisterMask(FPRegisterID rd, int highBitsShift, int lowBitShift)
523 {
524 uint32_t rdNum = singleRegisterNum(rd);
525 uint32_t rdMask = (rdNum >> 1) << highBitsShift;
526 if (rdNum & 1)
527 rdMask |= 1 << lowBitShift;
528 return rdMask;
529 }
530
531 uint32_t doubleRegisterMask(FPRegisterID rd, int highBitShift, int lowBitsShift)
532 {
533 uint32_t rdNum = doubleRegisterNum(rd);
534 uint32_t rdMask = (rdNum & 0xf) << lowBitsShift;
535 if (rdNum & 16)
536 rdMask |= 1 << highBitShift;
537 return rdMask;
538 }
539
540 typedef enum {
541 OP_ADD_reg_T1 = 0x1800,
542 OP_ADD_S_reg_T1 = 0x1800,
543 OP_SUB_reg_T1 = 0x1A00,
544 OP_SUB_S_reg_T1 = 0x1A00,
545 OP_ADD_imm_T1 = 0x1C00,
546 OP_ADD_S_imm_T1 = 0x1C00,
547 OP_SUB_imm_T1 = 0x1E00,
548 OP_SUB_S_imm_T1 = 0x1E00,
549 OP_MOV_imm_T1 = 0x2000,
550 OP_CMP_imm_T1 = 0x2800,
551 OP_ADD_imm_T2 = 0x3000,
552 OP_ADD_S_imm_T2 = 0x3000,
553 OP_SUB_imm_T2 = 0x3800,
554 OP_SUB_S_imm_T2 = 0x3800,
555 OP_AND_reg_T1 = 0x4000,
556 OP_EOR_reg_T1 = 0x4040,
557 OP_TST_reg_T1 = 0x4200,
558 OP_CMP_reg_T1 = 0x4280,
559 OP_ORR_reg_T1 = 0x4300,
560 OP_MVN_reg_T1 = 0x43C0,
561 OP_ADD_reg_T2 = 0x4400,
562 OP_MOV_reg_T1 = 0x4600,
563 OP_BLX = 0x4700,
564 OP_BX = 0x4700,
565 OP_LDRH_reg_T1 = 0x5A00,
566 OP_STR_reg_T1 = 0x5000,
567 OP_LDR_reg_T1 = 0x5800,
568 OP_STR_imm_T1 = 0x6000,
569 OP_LDR_imm_T1 = 0x6800,
570 OP_LDRH_imm_T1 = 0x8800,
571 OP_STR_imm_T2 = 0x9000,
572 OP_LDR_imm_T2 = 0x9800,
573 OP_ADD_SP_imm_T1 = 0xA800,
574 OP_ADD_SP_imm_T2 = 0xB000,
575 OP_SUB_SP_imm_T1 = 0xB080,
576 OP_BKPT = 0xBE00,
577 OP_IT = 0xBF00,
578 } OpcodeID;
579
580 typedef enum {
581 OP_AND_reg_T2 = 0xEA00,
582 OP_TST_reg_T2 = 0xEA10,
583 OP_ORR_reg_T2 = 0xEA40,
584 OP_ASR_imm_T1 = 0xEA4F,
585 OP_LSL_imm_T1 = 0xEA4F,
586 OP_LSR_imm_T1 = 0xEA4F,
587 OP_ROR_imm_T1 = 0xEA4F,
588 OP_MVN_reg_T2 = 0xEA6F,
589 OP_EOR_reg_T2 = 0xEA80,
590 OP_ADD_reg_T3 = 0xEB00,
591 OP_ADD_S_reg_T3 = 0xEB10,
592 OP_SUB_reg_T2 = 0xEBA0,
593 OP_SUB_S_reg_T2 = 0xEBB0,
594 OP_CMP_reg_T2 = 0xEBB0,
595 OP_B_T4a = 0xF000,
596 OP_AND_imm_T1 = 0xF000,
597 OP_TST_imm = 0xF010,
598 OP_ORR_imm_T1 = 0xF040,
599 OP_MOV_imm_T2 = 0xF040,
600 OP_MVN_imm = 0xF060,
601 OP_EOR_imm_T1 = 0xF080,
602 OP_ADD_imm_T3 = 0xF100,
603 OP_ADD_S_imm_T3 = 0xF110,
604 OP_CMN_imm = 0xF110,
605 OP_SUB_imm_T3 = 0xF1A0,
606 OP_SUB_S_imm_T3 = 0xF1B0,
607 OP_CMP_imm_T2 = 0xF1B0,
608 OP_ADD_imm_T4 = 0xF200,
609 OP_MOV_imm_T3 = 0xF240,
610 OP_SUB_imm_T4 = 0xF2A0,
611 OP_MOVT = 0xF2C0,
612 OP_LDRH_reg_T2 = 0xF830,
613 OP_LDRH_imm_T3 = 0xF830,
614 OP_STR_imm_T4 = 0xF840,
615 OP_STR_reg_T2 = 0xF840,
616 OP_LDR_imm_T4 = 0xF850,
617 OP_LDR_reg_T2 = 0xF850,
618 OP_LDRH_imm_T2 = 0xF8B0,
619 OP_STR_imm_T3 = 0xF8C0,
620 OP_LDR_imm_T3 = 0xF8D0,
621 OP_LSL_reg_T2 = 0xFA00,
622 OP_LSR_reg_T2 = 0xFA20,
623 OP_ASR_reg_T2 = 0xFA40,
624 OP_ROR_reg_T2 = 0xFA60,
625 OP_SMULL_T1 = 0xFB80,
626 } OpcodeID1;
627
628 typedef enum {
629 OP_B_T4b = 0x9000,
630 } OpcodeID2;
631
632 struct FourFours {
633 FourFours(unsigned f3, unsigned f2, unsigned f1, unsigned f0)
634 {
635 m_u.f0 = f0;
636 m_u.f1 = f1;
637 m_u.f2 = f2;
638 m_u.f3 = f3;
639 }
640
641 union {
642 unsigned value;
643 struct {
644 unsigned f0 : 4;
645 unsigned f1 : 4;
646 unsigned f2 : 4;
647 unsigned f3 : 4;
648 };
649 } m_u;
650 };
651
652 class ARMInstructionFormatter;
653
654 // false means else!
655 bool ifThenElseConditionBit(Condition condition, bool isIf)
656 {
657 return isIf ? (condition & 1) : !(condition & 1);
658 }
659 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
660 {
661 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
662 | (ifThenElseConditionBit(condition, inst3if) << 2)
663 | (ifThenElseConditionBit(condition, inst4if) << 1)
664 | 1;
665 ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
666 return (condition << 4) | mask;
667 }
668 uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
669 {
670 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
671 | (ifThenElseConditionBit(condition, inst3if) << 2)
672 | 2;
673 ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
674 return (condition << 4) | mask;
675 }
676 uint8_t ifThenElse(Condition condition, bool inst2if)
677 {
678 int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
679 | 4;
680 ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
681 return (condition << 4) | mask;
682 }
683
684 uint8_t ifThenElse(Condition condition)
685 {
686 int mask = 8;
687 ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
688 return (condition << 4) | mask;
689 }
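    // For example (reading the helpers above): it(ConditionNE, false) encodes
    // firstcond == NE with mask == 0b0100, i.e. 0xBF00 | (ConditionNE << 4) | 4
    // == 0xBF14, an ITE NE block: the first conditional instruction runs on NE,
    // the second ("else") runs on EQ.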
690
691public:
692
693 void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
694 {
695 // Rd can only be SP if Rn is also SP.
696 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
697 ASSERT(rd != ARM::pc);
698 ASSERT(rn != ARM::pc);
699 ASSERT(imm.isValid());
700
701 if (rn == ARM::sp) {
702 if (!(rd & 8) && imm.isUInt10()) {
703 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2);
704 return;
705 } else if ((rd == ARM::sp) && imm.isUInt9()) {
706 m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2);
707 return;
708 }
709 } else if (!((rd | rn) & 8)) {
710 if (imm.isUInt3()) {
711 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
712 return;
713 } else if ((rd == rn) && imm.isUInt8()) {
714 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_imm_T2, rd, imm.getUInt8());
715 return;
716 }
717 }
718
719 if (imm.isEncodedImm())
720 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
721 else {
722 ASSERT(imm.isUInt12());
723 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
724 }
725 }
726
727 void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
728 {
729 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
730 ASSERT(rd != ARM::pc);
731 ASSERT(rn != ARM::pc);
732 ASSERT(!BadReg(rm));
733 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
734 }
735
736 // NOTE: In an IT block, add doesn't modify the flags register.
737 void add(RegisterID rd, RegisterID rn, RegisterID rm)
738 {
739 if (rd == rn)
740 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
741 else if (rd == rm)
742 m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rn, rd);
743 else if (!((rd | rn | rm) & 8))
744 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd);
745 else
746 add(rd, rn, rm, ShiftTypeAndAmount());
747 }
748
749 // Not allowed in an IT (if then) block.
750 void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
751 {
752 // Rd can only be SP if Rn is also SP.
753 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
754 ASSERT(rd != ARM::pc);
755 ASSERT(rn != ARM::pc);
756 ASSERT(imm.isEncodedImm());
757
758 if (!((rd | rn) & 8)) {
759 if (imm.isUInt3()) {
760 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
761 return;
762 } else if ((rd == rn) && imm.isUInt8()) {
763 m_formatter.oneWordOp5Reg3Imm8(OP_ADD_S_imm_T2, rd, imm.getUInt8());
764 return;
765 }
766 }
767
768 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_S_imm_T3, rn, rd, imm);
769 }
770
771 // Not allowed in an IT (if then) block?
772 void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
773 {
774 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
775 ASSERT(rd != ARM::pc);
776 ASSERT(rn != ARM::pc);
777 ASSERT(!BadReg(rm));
778 m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
779 }
780
781 // Not allowed in an IT (if then) block.
782 void add_S(RegisterID rd, RegisterID rn, RegisterID rm)
783 {
784 if (!((rd | rn | rm) & 8))
785 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_S_reg_T1, rm, rn, rd);
786 else
787 add_S(rd, rn, rm, ShiftTypeAndAmount());
788 }
789
790 void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
791 {
792 ASSERT(!BadReg(rd));
793 ASSERT(!BadReg(rn));
794 ASSERT(imm.isEncodedImm());
795 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
796 }
797
798 void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
799 {
800 ASSERT(!BadReg(rd));
801 ASSERT(!BadReg(rn));
802 ASSERT(!BadReg(rm));
803 m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
804 }
805
806 void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm)
807 {
808 if ((rd == rn) && !((rd | rm) & 8))
809 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd);
810 else if ((rd == rm) && !((rd | rn) & 8))
811 m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rn, rd);
812 else
813 ARM_and(rd, rn, rm, ShiftTypeAndAmount());
814 }
815
816 void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
817 {
818 ASSERT(!BadReg(rd));
819 ASSERT(!BadReg(rm));
820 ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
821 m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
822 }
823
824 void asr(RegisterID rd, RegisterID rn, RegisterID rm)
825 {
826 ASSERT(!BadReg(rd));
827 ASSERT(!BadReg(rn));
828 ASSERT(!BadReg(rm));
829 m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
830 }
831
832 // Only allowed in IT (if then) block if last instruction.
833 JmpSrc b()
834 {
835 m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
836 return JmpSrc(m_formatter.size());
837 }
838
839 // Only allowed in IT (if then) block if last instruction.
840 JmpSrc blx(RegisterID rm)
841 {
842 ASSERT(rm != ARM::pc);
843 m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
844 return JmpSrc(m_formatter.size());
845 }
846
847 // Only allowed in IT (if then) block if last instruction.
848 JmpSrc bx(RegisterID rm)
849 {
850 m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
851 return JmpSrc(m_formatter.size());
852 }
853
854 void bkpt(uint8_t imm=0)
855 {
856 m_formatter.oneWordOp8Imm8(OP_BKPT, imm);
857 }
858
859 void cmn(RegisterID rn, ARMThumbImmediate imm)
860 {
861 ASSERT(rn != ARM::pc);
862 ASSERT(imm.isEncodedImm());
863
864 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
865 }
866
867 void cmp(RegisterID rn, ARMThumbImmediate imm)
868 {
869 ASSERT(rn != ARM::pc);
870 ASSERT(imm.isEncodedImm());
871
872 if (!(rn & 8) && imm.isUInt8())
873 m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
874 else
875 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm);
876 }
877
878 void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
879 {
880 ASSERT(rn != ARM::pc);
881 ASSERT(!BadReg(rm));
882 m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
883 }
884
885 void cmp(RegisterID rn, RegisterID rm)
886 {
887 if ((rn | rm) & 8)
888 cmp(rn, rm, ShiftTypeAndAmount());
889 else
890 m_formatter.oneWordOp10Reg3Reg3(OP_CMP_reg_T1, rm, rn);
891 }
892
893 // xor is not spelled with an 'e'. :-(
894 void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
895 {
896 ASSERT(!BadReg(rd));
897 ASSERT(!BadReg(rn));
898 ASSERT(imm.isEncodedImm());
899 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
900 }
901
902 // xor is not spelled with an 'e'. :-(
903 void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
904 {
905 ASSERT(!BadReg(rd));
906 ASSERT(!BadReg(rn));
907 ASSERT(!BadReg(rm));
908 m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
909 }
910
911 // xor is not spelled with an 'e'. :-(
912 void eor(RegisterID rd, RegisterID rn, RegisterID rm)
913 {
914 if ((rd == rn) && !((rd | rm) & 8))
915 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rm, rd);
916 else if ((rd == rm) && !((rd | rn) & 8))
917 m_formatter.oneWordOp10Reg3Reg3(OP_EOR_reg_T1, rn, rd);
918 else
919 eor(rd, rn, rm, ShiftTypeAndAmount());
920 }
921
922 void it(Condition cond)
923 {
924 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond));
925 }
926
927 void it(Condition cond, bool inst2if)
928 {
929 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if));
930 }
931
932 void it(Condition cond, bool inst2if, bool inst3if)
933 {
934 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if));
935 }
936
937 void it(Condition cond, bool inst2if, bool inst3if, bool inst4if)
938 {
939 m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if));
940 }
941
942 // rt == ARM::pc only allowed if last instruction in IT (if then) block.
943 void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
944 {
945 ASSERT(rn != ARM::pc); // LDR (literal)
946 ASSERT(imm.isUInt12());
947
948 if (!((rt | rn) & 8) && imm.isUInt7())
949 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
950 else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10())
951 m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2);
952 else
953 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12());
954 }
955
956 // If index is set, this is a regular offset or a pre-indexed load;
957 // if index is not set then this is a post-index load.
958 //
959 // If wback is set rn is updated - this is a pre or post index load,
960 // if wback is not set this is a regular offset memory access.
961 //
962 // (-255 <= offset <= 255)
963 // _reg = REG[rn]
964 // _tmp = _reg + offset
965 // MEM[index ? _tmp : _reg] = REG[rt]
966 // if (wback) REG[rn] = _tmp
967 void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
968 {
969 ASSERT(rt != ARM::pc);
970 ASSERT(rn != ARM::pc);
971 ASSERT(index || wback);
972 ASSERT(!wback | (rt != rn));
973
974 bool add = true;
975 if (offset < 0) {
976 add = false;
977 offset = -offset;
978 }
979 ASSERT((offset & ~0xff) == 0);
980
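        // The 8-bit offset is packed together with the T4 encoding's control bits:
        // bit 8 is W (writeback), bit 9 is U (add rather than subtract), bit 10 is
        // P (index), and bit 11 is always set in this instruction form.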
981 offset |= (wback << 8);
982 offset |= (add << 9);
983 offset |= (index << 10);
984 offset |= (1 << 11);
985
986 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T4, rn, rt, offset);
987 }
988
989 // rt == ARM::pc only allowed if last instruction in IT (if then) block.
990 void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
991 {
992 ASSERT(rn != ARM::pc); // LDR (literal)
993 ASSERT(!BadReg(rm));
994 ASSERT(shift <= 3);
995
996 if (!shift && !((rt | rn | rm) & 8))
997 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
998 else
999 m_formatter.twoWordOp12Reg4FourFours(OP_LDR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1000 }
1001
1002 // rt == ARM::pc only allowed if last instruction in IT (if then) block.
1003 void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1004 {
1005 ASSERT(rn != ARM::pc); // LDR (literal)
1006 ASSERT(imm.isUInt12());
1007
1008 if (!((rt | rn) & 8) && imm.isUInt6())
1009 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
1010 else
1011 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
1012 }
1013
1014 // If index is set, this is a regular offset or a pre-indexed load;
1015 // if index is not set then this is a post-index load.
1016 //
1017 // If wback is set rn is updated - this is a pre or post index load,
1018 // if wback is not set this is a regular offset memory access.
1019 //
1020 // (-255 <= offset <= 255)
1021 // _reg = REG[rn]
1022 // _tmp = _reg + offset
1023 // MEM[index ? _tmp : _reg] = REG[rt]
1024 // if (wback) REG[rn] = _tmp
1025 void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1026 {
1027 ASSERT(rt != ARM::pc);
1028 ASSERT(rn != ARM::pc);
1029 ASSERT(index || wback);
1030 ASSERT(!wback | (rt != rn));
1031
1032 bool add = true;
1033 if (offset < 0) {
1034 add = false;
1035 offset = -offset;
1036 }
1037 ASSERT((offset & ~0xff) == 0);
1038
1039 offset |= (wback << 8);
1040 offset |= (add << 9);
1041 offset |= (index << 10);
1042 offset |= (1 << 11);
1043
1044 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset);
1045 }
1046
1047 void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
1048 {
1049 ASSERT(!BadReg(rt)); // Memory hint
1050 ASSERT(rn != ARM::pc); // LDRH (literal)
1051 ASSERT(!BadReg(rm));
1052 ASSERT(shift <= 3);
1053
1054 if (!shift && !((rt | rn | rm) & 8))
1055 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
1056 else
1057 m_formatter.twoWordOp12Reg4FourFours(OP_LDRH_reg_T2, rn, FourFours(rt, 0, shift, rm));
1058 }
1059
1060 void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1061 {
1062 ASSERT(!BadReg(rd));
1063 ASSERT(!BadReg(rm));
1064 ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
1065 m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1066 }
1067
1068 void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
1069 {
1070 ASSERT(!BadReg(rd));
1071 ASSERT(!BadReg(rn));
1072 ASSERT(!BadReg(rm));
1073 m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1074 }
1075
1076 void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1077 {
1078 ASSERT(!BadReg(rd));
1079 ASSERT(!BadReg(rm));
1080 ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
1081 m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1082 }
1083
1084 void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
1085 {
1086 ASSERT(!BadReg(rd));
1087 ASSERT(!BadReg(rn));
1088 ASSERT(!BadReg(rm));
1089 m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1090 }
1091
1092 void movT3(RegisterID rd, ARMThumbImmediate imm)
1093 {
1094 ASSERT(imm.isValid());
1095 ASSERT(!imm.isEncodedImm());
1096 ASSERT(!BadReg(rd));
1097
1098 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
1099 }
1100
1101 void mov(RegisterID rd, ARMThumbImmediate imm)
1102 {
1103 ASSERT(imm.isValid());
1104 ASSERT(!BadReg(rd));
1105
1106 if ((rd < 8) && imm.isUInt8())
1107 m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
1108 else if (imm.isEncodedImm())
1109 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T2, 0xf, rd, imm);
1110 else
1111 movT3(rd, imm);
1112 }
1113
1114 void mov(RegisterID rd, RegisterID rm)
1115 {
1116 m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd);
1117 }
1118
1119 void movt(RegisterID rd, ARMThumbImmediate imm)
1120 {
1121 ASSERT(imm.isUInt16());
1122 ASSERT(!BadReg(rd));
1123 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
1124 }
1125
1126 void mvn(RegisterID rd, ARMThumbImmediate imm)
1127 {
1128 ASSERT(imm.isEncodedImm());
1129 ASSERT(!BadReg(rd));
1130
1131 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
1132 }
1133
1134 void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
1135 {
1136 ASSERT(!BadReg(rd));
1137 ASSERT(!BadReg(rm));
1138 m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1139 }
1140
1141 void mvn(RegisterID rd, RegisterID rm)
1142 {
1143 if (!((rd | rm) & 8))
1144 m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd);
1145 else
1146 mvn(rd, rm, ShiftTypeAndAmount());
1147 }
1148
1149 void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1150 {
1151 ASSERT(!BadReg(rd));
1152 ASSERT(!BadReg(rn));
1153 ASSERT(imm.isEncodedImm());
1154 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
1155 }
1156
1157 void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1158 {
1159 ASSERT(!BadReg(rd));
1160 ASSERT(!BadReg(rn));
1161 ASSERT(!BadReg(rm));
1162 m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1163 }
1164
1165 void orr(RegisterID rd, RegisterID rn, RegisterID rm)
1166 {
1167 if ((rd == rn) && !((rd | rm) & 8))
1168 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rm, rd);
1169 else if ((rd == rm) && !((rd | rn) & 8))
1170 m_formatter.oneWordOp10Reg3Reg3(OP_ORR_reg_T1, rn, rd);
1171 else
1172 orr(rd, rn, rm, ShiftTypeAndAmount());
1173 }
1174
1175 void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
1176 {
1177 ASSERT(!BadReg(rd));
1178 ASSERT(!BadReg(rm));
1179 ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
1180 m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1181 }
1182
1183 void ror(RegisterID rd, RegisterID rn, RegisterID rm)
1184 {
1185 ASSERT(!BadReg(rd));
1186 ASSERT(!BadReg(rn));
1187 ASSERT(!BadReg(rm));
1188 m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
1189 }
1190
1191 void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
1192 {
1193 ASSERT(!BadReg(rdLo));
1194 ASSERT(!BadReg(rdHi));
1195 ASSERT(!BadReg(rn));
1196 ASSERT(!BadReg(rm));
1197 ASSERT(rdLo != rdHi);
1198 m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
1199 }
1200
1201 // rt == ARM::pc only allowed if last instruction in IT (if then) block.
1202 void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
1203 {
1204 ASSERT(rt != ARM::pc);
1205 ASSERT(rn != ARM::pc);
1206 ASSERT(imm.isUInt12());
1207
1208 if (!((rt | rn) & 8) && imm.isUInt7())
1209 m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
1210 else if ((rn == ARM::sp) && !(rt & 8) && imm.isUInt10())
1211 m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2);
1212 else
1213 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12());
1214 }
1215
1216 // If index is set, this is a regular offset or a pre-indexed store;
1217 // if index is not set then this is a post-index store.
1218 //
1219 // If wback is set rn is updated - this is a pre or post index store,
1220 // if wback is not set this is a regular offset memory access.
1221 //
1222 // (-255 <= offset <= 255)
1223 // _reg = REG[rn]
1224 // _tmp = _reg + offset
1225 // MEM[index ? _tmp : _reg] = REG[rt]
1226 // if (wback) REG[rn] = _tmp
1227 void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
1228 {
1229 ASSERT(rt != ARM::pc);
1230 ASSERT(rn != ARM::pc);
1231 ASSERT(index || wback);
1232 ASSERT(!wback | (rt != rn));
1233
1234 bool add = true;
1235 if (offset < 0) {
1236 add = false;
1237 offset = -offset;
1238 }
1239 ASSERT((offset & ~0xff) == 0);
1240
1241 offset |= (wback << 8);
1242 offset |= (add << 9);
1243 offset |= (index << 10);
1244 offset |= (1 << 11);
1245
1246 m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T4, rn, rt, offset);
1247 }
1248
1249 // rt == ARM::pc only allowed if last instruction in IT (if then) block.
1250 void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
1251 {
1252 ASSERT(rn != ARM::pc);
1253 ASSERT(!BadReg(rm));
1254 ASSERT(shift <= 3);
1255
1256 if (!shift && !((rt | rn | rm) & 8))
1257 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
1258 else
1259 m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm));
1260 }
1261
1262 void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1263 {
1264 // Rd can only be SP if Rn is also SP.
1265 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
1266 ASSERT(rd != ARM::pc);
1267 ASSERT(rn != ARM::pc);
1268 ASSERT(imm.isValid());
1269
1270 if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) {
1271 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
1272 return;
1273 } else if (!((rd | rn) & 8)) {
1274 if (imm.isUInt3()) {
1275 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1276 return;
1277 } else if ((rd == rn) && imm.isUInt8()) {
1278 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_imm_T2, rd, imm.getUInt8());
1279 return;
1280 }
1281 }
1282
1283 if (imm.isEncodedImm())
1284 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
1285 else {
1286 ASSERT(imm.isUInt12());
1287 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
1288 }
1289 }
1290
1291 void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1292 {
1293 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
1294 ASSERT(rd != ARM::pc);
1295 ASSERT(rn != ARM::pc);
1296 ASSERT(!BadReg(rm));
1297 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1298 }
1299
1300 // NOTE: In an IT block, sub doesn't modify the flags register.
1301 void sub(RegisterID rd, RegisterID rn, RegisterID rm)
1302 {
1303 if (!((rd | rn | rm) & 8))
1304 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd);
1305 else
1306 sub(rd, rn, rm, ShiftTypeAndAmount());
1307 }
1308
1309 // Not allowed in an IT (if then) block.
1310 void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
1311 {
1312 // Rd can only be SP if Rn is also SP.
1313 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
1314 ASSERT(rd != ARM::pc);
1315 ASSERT(rn != ARM::pc);
1316 ASSERT(imm.isValid());
1317
1318 if ((rn == ARM::sp) && (rd == ARM::sp) && imm.isUInt9()) {
1319 m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
1320 return;
1321 } else if (!((rd | rn) & 8)) {
1322 if (imm.isUInt3()) {
1323 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_imm_T1, (RegisterID)imm.getUInt3(), rn, rd);
1324 return;
1325 } else if ((rd == rn) && imm.isUInt8()) {
1326 m_formatter.oneWordOp5Reg3Imm8(OP_SUB_S_imm_T2, rd, imm.getUInt8());
1327 return;
1328 }
1329 }
1330
1331 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm);
1332 }
1333
1334 // Not allowed in an IT (if then) block?
1335 void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1336 {
1337 ASSERT((rd != ARM::sp) || (rn == ARM::sp));
1338 ASSERT(rd != ARM::pc);
1339 ASSERT(rn != ARM::pc);
1340 ASSERT(!BadReg(rm));
1341 m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
1342 }
1343
1344 // Not allowed in an IT (if then) block.
1345 void sub_S(RegisterID rd, RegisterID rn, RegisterID rm)
1346 {
1347 if (!((rd | rn | rm) & 8))
1348 m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_S_reg_T1, rm, rn, rd);
1349 else
1350 sub_S(rd, rn, rm, ShiftTypeAndAmount());
1351 }
1352
1353 void tst(RegisterID rn, ARMThumbImmediate imm)
1354 {
1355 ASSERT(!BadReg(rn));
1356 ASSERT(imm.isEncodedImm());
1357
1358 m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
1359 }
1360
1361 void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
1362 {
1363 ASSERT(!BadReg(rn));
1364 ASSERT(!BadReg(rm));
1365 m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
1366 }
1367
1368 void tst(RegisterID rn, RegisterID rm)
1369 {
1370 if ((rn | rm) & 8)
1371 tst(rn, rm, ShiftTypeAndAmount());
1372 else
1373 m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn);
1374 }
1375
1376 void vadd_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
1377 {
1378 m_formatter.vfpOp(0x0b00ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
1379 }
1380
1381 void vcmp_F64(FPRegisterID rd, FPRegisterID rm)
1382 {
1383 m_formatter.vfpOp(0x0bc0eeb4 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rm, 21, 16));
1384 }
1385
1386 void vcvt_F64_S32(FPRegisterID fd, FPRegisterID sm)
1387 {
1388 m_formatter.vfpOp(0x0bc0eeb8 | doubleRegisterMask(fd, 6, 28) | singleRegisterMask(sm, 16, 21));
1389 }
1390
1391 void vcvt_S32_F64(FPRegisterID sd, FPRegisterID fm)
1392 {
1393 m_formatter.vfpOp(0x0bc0eebd | singleRegisterMask(sd, 28, 6) | doubleRegisterMask(fm, 21, 16));
1394 }
1395
1396 void vldr(FPRegisterID rd, RegisterID rn, int32_t imm)
1397 {
1398 vmem(rd, rn, imm, true);
1399 }
1400
1401 void vmov(RegisterID rd, FPRegisterID sn)
1402 {
1403 m_formatter.vfpOp(0x0a10ee10 | (rd << 28) | singleRegisterMask(sn, 0, 23));
1404 }
1405
1406 void vmov(FPRegisterID sn, RegisterID rd)
1407 {
1408 m_formatter.vfpOp(0x0a10ee00 | (rd << 28) | singleRegisterMask(sn, 0, 23));
1409 }
1410
1411 // move FPSCR flags to APSR.
1412 void vmrs_APSR_nzcv_FPSCR()
1413 {
1414 m_formatter.vfpOp(0xfa10eef1);
1415 }
1416
1417 void vmul_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
1418 {
1419 m_formatter.vfpOp(0x0b00ee20 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
1420 }
1421
1422 void vstr(FPRegisterID rd, RegisterID rn, int32_t imm)
1423 {
1424 vmem(rd, rn, imm, false);
1425 }
1426
1427 void vsub_F64(FPRegisterID rd, FPRegisterID rn, FPRegisterID rm)
1428 {
1429 m_formatter.vfpOp(0x0b40ee30 | doubleRegisterMask(rd, 6, 28) | doubleRegisterMask(rn, 23, 0) | doubleRegisterMask(rm, 21, 16));
1430 }
1431
1432
1433 JmpDst label()
1434 {
1435 return JmpDst(m_formatter.size());
1436 }
1437
1438 JmpDst align(int alignment)
1439 {
1440 while (!m_formatter.isAligned(alignment))
1441 bkpt();
1442
1443 return label();
1444 }
1445
1446 static void* getRelocatedAddress(void* code, JmpSrc jump)
1447 {
1448 ASSERT(jump.m_offset != -1);
1449
1450 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
1451 }
1452
1453 static void* getRelocatedAddress(void* code, JmpDst destination)
1454 {
1455 ASSERT(destination.m_offset != -1);
1456
1457 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
1458 }
1459
1460 static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
1461 {
1462 return dst.m_offset - src.m_offset;
1463 }
1464
1465 static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
1466 {
1467 return dst.m_offset - src.m_offset;
1468 }
1469
1470 static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
1471 {
1472 return dst.m_offset - src.m_offset;
1473 }
1474
1475 // Assembler admin methods:
1476
1477 size_t size() const
1478 {
1479 return m_formatter.size();
1480 }
1481
1482 void* executableCopy(ExecutablePool* allocator)
1483 {
1484 void* copy = m_formatter.executableCopy(allocator);
1485 ASSERT(copy);
1486 return copy;
1487 }
1488
1489 static unsigned getCallReturnOffset(JmpSrc call)
1490 {
1491 ASSERT(call.m_offset >= 0);
1492 return call.m_offset;
1493 }
1494
1495 // Linking & patching:
1496 //
1497 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1498 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1499 // code has been finalized it is (platform support permitting) within a non-
1500 // writable region of memory; to modify the code in an execute-only executable
1501 // pool the 'repatch' and 'relink' methods should be used.
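    // A rough sketch of the intended call pattern (using the LinkBuffer and
    // RepatchBuffer helpers described in the change log above):
    //   LinkBuffer:    link* calls on the writable copy, then a single
    //                  makeExecutable + cacheFlush when the code is finalized.
    //   RepatchBuffer: makeWritable once up front, any number of relink*/repatch*
    //                  calls (each performs its own cacheFlush), then
    //                  makeExecutable once when the buffer is destroyed.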
1502
1503 void linkJump(JmpSrc from, JmpDst to)
1504 {
1505 ASSERT(to.m_offset != -1);
1506 ASSERT(from.m_offset != -1);
1507
1508 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(m_formatter.data()) + from.m_offset);
1509 intptr_t relative = to.m_offset - from.m_offset;
1510
1511 linkWithOffset(location, relative);
1512 }
1513
1514 static void linkJump(void* code, JmpSrc from, void* to)
1515 {
1516 ASSERT(from.m_offset != -1);
1517
1518 uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
1519 intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(location);
1520
1521 linkWithOffset(location, relative);
1522 }
1523
1524 // bah, this method should really be static, since it is used by the LinkBuffer.
1525 // return a bool saying whether the link was successful?
1526 static void linkCall(void* code, JmpSrc from, void* to)
1527 {
1528 ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
1529 ASSERT(from.m_offset != -1);
1530 ASSERT(reinterpret_cast<intptr_t>(to) & 1);
1531
1532 setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
1533 }
1534
1535 static void linkPointer(void* code, JmpDst where, void* value)
1536 {
1537 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
1538 }
1539
1540 static void relinkJump(void* from, void* to)
1541 {
1542 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
1543 ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
1544
1545 intptr_t relative = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
1546 linkWithOffset(reinterpret_cast<uint16_t*>(from), relative);
1547
1548 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 2, 2 * sizeof(uint16_t));
1549 }
1550
1551 static void relinkCall(void* from, void* to)
1552 {
1553 ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
1554 ASSERT(reinterpret_cast<intptr_t>(to) & 1);
1555
1556 setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
1557
1558 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 4 * sizeof(uint16_t));
1559 }
1560
1561 static void repatchInt32(void* where, int32_t value)
1562 {
1563 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
1564
1565 setInt32(where, value);
1566
1567 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
1568 }
1569
1570 static void repatchPointer(void* where, void* value)
1571 {
1572 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
1573
1574 setPointer(where, value);
1575
1576 ExecutableAllocator::cacheFlush(reinterpret_cast<uint16_t*>(where) - 4, 4 * sizeof(uint16_t));
1577 }
1578
1579 static void repatchLoadPtrToLEA(void* where)
1580 {
1581 ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
1582
1583 uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
1584 ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
1585
1586 *loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
1587 ExecutableAllocator::cacheFlush(loadOp, sizeof(uint16_t));
1588 }
1589
1590private:
1591
1592 // ARM VFP addresses can be offset by an 8-bit immediate (plus a sign bit), left shifted by 2.
1593 // (i.e. +/-(0..255) 32-bit words)
1594 void vmem(FPRegisterID rd, RegisterID rn, int32_t imm, bool isLoad)
1595 {
1596 bool up;
1597 uint32_t offset;
1598 if (imm < 0) {
1599 offset = -imm;
1600 up = false;
1601 } else {
1602 offset = imm;
1603 up = true;
1604 }
1605
1606 // offset is effectively left-shifted by 2 already (the bottom two bits are zero, and not
1607 // represented in the instruction). Left shift by 14 to move it into position 0x00AA0000.
1608 ASSERT((offset & ~(0xff << 2)) == 0);
1609 offset <<= 14;
1610
1611 m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
1612 }
1613
1614 static void setInt32(void* code, uint32_t value)
1615 {
1616 uint16_t* location = reinterpret_cast<uint16_t*>(code);
1617
1618 uint16_t lo16 = value;
1619 uint16_t hi16 = value >> 16;
1620
1621 spliceHi5(location - 4, lo16);
1622 spliceLo11(location - 3, lo16);
1623 spliceHi5(location - 2, hi16);
1624 spliceLo11(location - 1, hi16);
1625
1626 ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t));
1627 }
1628
1629 static void setPointer(void* code, void* value)
1630 {
1631 setInt32(code, reinterpret_cast<uint32_t>(value));
1632 }
1633
1634 // Linking & patching:
1635 // This method assumes that the JmpSrc being linked is a T4 b instruction.
1636 static void linkWithOffset(uint16_t* instruction, intptr_t relative)
1637 {
1638 // Currently branches > 16m = mostly deathy.
1639 if (((relative << 7) >> 7) != relative) {
1640 // FIXME: This CRASH means we cannot turn the JIT on by default on arm-v7.
1641 fprintf(stderr, "Error: Cannot link T4b.\n");
1642 CRASH();
1643 }
1644
1645 // ARM encoding for the top two bits below the sign bit is 'peculiar'.
1646 if (relative >= 0)
1647 relative ^= 0xC00000;
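            // (In the T4 encoding these two bits are stored as J1 = NOT(I1 EOR S)
            // and J2 = NOT(I2 EOR S); with S == 0 that reduces to a plain
            // inversion, hence the xor above.)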
1648
1649 // All branch offsets should be an even distance.
1650 ASSERT(!(relative & 1));
1651
1652 int word1 = ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
1653 int word2 = ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
1654
1655 instruction[-2] = OP_B_T4a | word1;
1656 instruction[-1] = OP_B_T4b | word2;
1657 }
1658
1659 // These functions can be used to splice 16-bit immediates back into previously generated instructions.
1660 static void spliceHi5(uint16_t* where, uint16_t what)
1661 {
1662 uint16_t pattern = (what >> 12) | ((what & 0x0800) >> 1);
1663 *where = (*where & 0xFBF0) | pattern;
1664 }
1665 static void spliceLo11(uint16_t* where, uint16_t what)
1666 {
1667 uint16_t pattern = ((what & 0x0700) << 4) | (what & 0x00FF);
1668 *where = (*where & 0x8F00) | pattern;
1669 }
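    // These track the Thumb-2 MOVW/MOVT immediate layout: the 16-bit value is
    // split into imm4:i:imm3:imm8, with imm4 and i spliced into the first
    // halfword and imm3 and imm8 into the second. For example, splicing 0xabcd
    // writes imm4 == 0xa and i == 1 into the first halfword, and imm3 == 0b011,
    // imm8 == 0xcd into the second.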
1670
1671 class ARMInstructionFormatter {
1672 public:
1673 void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm)
1674 {
1675 m_buffer.putShort(op | (rd << 8) | imm);
1676 }
1677
1678 void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2)
1679 {
1680 m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2);
1681 }
1682
1683 void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3)
1684 {
1685 m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
1686 }
1687
1688 void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
1689 {
1690 m_buffer.putShort(op | imm);
1691 }
1692
1693 void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2)
1694 {
1695 m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7));
1696 }
1697 void oneWordOp9Imm7(OpcodeID op, uint8_t imm)
1698 {
1699 m_buffer.putShort(op | imm);
1700 }
1701
1702 void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2)
1703 {
1704 m_buffer.putShort(op | (reg1 << 3) | reg2);
1705 }
1706
1707 void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff)
1708 {
1709 m_buffer.putShort(op | reg);
1710 m_buffer.putShort(ff.m_u.value);
1711 }
1712
1713 void twoWordOp16FourFours(OpcodeID1 op, FourFours ff)
1714 {
1715 m_buffer.putShort(op);
1716 m_buffer.putShort(ff.m_u.value);
1717 }
1718
1719 void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
1720 {
1721 m_buffer.putShort(op1);
1722 m_buffer.putShort(op2);
1723 }
1724
1725 void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
1726 {
1727 m_buffer.putShort(op | (imm.m_value.i << 10) | imm4);
1728 m_buffer.putShort((imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8);
1729 }
1730
1731 void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
1732 {
1733 m_buffer.putShort(op | reg1);
1734 m_buffer.putShort((reg2 << 12) | imm);
1735 }
1736
1737 void vfpOp(int32_t op)
1738 {
1739 m_buffer.putInt(op);
1740 }
1741
1742
1743 // Administrative methods:
1744
1745 size_t size() const { return m_buffer.size(); }
1746 bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
1747 void* data() const { return m_buffer.data(); }
1748 void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
1749
1750 private:
1751 AssemblerBuffer m_buffer;
1752 } m_formatter;
1753};
1754
1755} // namespace JSC
1756
1757#endif // ENABLE(ASSEMBLER) && PLATFORM_ARM_ARCH(7)
1758
1759#endif // ARMAssembler_h