source: webkit/trunk/JavaScriptCore/assembler/MacroAssemblerARMv7.h@ 58562

Last change on this file since 58562 was 58562, checked in by [email protected], 15 years ago

2010-04-29 Oliver Hunt <[email protected]>

Reviewed by Gavin Barraclough.

Add codegen support for unsigned right shift
https://bugs.webkit.org/show_bug.cgi?id=38375

Expose unsigned right shift in the macro assembler, and make use of it
from the jit. Currently if the result is outside the range 0..2^31-1
we simply fall back to the slow case, even in JSVALUE64 and JSVALUE32_64
where technically we could still return an immediate value.

  • assembler/MacroAssemblerARM.h: (JSC::MacroAssemblerARM::urshift32):
  • assembler/MacroAssemblerARMv7.h: (JSC::MacroAssemblerARMv7::urshift32):
  • assembler/MacroAssemblerX86Common.h: (JSC::MacroAssemblerX86Common::urshift32):
  • assembler/X86Assembler.h: (JSC::X86Assembler::): (JSC::X86Assembler::shrl_i8r): (JSC::X86Assembler::shrl_CLr):

Add unsigned right shift to the x86 assembler

  • jit/JIT.cpp: (JSC::JIT::privateCompileMainPass): (JSC::JIT::privateCompileSlowCases):

op_rshift no longer simply gets thrown to a stub function

  • jit/JIT.h:
  • jit/JITArithmetic.cpp: (JSC::JIT::emit_op_urshift): (JSC::JIT::emitSlow_op_urshift): JSVALUE32 and JSVALUE64 implementation. Only supports double lhs in JSVALUE64.
  • jit/JITArithmetic32_64.cpp: (JSC::JIT::emit_op_rshift): (JSC::JIT::emitSlow_op_rshift): (JSC::JIT::emit_op_urshift): (JSC::JIT::emitSlow_op_urshift): Refactor right shift code to have shared implementation between signed and unsigned versions.
File size: 40.1 KB
Line 
1/*
2 * Copyright (C) 2009 Apple Inc. All rights reserved.
3 * Copyright (C) 2010 University of Szeged
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
18 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
19 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
20 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
22 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#ifndef MacroAssemblerARMv7_h
28#define MacroAssemblerARMv7_h
29
30#if ENABLE(ASSEMBLER)
31
32#include "ARMv7Assembler.h"
33#include "AbstractMacroAssembler.h"
34
35namespace JSC {
36
37class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
38 // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
39 // - dTR is likely used more than aTR, and we'll get better instruction
40 // encoding if it's in the low 8 registers.
41 static const ARMRegisters::RegisterID dataTempRegister = ARMRegisters::ip;
42 static const RegisterID addressTempRegister = ARMRegisters::r3;
43 static const FPRegisterID fpTempRegister = ARMRegisters::d7;
44
45 struct ArmAddress {
46 enum AddressType {
47 HasOffset,
48 HasIndex,
49 } type;
50 RegisterID base;
51 union {
52 int32_t offset;
53 struct {
54 RegisterID index;
55 Scale scale;
56 };
57 } u;
58
59 explicit ArmAddress(RegisterID base, int32_t offset = 0)
60 : type(HasOffset)
61 , base(base)
62 {
63 u.offset = offset;
64 }
65
66 explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
67 : type(HasIndex)
68 , base(base)
69 {
70 u.index = index;
71 u.scale = scale;
72 }
73 };
74
75public:
76
77 static const Scale ScalePtr = TimesFour;
78
79 enum Condition {
80 Equal = ARMv7Assembler::ConditionEQ,
81 NotEqual = ARMv7Assembler::ConditionNE,
82 Above = ARMv7Assembler::ConditionHI,
83 AboveOrEqual = ARMv7Assembler::ConditionHS,
84 Below = ARMv7Assembler::ConditionLO,
85 BelowOrEqual = ARMv7Assembler::ConditionLS,
86 GreaterThan = ARMv7Assembler::ConditionGT,
87 GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
88 LessThan = ARMv7Assembler::ConditionLT,
89 LessThanOrEqual = ARMv7Assembler::ConditionLE,
90 Overflow = ARMv7Assembler::ConditionVS,
91 Signed = ARMv7Assembler::ConditionMI,
92 Zero = ARMv7Assembler::ConditionEQ,
93 NonZero = ARMv7Assembler::ConditionNE
94 };
95 enum DoubleCondition {
96 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
97 DoubleEqual = ARMv7Assembler::ConditionEQ,
98 DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
99 DoubleGreaterThan = ARMv7Assembler::ConditionGT,
100 DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
101 DoubleLessThan = ARMv7Assembler::ConditionLO,
102 DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
103 // If either operand is NaN, these conditions always evaluate to true.
104 DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
105 DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
106 DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
107 DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
108 DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
109 DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
110 };
111
112 static const RegisterID stackPointerRegister = ARMRegisters::sp;
113 static const RegisterID linkRegister = ARMRegisters::lr;
114
115 // Integer arithmetic operations:
116 //
117 // Operations are typically two operand - operation(source, srcDst)
118 // For many operations the source may be an Imm32, the srcDst operand
119 // may often be a memory location (explictly described using an Address
120 // object).
121
122 void add32(RegisterID src, RegisterID dest)
123 {
124 m_assembler.add(dest, dest, src);
125 }
126
127 void add32(Imm32 imm, RegisterID dest)
128 {
129 add32(imm, dest, dest);
130 }
131
132 void add32(Imm32 imm, RegisterID src, RegisterID dest)
133 {
134 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
135 if (armImm.isValid())
136 m_assembler.add(dest, src, armImm);
137 else {
138 move(imm, dataTempRegister);
139 m_assembler.add(dest, src, dataTempRegister);
140 }
141 }
142
143 void add32(Imm32 imm, Address address)
144 {
145 load32(address, dataTempRegister);
146
147 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
148 if (armImm.isValid())
149 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
150 else {
151 // Hrrrm, since dataTempRegister holds the data loaded,
152 // use addressTempRegister to hold the immediate.
153 move(imm, addressTempRegister);
154 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
155 }
156
157 store32(dataTempRegister, address);
158 }
159
160 void add32(Address src, RegisterID dest)
161 {
162 load32(src, dataTempRegister);
163 add32(dataTempRegister, dest);
164 }
165
166 void add32(Imm32 imm, AbsoluteAddress address)
167 {
168 load32(address.m_ptr, dataTempRegister);
169
170 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
171 if (armImm.isValid())
172 m_assembler.add(dataTempRegister, dataTempRegister, armImm);
173 else {
174 // Hrrrm, since dataTempRegister holds the data loaded,
175 // use addressTempRegister to hold the immediate.
176 move(imm, addressTempRegister);
177 m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
178 }
179
180 store32(dataTempRegister, address.m_ptr);
181 }
182
183 void and32(RegisterID src, RegisterID dest)
184 {
185 m_assembler.ARM_and(dest, dest, src);
186 }
187
188 void and32(Imm32 imm, RegisterID dest)
189 {
190 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
191 if (armImm.isValid())
192 m_assembler.ARM_and(dest, dest, armImm);
193 else {
194 move(imm, dataTempRegister);
195 m_assembler.ARM_and(dest, dest, dataTempRegister);
196 }
197 }
198
199 void lshift32(RegisterID shift_amount, RegisterID dest)
200 {
201 // Clamp the shift to the range 0..31
202 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
203 ASSERT(armImm.isValid());
204 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
205
206 m_assembler.lsl(dest, dest, dataTempRegister);
207 }
208
209 void lshift32(Imm32 imm, RegisterID dest)
210 {
211 m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
212 }
213
214 void mul32(RegisterID src, RegisterID dest)
215 {
216 m_assembler.smull(dest, dataTempRegister, dest, src);
217 }
218
219 void mul32(Imm32 imm, RegisterID src, RegisterID dest)
220 {
221 move(imm, dataTempRegister);
222 m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
223 }
224
225 void not32(RegisterID srcDest)
226 {
227 m_assembler.mvn(srcDest, srcDest);
228 }
229
230 void or32(RegisterID src, RegisterID dest)
231 {
232 m_assembler.orr(dest, dest, src);
233 }
234
235 void or32(Imm32 imm, RegisterID dest)
236 {
237 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
238 if (armImm.isValid())
239 m_assembler.orr(dest, dest, armImm);
240 else {
241 move(imm, dataTempRegister);
242 m_assembler.orr(dest, dest, dataTempRegister);
243 }
244 }
245
246 void rshift32(RegisterID shift_amount, RegisterID dest)
247 {
248 // Clamp the shift to the range 0..31
249 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
250 ASSERT(armImm.isValid());
251 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
252
253 m_assembler.asr(dest, dest, dataTempRegister);
254 }
255
256 void rshift32(Imm32 imm, RegisterID dest)
257 {
258 m_assembler.asr(dest, dest, imm.m_value & 0x1f);
259 }
260
261 void urshift32(RegisterID shift_amount, RegisterID dest)
262 {
263 // Clamp the shift to the range 0..31
264 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
265 ASSERT(armImm.isValid());
266 m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
267
268 m_assembler.lsr(dest, dest, dataTempRegister);
269 }
270
271 void urshift32(Imm32 imm, RegisterID dest)
272 {
273 m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
274 }
275
276 void sub32(RegisterID src, RegisterID dest)
277 {
278 m_assembler.sub(dest, dest, src);
279 }
280
281 void sub32(Imm32 imm, RegisterID dest)
282 {
283 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
284 if (armImm.isValid())
285 m_assembler.sub(dest, dest, armImm);
286 else {
287 move(imm, dataTempRegister);
288 m_assembler.sub(dest, dest, dataTempRegister);
289 }
290 }
291
292 void sub32(Imm32 imm, Address address)
293 {
294 load32(address, dataTempRegister);
295
296 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
297 if (armImm.isValid())
298 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
299 else {
300 // Hrrrm, since dataTempRegister holds the data loaded,
301 // use addressTempRegister to hold the immediate.
302 move(imm, addressTempRegister);
303 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
304 }
305
306 store32(dataTempRegister, address);
307 }
308
309 void sub32(Address src, RegisterID dest)
310 {
311 load32(src, dataTempRegister);
312 sub32(dataTempRegister, dest);
313 }
314
315 void sub32(Imm32 imm, AbsoluteAddress address)
316 {
317 load32(address.m_ptr, dataTempRegister);
318
319 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
320 if (armImm.isValid())
321 m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
322 else {
323 // Hrrrm, since dataTempRegister holds the data loaded,
324 // use addressTempRegister to hold the immediate.
325 move(imm, addressTempRegister);
326 m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
327 }
328
329 store32(dataTempRegister, address.m_ptr);
330 }
331
332 void xor32(RegisterID src, RegisterID dest)
333 {
334 m_assembler.eor(dest, dest, src);
335 }
336
337 void xor32(Imm32 imm, RegisterID dest)
338 {
339 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
340 if (armImm.isValid())
341 m_assembler.eor(dest, dest, armImm);
342 else {
343 move(imm, dataTempRegister);
344 m_assembler.eor(dest, dest, dataTempRegister);
345 }
346 }
347
348
349 // Memory access operations:
350 //
351 // Loads are of the form load(address, destination) and stores of the form
352 // store(source, address). The source for a store may be an Imm32. Address
353 // operand objects to loads and store will be implicitly constructed if a
354 // register is passed.
355
356private:
357 void load32(ArmAddress address, RegisterID dest)
358 {
359 if (address.type == ArmAddress::HasIndex)
360 m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
361 else if (address.u.offset >= 0) {
362 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
363 ASSERT(armImm.isValid());
364 m_assembler.ldr(dest, address.base, armImm);
365 } else {
366 ASSERT(address.u.offset >= -255);
367 m_assembler.ldr(dest, address.base, address.u.offset, true, false);
368 }
369 }
370
371 void load16(ArmAddress address, RegisterID dest)
372 {
373 if (address.type == ArmAddress::HasIndex)
374 m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
375 else if (address.u.offset >= 0) {
376 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
377 ASSERT(armImm.isValid());
378 m_assembler.ldrh(dest, address.base, armImm);
379 } else {
380 ASSERT(address.u.offset >= -255);
381 m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
382 }
383 }
384
385 void load8(ArmAddress address, RegisterID dest)
386 {
387 if (address.type == ArmAddress::HasIndex)
388 m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
389 else if (address.u.offset >= 0) {
390 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
391 ASSERT(armImm.isValid());
392 m_assembler.ldrb(dest, address.base, armImm);
393 } else {
394 ASSERT(address.u.offset >= -255);
395 m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
396 }
397 }
398
399 void store32(RegisterID src, ArmAddress address)
400 {
401 if (address.type == ArmAddress::HasIndex)
402 m_assembler.str(src, address.base, address.u.index, address.u.scale);
403 else if (address.u.offset >= 0) {
404 ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
405 ASSERT(armImm.isValid());
406 m_assembler.str(src, address.base, armImm);
407 } else {
408 ASSERT(address.u.offset >= -255);
409 m_assembler.str(src, address.base, address.u.offset, true, false);
410 }
411 }
412
413public:
414 void load32(ImplicitAddress address, RegisterID dest)
415 {
416 load32(setupArmAddress(address), dest);
417 }
418
419 void load32(BaseIndex address, RegisterID dest)
420 {
421 load32(setupArmAddress(address), dest);
422 }
423
424 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
425 {
426 load32(setupArmAddress(address), dest);
427 }
428
429 void load32(void* address, RegisterID dest)
430 {
431 move(ImmPtr(address), addressTempRegister);
432 m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
433 }
434
435 void load8(ImplicitAddress address, RegisterID dest)
436 {
437 load8(setupArmAddress(address), dest);
438 }
439
440 DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
441 {
442 DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
443 load32(ArmAddress(address.base, dataTempRegister), dest);
444 return label;
445 }
446
447 Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
448 {
449 Label label(this);
450 moveFixedWidthEncoding(Imm32(address.offset), dataTempRegister);
451 load32(ArmAddress(address.base, dataTempRegister), dest);
452 return label;
453 }
454
455 void load16(BaseIndex address, RegisterID dest)
456 {
457 m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
458 }
459
460 DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
461 {
462 DataLabel32 label = moveWithPatch(Imm32(address.offset), dataTempRegister);
463 store32(src, ArmAddress(address.base, dataTempRegister));
464 return label;
465 }
466
467 void store32(RegisterID src, ImplicitAddress address)
468 {
469 store32(src, setupArmAddress(address));
470 }
471
472 void store32(RegisterID src, BaseIndex address)
473 {
474 store32(src, setupArmAddress(address));
475 }
476
477 void store32(Imm32 imm, ImplicitAddress address)
478 {
479 move(imm, dataTempRegister);
480 store32(dataTempRegister, setupArmAddress(address));
481 }
482
483 void store32(RegisterID src, void* address)
484 {
485 move(ImmPtr(address), addressTempRegister);
486 m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
487 }
488
489 void store32(Imm32 imm, void* address)
490 {
491 move(imm, dataTempRegister);
492 store32(dataTempRegister, address);
493 }
494
495
496 // Floating-point operations:
497
498 bool supportsFloatingPoint() const { return true; }
499 // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
500 // If a value is not representable as an integer, and possibly for some values that are,
501 // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
502 // a branch will be taken. It is not clear whether this interface will be well suited to
503 // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
504 // failure values (saturates to INT_MIN & INT_MAX, NaN reulsts in a value of 0). This is a
505 // temporary solution while we work out what this interface should be. Either we need to
506 // decide to make this interface work on all platforms, rework the interface to make it more
507 // generic, or decide that the MacroAssembler cannot practically be used to abstracted these
508 // operations, and make clients go directly to the m_assembler to plant truncation instructions.
509 // In short, FIXME:.
510 bool supportsFloatingPointTruncate() const { return false; }
511
512 bool supportsFloatingPointSqrt() const
513 {
514 return false;
515 }
516
517 void loadDouble(ImplicitAddress address, FPRegisterID dest)
518 {
519 RegisterID base = address.base;
520 int32_t offset = address.offset;
521
522 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
523 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
524 add32(Imm32(offset), base, addressTempRegister);
525 base = addressTempRegister;
526 offset = 0;
527 }
528
529 m_assembler.vldr(dest, base, offset);
530 }
531
532 void storeDouble(FPRegisterID src, ImplicitAddress address)
533 {
534 RegisterID base = address.base;
535 int32_t offset = address.offset;
536
537 // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
538 if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
539 add32(Imm32(offset), base, addressTempRegister);
540 base = addressTempRegister;
541 offset = 0;
542 }
543
544 m_assembler.vstr(src, base, offset);
545 }
546
547 void addDouble(FPRegisterID src, FPRegisterID dest)
548 {
549 m_assembler.vadd_F64(dest, dest, src);
550 }
551
552 void addDouble(Address src, FPRegisterID dest)
553 {
554 loadDouble(src, fpTempRegister);
555 addDouble(fpTempRegister, dest);
556 }
557
558 void subDouble(FPRegisterID src, FPRegisterID dest)
559 {
560 m_assembler.vsub_F64(dest, dest, src);
561 }
562
563 void subDouble(Address src, FPRegisterID dest)
564 {
565 loadDouble(src, fpTempRegister);
566 subDouble(fpTempRegister, dest);
567 }
568
569 void mulDouble(FPRegisterID src, FPRegisterID dest)
570 {
571 m_assembler.vmul_F64(dest, dest, src);
572 }
573
574 void mulDouble(Address src, FPRegisterID dest)
575 {
576 loadDouble(src, fpTempRegister);
577 mulDouble(fpTempRegister, dest);
578 }
579
580 void sqrtDouble(FPRegisterID, FPRegisterID)
581 {
582 ASSERT_NOT_REACHED();
583 }
584
585 void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
586 {
587 m_assembler.vmov(fpTempRegister, src);
588 m_assembler.vcvt_F64_S32(dest, fpTempRegister);
589 }
590
591 Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
592 {
593 m_assembler.vcmp_F64(left, right);
594 m_assembler.vmrs_APSR_nzcv_FPSCR();
595
596 if (cond == DoubleNotEqual) {
597 // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
598 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
599 Jump result = makeBranch(ARMv7Assembler::ConditionNE);
600 unordered.link(this);
601 return result;
602 }
603 if (cond == DoubleEqualOrUnordered) {
604 Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
605 Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
606 unordered.link(this);
607 // We get here if either unordered, or equal.
608 Jump result = makeJump();
609 notEqual.link(this);
610 return result;
611 }
612 return makeBranch(cond);
613 }
614
615 Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
616 {
617 ASSERT_NOT_REACHED();
618 return jump();
619 }
620
621
622 // Stack manipulation operations:
623 //
624 // The ABI is assumed to provide a stack abstraction to memory,
625 // containing machine word sized units of data. Push and pop
626 // operations add and remove a single register sized unit of data
627 // to or from the stack. Peek and poke operations read or write
628 // values on the stack, without moving the current stack position.
629
630 void pop(RegisterID dest)
631 {
632 // store postindexed with writeback
633 m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
634 }
635
636 void push(RegisterID src)
637 {
638 // store preindexed with writeback
639 m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
640 }
641
642 void push(Address address)
643 {
644 load32(address, dataTempRegister);
645 push(dataTempRegister);
646 }
647
648 void push(Imm32 imm)
649 {
650 move(imm, dataTempRegister);
651 push(dataTempRegister);
652 }
653
654 // Register move operations:
655 //
656 // Move values in registers.
657
658 void move(Imm32 imm, RegisterID dest)
659 {
660 uint32_t value = imm.m_value;
661
662 if (imm.m_isPointer)
663 moveFixedWidthEncoding(imm, dest);
664 else {
665 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
666
667 if (armImm.isValid())
668 m_assembler.mov(dest, armImm);
669 else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
670 m_assembler.mvn(dest, armImm);
671 else {
672 m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
673 if (value & 0xffff0000)
674 m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
675 }
676 }
677 }
678
679 void move(RegisterID src, RegisterID dest)
680 {
681 m_assembler.mov(dest, src);
682 }
683
684 void move(ImmPtr imm, RegisterID dest)
685 {
686 move(Imm32(imm), dest);
687 }
688
689 void swap(RegisterID reg1, RegisterID reg2)
690 {
691 move(reg1, dataTempRegister);
692 move(reg2, reg1);
693 move(dataTempRegister, reg2);
694 }
695
696 void signExtend32ToPtr(RegisterID src, RegisterID dest)
697 {
698 if (src != dest)
699 move(src, dest);
700 }
701
702 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
703 {
704 if (src != dest)
705 move(src, dest);
706 }
707
708
709 // Forwards / external control flow operations:
710 //
711 // This set of jump and conditional branch operations return a Jump
712 // object which may linked at a later point, allow forwards jump,
713 // or jumps that will require external linkage (after the code has been
714 // relocated).
715 //
716 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
717 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
718 // used (representing the names 'below' and 'above').
719 //
720 // Operands to the comparision are provided in the expected order, e.g.
721 // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
722 // treated as a signed 32bit value, is less than or equal to 5.
723 //
724 // jz and jnz test whether the first operand is equal to zero, and take
725 // an optional second operand of a mask under which to perform the test.
726private:
727
728 // Should we be using TEQ for equal/not-equal?
729 void compare32(RegisterID left, Imm32 right)
730 {
731 int32_t imm = right.m_value;
732 if (!imm)
733 m_assembler.tst(left, left);
734 else {
735 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
736 if (armImm.isValid())
737 m_assembler.cmp(left, armImm);
738 if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
739 m_assembler.cmn(left, armImm);
740 else {
741 move(Imm32(imm), dataTempRegister);
742 m_assembler.cmp(left, dataTempRegister);
743 }
744 }
745 }
746
747 void test32(RegisterID reg, Imm32 mask)
748 {
749 int32_t imm = mask.m_value;
750
751 if (imm == -1)
752 m_assembler.tst(reg, reg);
753 else {
754 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
755 if (armImm.isValid())
756 m_assembler.tst(reg, armImm);
757 else {
758 move(mask, dataTempRegister);
759 m_assembler.tst(reg, dataTempRegister);
760 }
761 }
762 }
763
764public:
765 Jump branch32(Condition cond, RegisterID left, RegisterID right)
766 {
767 m_assembler.cmp(left, right);
768 return Jump(makeBranch(cond));
769 }
770
771 Jump branch32(Condition cond, RegisterID left, Imm32 right)
772 {
773 compare32(left, right);
774 return Jump(makeBranch(cond));
775 }
776
777 Jump branch32(Condition cond, RegisterID left, Address right)
778 {
779 load32(right, dataTempRegister);
780 return branch32(cond, left, dataTempRegister);
781 }
782
783 Jump branch32(Condition cond, Address left, RegisterID right)
784 {
785 load32(left, dataTempRegister);
786 return branch32(cond, dataTempRegister, right);
787 }
788
789 Jump branch32(Condition cond, Address left, Imm32 right)
790 {
791 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
792 load32(left, addressTempRegister);
793 return branch32(cond, addressTempRegister, right);
794 }
795
796 Jump branch32(Condition cond, BaseIndex left, Imm32 right)
797 {
798 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
799 load32(left, addressTempRegister);
800 return branch32(cond, addressTempRegister, right);
801 }
802
803 Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
804 {
805 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
806 load32WithUnalignedHalfWords(left, addressTempRegister);
807 return branch32(cond, addressTempRegister, right);
808 }
809
810 Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
811 {
812 load32(left.m_ptr, dataTempRegister);
813 return branch32(cond, dataTempRegister, right);
814 }
815
816 Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
817 {
818 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
819 load32(left.m_ptr, addressTempRegister);
820 return branch32(cond, addressTempRegister, right);
821 }
822
823 Jump branch16(Condition cond, BaseIndex left, RegisterID right)
824 {
825 load16(left, dataTempRegister);
826 m_assembler.lsl(addressTempRegister, right, 16);
827 m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
828 return branch32(cond, dataTempRegister, addressTempRegister);
829 }
830
831 Jump branch16(Condition cond, BaseIndex left, Imm32 right)
832 {
833 // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
834 load16(left, addressTempRegister);
835 m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
836 return branch32(cond, addressTempRegister, Imm32(right.m_value << 16));
837 }
838
839 Jump branch8(Condition cond, RegisterID left, Imm32 right)
840 {
841 compare32(left, right);
842 return Jump(makeBranch(cond));
843 }
844
845 Jump branch8(Condition cond, Address left, Imm32 right)
846 {
847 // use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/
848 load8(left, addressTempRegister);
849 return branch8(cond, addressTempRegister, right);
850 }
851
852 Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
853 {
854 ASSERT((cond == Zero) || (cond == NonZero));
855 m_assembler.tst(reg, mask);
856 return Jump(makeBranch(cond));
857 }
858
859 Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
860 {
861 ASSERT((cond == Zero) || (cond == NonZero));
862 test32(reg, mask);
863 return Jump(makeBranch(cond));
864 }
865
866 Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
867 {
868 ASSERT((cond == Zero) || (cond == NonZero));
869 // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
870 load32(address, addressTempRegister);
871 return branchTest32(cond, addressTempRegister, mask);
872 }
873
874 Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
875 {
876 ASSERT((cond == Zero) || (cond == NonZero));
877 // use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
878 load32(address, addressTempRegister);
879 return branchTest32(cond, addressTempRegister, mask);
880 }
881
882 Jump branchTest8(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
883 {
884 ASSERT((cond == Zero) || (cond == NonZero));
885 test32(reg, mask);
886 return Jump(makeBranch(cond));
887 }
888
889 Jump branchTest8(Condition cond, Address address, Imm32 mask = Imm32(-1))
890 {
891 ASSERT((cond == Zero) || (cond == NonZero));
892 // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
893 load8(address, addressTempRegister);
894 return branchTest8(cond, addressTempRegister, mask);
895 }
896
897 Jump jump()
898 {
899 return Jump(makeJump());
900 }
901
902 void jump(RegisterID target)
903 {
904 m_assembler.bx(target);
905 }
906
907 // Address is a memory location containing the address to jump to
908 void jump(Address address)
909 {
910 load32(address, dataTempRegister);
911 m_assembler.bx(dataTempRegister);
912 }
913
914
915 // Arithmetic control flow operations:
916 //
917 // This set of conditional branch operations branch based
918 // on the result of an arithmetic operation. The operation
919 // is performed as normal, storing the result.
920 //
921 // * jz operations branch if the result is zero.
922 // * jo operations branch if the (signed) arithmetic
923 // operation caused an overflow to occur.
924
925 Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
926 {
927 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
928 m_assembler.add_S(dest, dest, src);
929 return Jump(makeBranch(cond));
930 }
931
932 Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
933 {
934 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
935 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
936 if (armImm.isValid())
937 m_assembler.add_S(dest, dest, armImm);
938 else {
939 move(imm, dataTempRegister);
940 m_assembler.add_S(dest, dest, dataTempRegister);
941 }
942 return Jump(makeBranch(cond));
943 }
944
    // dest *= src (signed); branch if the product did not fit in 32 signed bits.
    // Clobbers dataTempRegister and addressTempRegister.
    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        // smull leaves the low 32 bits of the product in dest and the high 32
        // bits in dataTempRegister.
        m_assembler.smull(dest, dataTempRegister, dest, src);
        // Overflow occurred iff the high word is not the sign-extension of the
        // low word.
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }
952
    // dest = src * imm (signed); branch if the product did not fit in 32 signed
    // bits. Clobbers dataTempRegister and addressTempRegister.
    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT(cond == Overflow);
        move(imm, dataTempRegister);
        // Low 32 bits of the product go to dest, high 32 bits to dataTempRegister.
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
        // Overflow occurred iff the high word is not the sign-extension of the
        // low word.
        m_assembler.asr(addressTempRegister, dest, 31);
        return branch32(NotEqual, addressTempRegister, dataTempRegister);
    }
961
    // dest -= src, setting flags; branch on the given arithmetic condition.
    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.sub_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
968
969 Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
970 {
971 ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
972 ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
973 if (armImm.isValid())
974 m_assembler.sub_S(dest, dest, armImm);
975 else {
976 move(imm, dataTempRegister);
977 m_assembler.sub_S(dest, dest, dataTempRegister);
978 }
979 return Jump(makeBranch(cond));
980 }
981
982
983 // Miscellaneous operations:
984
    // Emit a bkpt instruction to trap into a debugger.
    void breakpoint()
    {
        m_assembler.bkpt();
    }
989
    // Emit a near call whose target is patched in at link time; the fixed-width
    // move of a placeholder constant keeps the sequence repatchable.
    Call nearCall()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }
995
    // Emit a call whose target is patched in at link time; the fixed-width
    // move of a placeholder constant keeps the sequence repatchable.
    Call call()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }
1001
    // Indirect call to the address held in 'target'.
    Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }
1006
    // Load the call target from memory, then call it. Clobbers dataTempRegister.
    Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }
1012
    // Return via the link register.
    void ret()
    {
        m_assembler.bx(linkRegister);
    }
1017
    // Compare left with right and set dest to 1 if the condition holds,
    // 0 otherwise.
    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        // Then/else IT block predicating the two moves: dest = cond ? 1 : 0.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1025
    // Compare left with an immediate and set dest to 1 if the condition holds,
    // 0 otherwise.
    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        compare32(left, right);
        // Then/else IT block predicating the two moves: dest = cond ? 1 : 0.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1033
    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    // Set dest to 1 if (word at address & mask) satisfies cond, 0 otherwise.
    // Clobbers dataTempRegister.
    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        // Then/else IT block predicating the two moves: dest = cond ? 1 : 0.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1046
    // Set dest to 1 if (byte at address & mask) satisfies cond, 0 otherwise.
    // Clobbers dataTempRegister.
    void setTest8(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        // Then/else IT block predicating the two moves: dest = cond ? 1 : 0.
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1055
    // Load a 32-bit constant using the fixed-width two-instruction encoding so
    // the value can be repatched later; the returned label identifies the site.
    DataLabel32 moveWithPatch(Imm32 imm, RegisterID dst)
    {
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }
1061
    // Load a pointer constant using the fixed-width two-instruction encoding so
    // the value can be repatched later; the returned label identifies the site.
    DataLabelPtr moveWithPatch(ImmPtr imm, RegisterID dst)
    {
        moveFixedWidthEncoding(Imm32(imm), dst);
        return DataLabelPtr(this);
    }
1067
    // Compare left against a repatchable pointer constant (initially
    // initialRightValue); dataLabel identifies the constant for later
    // repatching. Clobbers dataTempRegister.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }
1073
    // Compare the word at 'left' against a repatchable pointer constant
    // (initially initialRightValue). Clobbers both temp registers.
    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }
1080
    // Store a repatchable pointer constant (initially initialValue) to memory;
    // the returned label allows the stored constant to be repatched later.
    // Clobbers dataTempRegister.
    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister);
        store32(dataTempRegister, address);
        return label;
    }
1087 DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(ImmPtr(0), address); }
1088
1089
    Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        // Uses bx (not blx), so the link register is left untouched.
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }
1096
    // Link oldJump to the current location, then emit a tail call there.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
1102
1103
1104protected:
    // Emit a patchable unconditional jump: a fixed-width load of the
    // (to-be-linked) target address into dataTempRegister, followed by bx.
    ARMv7Assembler::JmpSrc makeJump()
    {
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister);
    }
1110
    // Conditional form of makeJump: the IT block predicates the fixed-width
    // move (two halves) and the bx on cond, so the branch is taken only when
    // the condition holds.
    ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(Imm32(0), dataTempRegister);
        return m_assembler.bx(dataTempRegister);
    }
    // Convenience overloads mapping portable condition enums to ARM condition codes.
    ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
    ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1119
1120 ArmAddress setupArmAddress(BaseIndex address)
1121 {
1122 if (address.offset) {
1123 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1124 if (imm.isValid())
1125 m_assembler.add(addressTempRegister, address.base, imm);
1126 else {
1127 move(Imm32(address.offset), addressTempRegister);
1128 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1129 }
1130
1131 return ArmAddress(addressTempRegister, address.index, address.scale);
1132 } else
1133 return ArmAddress(address.base, address.index, address.scale);
1134 }
1135
1136 ArmAddress setupArmAddress(Address address)
1137 {
1138 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1139 return ArmAddress(address.base, address.offset);
1140
1141 move(Imm32(address.offset), addressTempRegister);
1142 return ArmAddress(address.base, addressTempRegister);
1143 }
1144
1145 ArmAddress setupArmAddress(ImplicitAddress address)
1146 {
1147 if ((address.offset >= -0xff) && (address.offset <= 0xfff))
1148 return ArmAddress(address.base, address.offset);
1149
1150 move(Imm32(address.offset), addressTempRegister);
1151 return ArmAddress(address.base, addressTempRegister);
1152 }
1153
1154 RegisterID makeBaseIndexBase(BaseIndex address)
1155 {
1156 if (!address.offset)
1157 return address.base;
1158
1159 ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset);
1160 if (imm.isValid())
1161 m_assembler.add(addressTempRegister, address.base, imm);
1162 else {
1163 move(Imm32(address.offset), addressTempRegister);
1164 m_assembler.add(addressTempRegister, addressTempRegister, address.base);
1165 }
1166
1167 return addressTempRegister;
1168 }
1169
    // Load a 32-bit constant using a fixed two-instruction sequence (movT3 for
    // the low halfword, movt for the high halfword) so the constant occupies a
    // known-width slot and can be repatched in place later.
    void moveFixedWidthEncoding(Imm32 imm, RegisterID dst)
    {
        uint32_t value = imm.m_value;
        m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff));
        m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16));
    }
1176
    // The portable Condition enum shares the ARMv7 condition-code encoding,
    // so the conversion is a plain cast.
    ARMv7Assembler::Condition armV7Condition(Condition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
1181
    // The DoubleCondition enum shares the ARMv7 condition-code encoding,
    // so the conversion is a plain cast.
    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
1186
1187private:
1188 friend class LinkBuffer;
1189 friend class RepatchBuffer;
1190
    // Resolve a linkable call recorded during code generation to its final
    // target function, within the generated code at 'code'.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
    }
1195
    // Redirect an already-linked call site to a new code destination.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1200
    // Redirect an already-linked call site to a new target function.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1205};
1206
1207} // namespace JSC
1208
1209#endif // ENABLE(ASSEMBLER)
1210
1211#endif // MacroAssemblerARMv7_h
Note: See TracBrowser for help on using the repository browser.