source: webkit/trunk/JavaScriptCore/assembler/MacroAssembler.h@ 40562

Last change on this file since 40562 was 40562, checked in by [email protected], 16 years ago

2009-02-03 Gavin Barraclough <[email protected]>

Reviewed by Geoff Garen.

https://bugs.webkit.org/show_bug.cgi?id=23715

Simplify the MacroAssembler interface by combining comparison methods.
Separate operations are combined as follows:

jz32/jnz32/jzPtr/jnzPtr -> branchTest32/branchTestPtr,
j*(Add|Mul|Sub)32/j*(Add|Mul|Sub)Ptr -> branch(Add|Mul|Sub)32/branch(Add|Mul|Sub)Ptr
j*32/j*Ptr (all other two-operand comparisons) -> branch32/branchPtr
set*32 -> set32
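
For example (an illustrative sketch - exact old signatures varied, and the
register name is hypothetical), a zero test previously written:

    jnz32(regT0);

is now written:

    branchTest32(NonZero, regT0);

and a signed comparison such as jle32(regT0, Imm32(5)) becomes
branch32(LessThanOrEqual, regT0, Imm32(5)).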

Also, represent the Scale of BaseIndex addresses as a plain enum (0,1,2,3),
instead of as multiplicands (1,2,4,8).

This patch significantly reduces replication of code, and increases the functionality
supported by the MacroAssembler. No performance impact.

  • assembler/MacroAssembler.h: (JSC::MacroAssembler::): (JSC::MacroAssembler::branchPtr): (JSC::MacroAssembler::branchPtrWithPatch): (JSC::MacroAssembler::branch32): (JSC::MacroAssembler::branch16): (JSC::MacroAssembler::branchTestPtr): (JSC::MacroAssembler::branchTest32): (JSC::MacroAssembler::branchAddPtr): (JSC::MacroAssembler::branchAdd32): (JSC::MacroAssembler::branchMul32): (JSC::MacroAssembler::branchSubPtr): (JSC::MacroAssembler::branchSub32): (JSC::MacroAssembler::set32): (JSC::MacroAssembler::setTest32):
  • assembler/X86Assembler.h: (JSC::X86Assembler::): (JSC::X86Assembler::jccRel32): (JSC::X86Assembler::setccOpcode): (JSC::X86Assembler::cmpq_mr): (JSC::X86Assembler::setcc_r): (JSC::X86Assembler::sete_r): (JSC::X86Assembler::setne_r): (JSC::X86Assembler::jne): (JSC::X86Assembler::je): (JSC::X86Assembler::jl): (JSC::X86Assembler::jb): (JSC::X86Assembler::jle): (JSC::X86Assembler::jbe): (JSC::X86Assembler::jge): (JSC::X86Assembler::jg): (JSC::X86Assembler::ja): (JSC::X86Assembler::jae): (JSC::X86Assembler::jo): (JSC::X86Assembler::jp): (JSC::X86Assembler::js): (JSC::X86Assembler::jcc): (JSC::X86Assembler::X86InstructionFormatter::putModRmSib):
  • jit/JIT.cpp: (JSC::JIT::compileOpStrictEq): (JSC::JIT::emitSlowScriptCheck): (JSC::JIT::privateCompileMainPass): (JSC::JIT::privateCompileSlowCases): (JSC::JIT::privateCompile): (JSC::JIT::privateCompileCTIMachineTrampolines):
  • jit/JITArithmetic.cpp: (JSC::JIT::compileFastArith_op_lshift): (JSC::JIT::compileFastArith_op_mod): (JSC::JIT::compileFastArith_op_post_inc): (JSC::JIT::compileFastArith_op_post_dec): (JSC::JIT::compileFastArith_op_pre_inc): (JSC::JIT::compileFastArith_op_pre_dec): (JSC::JIT::compileBinaryArithOp): (JSC::JIT::compileFastArith_op_add): (JSC::JIT::compileFastArith_op_mul):
  • jit/JITCall.cpp: (JSC::JIT::compileOpCall): (JSC::JIT::compileOpCallSlowCase):
  • jit/JITInlineMethods.h: (JSC::JIT::checkStructure): (JSC::JIT::emitJumpIfJSCell): (JSC::JIT::emitJumpIfNotJSCell): (JSC::JIT::emitJumpIfImmediateNumber): (JSC::JIT::emitJumpIfNotImmediateNumber): (JSC::JIT::emitJumpIfImmediateInteger): (JSC::JIT::emitJumpIfNotImmediateInteger): (JSC::JIT::emitFastArithDeTagImmediateJumpIfZero):
  • jit/JITPropertyAccess.cpp: (JSC::JIT::compileGetByIdHotPath): (JSC::JIT::compilePutByIdHotPath): (JSC::JIT::privateCompilePutByIdTransition): (JSC::JIT::privateCompilePatchGetArrayLength): (JSC::JIT::privateCompileGetByIdProto): (JSC::JIT::privateCompileGetByIdProtoList): (JSC::JIT::privateCompileGetByIdChainList): (JSC::JIT::privateCompileGetByIdChain):
  • runtime/RegExp.cpp: (JSC::RegExp::match):
  • wrec/WRECGenerator.cpp: (JSC::WREC::Generator::generateEnter): (JSC::WREC::Generator::generateIncrementIndex): (JSC::WREC::Generator::generateLoadCharacter): (JSC::WREC::Generator::generateJumpIfNotEndOfInput): (JSC::WREC::Generator::generateBackreferenceQuantifier): (JSC::WREC::Generator::generateNonGreedyQuantifier): (JSC::WREC::Generator::generateGreedyQuantifier): (JSC::WREC::Generator::generatePatternCharacterPair): (JSC::WREC::Generator::generatePatternCharacter): (JSC::WREC::Generator::generateCharacterClassInvertedRange): (JSC::WREC::Generator::generateCharacterClassInverted): (JSC::WREC::Generator::generateAssertionBOL): (JSC::WREC::Generator::generateAssertionEOL): (JSC::WREC::Generator::generateAssertionWordBoundary): (JSC::WREC::Generator::generateBackreference):
File size: 44.6 KB
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssembler_h
#define MacroAssembler_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"

namespace JSC {

class MacroAssembler {
protected:
    X86Assembler m_assembler;

#if PLATFORM(X86_64)
    static const X86::RegisterID scratchRegister = X86::r11;
#endif

public:
    typedef X86::RegisterID RegisterID;

    // Note: the values in this enum are plain scale exponents (0..3),
    // not the multiplicands (1, 2, 4, 8) they correspond to.
    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
#if PLATFORM(X86)
        ScalePtr = TimesFour
#endif
#if PLATFORM(X86_64)
        ScalePtr = TimesEight
#endif
    };

    typedef X86Assembler::Condition Condition;
    static const Condition Equal = X86Assembler::ConditionE;
    static const Condition NotEqual = X86Assembler::ConditionNE;
    static const Condition Above = X86Assembler::ConditionA;
    static const Condition AboveOrEqual = X86Assembler::ConditionAE;
    static const Condition Below = X86Assembler::ConditionB;
    static const Condition BelowOrEqual = X86Assembler::ConditionBE;
    static const Condition GreaterThan = X86Assembler::ConditionG;
    static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE;
    static const Condition LessThan = X86Assembler::ConditionL;
    static const Condition LessThanOrEqual = X86Assembler::ConditionLE;
    static const Condition Overflow = X86Assembler::ConditionO;
    static const Condition Zero = X86Assembler::ConditionE;
    static const Condition NonZero = X86Assembler::ConditionNE;

    MacroAssembler()
    {
    }

    size_t size() { return m_assembler.size(); }
    void* copyCode(ExecutablePool* allocator)
    {
        return m_assembler.executableCopy(allocator);
    }


    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent; the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a complex addressing mode.
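    //
    // For illustration (a sketch; register names are hypothetical), the
    // effective address is base + (index << scale) + offset, so:
    //
    //     load32(BaseIndex(arrayReg, indexReg, TimesFour, 8), destReg);
    //
    // loads from arrayReg + indexReg * 4 + 8.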
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;
    };

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
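    //
    // For example (a sketch; 'counter' is a hypothetical global):
    //
    //     static int32_t counter;
    //     ...
    //     add32(Imm32(1), AbsoluteAddress(&counter));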
    struct AbsoluteAddress {
        explicit AbsoluteAddress(void* ptr)
            : m_ptr(ptr)
        {
        }

        void* m_ptr;
    };


    class Jump;
    class PatchBuffer;

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
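    //
    // A sketch of typical usage (names hypothetical):
    //
    //     DataLabelPtr label = masm.storePtrWithPatch(Address(baseReg));
    //     // ... after the code has been copied ...
    //     patchBuffer.setPtr(label, pointerValue);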
    class DataLabelPtr {
        friend class MacroAssembler;
        friend class PatchBuffer;

    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(MacroAssembler* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        static void patch(void* address, void* value)
        {
            X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
        }

    private:
        X86Assembler::JmpDst m_label;
    };

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit value to be
    // patched after the code has been generated.
    class DataLabel32 {
        friend class MacroAssembler;
        friend class PatchBuffer;

    public:
        DataLabel32()
        {
        }

        DataLabel32(MacroAssembler* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        static void patch(void* address, int32_t value)
        {
            X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address), value);
        }

    private:
        X86Assembler::JmpDst m_label;
    };

    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        friend class Jump;
        friend class MacroAssembler;
        friend class PatchBuffer;

    public:
        Label()
        {
        }

        Label(MacroAssembler* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        // FIXME: transitionary method, while we replace JmpSrcs with Jumps.
        operator X86Assembler::JmpDst()
        {
            return m_label;
        }

    private:
        X86Assembler::JmpDst m_label;
    };


    // Jump:
    //
    // A jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    //
    // Jump objects retain a pointer to the assembler for syntactic purposes -
    // to allow the jump object to be able to link itself, e.g.:
    //
    //     Jump forwardsBranch = branch32(NotEqual, reg1, Imm32(0));
    //     // ...
    //     forwardsBranch.link(this);
    //
    // Jumps may also be linked to a Label.
    class Jump {
        friend class PatchBuffer;
        friend class MacroAssembler;

    public:
        Jump()
        {
        }

        // FIXME: transitionary method, while we replace JmpSrcs with Jumps.
        Jump(X86Assembler::JmpSrc jmp)
            : m_jmp(jmp)
        {
        }

        void link(MacroAssembler* masm)
        {
            masm->m_assembler.link(m_jmp, masm->m_assembler.label());
        }

        void linkTo(Label label, MacroAssembler* masm)
        {
            masm->m_assembler.link(m_jmp, label.m_label);
        }

        // FIXME: transitionary method, while we replace JmpSrcs with Jumps.
        operator X86Assembler::JmpSrc()
        {
            return m_jmp;
        }

        static void patch(void* address, void* destination)
        {
            X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
        }

    private:
        X86Assembler::JmpSrc m_jmp;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
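    //
    // A sketch of typical usage (register names hypothetical):
    //
    //     JumpList failureCases;
    //     failureCases.append(branch32(NotEqual, regT0, Imm32(0)));
    //     failureCases.append(branchTest32(NonZero, regT1));
    //     // ...
    //     failureCases.link(this);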
    class JumpList {
        friend class PatchBuffer;

    public:
        void link(MacroAssembler* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }

        void linkTo(Label label, MacroAssembler* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

    private:
        Vector<Jump, 16> m_jumps;
    };


    // PatchBuffer:
    //
    // This class assists in linking code generated by the macro assembler, once code generation
    // has been completed, and the code has been copied to its final location in memory. At this
    // time pointers to labels within the code may be resolved, and relative offsets to external
    // addresses may be fixed.
    //
    // Specifically:
    //    * Jump objects may be linked to external targets,
    //    * The address of Jump objects may be taken, such that it can later be relinked.
    //    * The return address of a Jump object representing a call may be acquired.
    //    * The address of a Label pointing into the code may be resolved.
    //    * The value referenced by a DataLabel may be fixed.
    //
    // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
    // address of calls, as opposed to a point that can be used to later relink a Jump -
    // possibly wrap the latter up in an object that can do just that).
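    //
    // A sketch of typical usage (names hypothetical):
    //
    //     void* code = masm.copyCode(allocator);
    //     PatchBuffer patchBuffer(code);
    //     patchBuffer.link(slowCase, reinterpret_cast<void*>(slowPathStub));
    //     void* entry = patchBuffer.addressOf(entryLabel);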
    class PatchBuffer {
    public:
        PatchBuffer(void* code)
            : m_code(code)
        {
        }

        void link(Jump jump, void* target)
        {
            X86Assembler::link(m_code, jump.m_jmp, target);
        }

        void link(JumpList list, void* target)
        {
            for (unsigned i = 0; i < list.m_jumps.size(); ++i)
                X86Assembler::link(m_code, list.m_jumps[i], target);
        }

        void* addressOf(Jump jump)
        {
            return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp);
        }

        void* addressOf(Label label)
        {
            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
        }

        void* addressOf(DataLabelPtr label)
        {
            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
        }

        void* addressOf(DataLabel32 label)
        {
            return X86Assembler::getRelocatedAddress(m_code, label.m_label);
        }

        void setPtr(DataLabelPtr label, void* value)
        {
            X86Assembler::patchAddress(m_code, label.m_label, value);
        }

    private:
        void* m_code;
    };


    // ImmPtr:
    //
    // A pointer-sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // it from pointers used as absolute addresses to memory operations.
    struct ImmPtr {
        explicit ImmPtr(void* value)
            : m_value(value)
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        void* m_value;
    };


    // Imm32:
    //
    // A 32-bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct Imm32 {
        explicit Imm32(int32_t value)
            : m_value(value)
        {
        }

#if PLATFORM(X86)
        explicit Imm32(ImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be an Imm32, and the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
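    //
    // For example (register names hypothetical):
    //
    //     add32(Imm32(4), regT0);                // regT0 += 4
    //     sub32(Imm32(1), Address(regT1, 8));    // *(int32_t*)(regT1 + 8) -= 1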

    void addPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.addq_rr(src, dest);
#else
        add32(src, dest);
#endif
    }

    void addPtr(Imm32 imm, RegisterID srcDest)
    {
#if PLATFORM(X86_64)
        m_assembler.addq_ir(imm.m_value, srcDest);
#else
        add32(imm, srcDest);
#endif
    }

    void addPtr(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        move(imm, scratchRegister);
        m_assembler.addq_rr(scratchRegister, dest);
#else
        add32(Imm32(imm), dest);
#endif
    }

    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(Imm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Imm32 imm, AbsoluteAddress address)
    {
#if PLATFORM(X86_64)
        move(ImmPtr(address.m_ptr), scratchRegister);
        add32(imm, Address(scratchRegister));
#else
        m_assembler.addl_im(imm.m_value, address.m_ptr);
#endif
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.andq_rr(src, dest);
#else
        and32(src, dest);
#endif
    }

    void andPtr(Imm32 imm, RegisterID srcDest)
    {
#if PLATFORM(X86_64)
        m_assembler.andq_ir(imm.m_value, srcDest);
#else
        and32(imm, srcDest);
#endif
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86::ecx) {
            swap(shift_amount, X86::ecx);

            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.shll_CLr(X86::ecx);
            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86::ecx)
                m_assembler.shll_CLr(shift_amount);
            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.shll_CLr(dest);

            swap(shift_amount, X86::ecx);
        } else
            m_assembler.shll_CLr(dest);
    }

    // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
    // For now, this operation has specific register requirements, and the three registers must
    // be unique. It is unfortunate to expose this in the MacroAssembler interface, however
    // given the complexity to fix, the fact that it is not uncommon for processors to have
    // specific register requirements on this operation (e.g. MIPS places the result in 'hi'),
    // or to not support a hardware divide at all, it may not be worth addressing.
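    //
    // For example (a sketch; on x86 the dividend must be in eax and the
    // remainder is produced in edx, and 'numerator' is a hypothetical register):
    //
    //     move(numerator, X86::eax);
    //     mod32(X86::ecx, X86::eax, X86::edx);    // edx = eax % ecx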
    void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
    {
#ifdef NDEBUG
#pragma unused(dividend, remainder)
#else
        ASSERT((dividend == X86::eax) && (remainder == X86::edx));
        ASSERT((dividend != divisor) && (remainder != divisor));
#endif

        m_assembler.cdq();
        m_assembler.idivl_r(divisor);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.orq_rr(src, dest);
#else
        or32(src, dest);
#endif
    }

    void orPtr(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        move(imm, scratchRegister);
        m_assembler.orq_rr(scratchRegister, dest);
#else
        or32(Imm32(imm), dest);
#endif
    }

    void orPtr(Imm32 imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.orq_ir(imm.m_value, dest);
#else
        or32(imm, dest);
#endif
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    {
#if PLATFORM(X86_64)
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86::ecx) {
            swap(shift_amount, X86::ecx);

            // E.g. transform "sarq %rax, %rax" -> "xchgq %rax, %rcx; sarq %rcx, %rcx; xchgq %rax, %rcx"
            if (dest == shift_amount)
                m_assembler.sarq_CLr(X86::ecx);
            // E.g. transform "sarq %rax, %rcx" -> "xchgq %rax, %rcx; sarq %rcx, %rax; xchgq %rax, %rcx"
            else if (dest == X86::ecx)
                m_assembler.sarq_CLr(shift_amount);
            // E.g. transform "sarq %rax, %rbx" -> "xchgq %rax, %rcx; sarq %rcx, %rbx; xchgq %rax, %rcx"
            else
                m_assembler.sarq_CLr(dest);

            swap(shift_amount, X86::ecx);
        } else
            m_assembler.sarq_CLr(dest);
#else
        rshift32(shift_amount, dest);
#endif
    }

    void rshiftPtr(Imm32 imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.sarq_i8r(imm.m_value, dest);
#else
        rshift32(imm, dest);
#endif
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        // On x86 we can only shift by ecx; if asked to shift by another register we'll
        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
        if (shift_amount != X86::ecx) {
            swap(shift_amount, X86::ecx);

            // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
            if (dest == shift_amount)
                m_assembler.sarl_CLr(X86::ecx);
            // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
            else if (dest == X86::ecx)
                m_assembler.sarl_CLr(shift_amount);
            // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
            else
                m_assembler.sarl_CLr(dest);

            swap(shift_amount, X86::ecx);
        } else
            m_assembler.sarl_CLr(dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.subq_rr(src, dest);
#else
        sub32(src, dest);
#endif
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.subq_ir(imm.m_value, dest);
#else
        sub32(imm, dest);
#endif
    }

    void subPtr(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        move(imm, scratchRegister);
        m_assembler.subq_rr(scratchRegister, dest);
#else
        sub32(Imm32(imm), dest);
#endif
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(Imm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
#if PLATFORM(X86_64)
        move(ImmPtr(address.m_ptr), scratchRegister);
        sub32(imm, Address(scratchRegister));
#else
        m_assembler.subl_im(imm.m_value, address.m_ptr);
#endif
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.xorq_rr(src, dest);
#else
        xor32(src, dest);
#endif
    }

    void xorPtr(Imm32 imm, RegisterID srcDest)
    {
#if PLATFORM(X86_64)
        m_assembler.xorq_ir(imm.m_value, srcDest);
#else
        xor32(imm, srcDest);
#endif
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(Imm32 imm, RegisterID srcDest)
    {
        m_assembler.xorl_ir(imm.m_value, srcDest);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be an Imm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
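    //
    // For example (register names hypothetical):
    //
    //     load32(Address(baseReg, 4), destReg);    // destReg = *(int32_t*)(baseReg + 4)
    //     store32(Imm32(0), baseReg);              // *(int32_t*)baseReg = 0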

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_mr(address.offset, address.base, dest);
#else
        load32(address, dest);
#endif
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
#else
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
#endif
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
#else
        load32(address, dest);
#endif
    }

    void loadPtr(void* address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        if (dest == X86::eax)
            m_assembler.movq_mEAX(address);
        else {
            move(X86::eax, dest);
            m_assembler.movq_mEAX(address);
            swap(X86::eax, dest);
        }
#else
        load32(address, dest);
#endif
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32(void* address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        if (dest == X86::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(X86::eax, dest);
            m_assembler.movl_mEAX(address);
            swap(X86::eax, dest);
        }
#else
        m_assembler.movl_mr(address, dest);
#endif
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm(src, address.offset, address.base);
#else
        store32(src, address);
#endif
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
#else
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
#endif
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
#else
        store32(src, address);
#endif
    }

    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
#if PLATFORM(X86_64)
        move(imm, scratchRegister);
        storePtr(scratchRegister, address);
#else
        m_assembler.movl_i32m(imm.asIntptr(), address.offset, address.base);
#endif
    }

#if !PLATFORM(X86_64)
    void storePtr(ImmPtr imm, void* address)
    {
        store32(Imm32(imm), address);
    }
#endif

    DataLabelPtr storePtrWithPatch(Address address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(0, scratchRegister);
        DataLabelPtr label(this);
        storePtr(scratchRegister, address);
        return label;
#else
        m_assembler.movl_i32m(0, address.offset, address.base);
        return DataLabelPtr(this);
#endif
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(Imm32 imm, void* address)
    {
#if PLATFORM(X86_64)
        move(X86::eax, scratchRegister);
        move(imm, X86::eax);
        m_assembler.movl_EAXm(address);
        move(scratchRegister, X86::eax);
#else
        m_assembler.movl_i32m(imm.m_value, address);
#endif
    }


    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
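    //
    // For example (register names hypothetical):
    //
    //     push(regT0);       // spill regT0 to the stack
    //     peek(regT1, 0);    // regT1 = top of stack, stack position unchanged
    //     pop(regT0);        // restore regT0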

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    void pop()
    {
        addPtr(Imm32(sizeof(void*)), X86::esp);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(X86::esp, (index * sizeof(void*))), dest);
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, Address(X86::esp, (index * sizeof(void*))));
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, Address(X86::esp, (index * sizeof(void*))));
    }

    void poke(ImmPtr imm, int index = 0)
    {
        storePtr(imm, Address(X86::esp, (index * sizeof(void*))));
    }

    // Register move operations:
    //
    // Move values in registers.

    void move(Imm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the Imm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value.
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
#if PLATFORM(X86_64)
        m_assembler.movq_rr(src, dest);
#else
        m_assembler.movl_rr(src, dest);
#endif
    }

    void move(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
            m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
        else
            m_assembler.movq_i64r(imm.asIntptr(), dest);
#else
        m_assembler.movl_i32r(imm.asIntptr(), dest);
#endif
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
#if PLATFORM(X86_64)
        m_assembler.xchgq_rr(reg1, reg2);
#else
        m_assembler.xchgl_rr(reg1, reg2);
#endif
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movsxd_rr(src, dest);
#else
        if (src != dest)
            move(src, dest);
#endif
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movl_rr(src, dest);
#else
        if (src != dest)
            move(src, dest);
#endif
    }


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations returns a Jump
    // object which may be linked at a later point, allowing forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted by the conditions
    // LessThan, GreaterThan, LessThanOrEqual, and GreaterThanOrEqual; for
    // unsigned comparisons the conditions Below, Above, BelowOrEqual, and
    // AboveOrEqual are used.
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value held
    // in reg1, when treated as a signed 32-bit value, is less than or equal to 5.
    //
    // The branchTest32 and branchTestPtr operations test whether an operand is
    // zero (or non-zero), and take an optional mask under which to perform the test.

public:
    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(cond));
#else
        return branch32(cond, left, right);
#endif
    }

    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    {
#if PLATFORM(X86_64)
        intptr_t imm = right.asIntptr();
        if (CAN_SIGN_EXTEND_32_64(imm)) {
            if (!imm)
                m_assembler.testq_rr(left, left);
            else
                m_assembler.cmpq_ir(imm, left);
            return Jump(m_assembler.jCC(cond));
        } else {
            move(right, scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
#else
        return branch32(cond, left, Imm32(right));
#endif
    }

    Jump branchPtr(Condition cond, RegisterID left, Address right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(cond));
#else
        return branch32(cond, left, right);
#endif
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    {
#if PLATFORM(X86_64)
        move(ImmPtr(left.m_ptr), scratchRegister);
        return branchPtr(cond, Address(scratchRegister), right);
#else
        m_assembler.cmpl_rm(right, left.m_ptr);
        return Jump(m_assembler.jCC(cond));
#endif
    }

    Jump branchPtr(Condition cond, Address left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(cond));
#else
        return branch32(cond, left, right);
#endif
    }

    Jump branchPtr(Condition cond, Address left, ImmPtr right)
    {
#if PLATFORM(X86_64)
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
#else
        return branch32(cond, left, Imm32(right));
#endif
    }

#if !PLATFORM(X86_64)
    Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
    {
        m_assembler.cmpl_im(right.asIntptr(), left.m_ptr);
        return Jump(m_assembler.jCC(cond));
    }
#endif

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return branchPtr(cond, left, scratchRegister);
#else
        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(cond));
#endif
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return branchPtr(cond, left, scratchRegister);
#else
        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(cond));
#endif
    }

    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    {
#if PLATFORM(X86_64)
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(cond));
#else
        return branchTest32(cond, reg, mask);
#endif
    }

    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testq_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testq_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(cond));
#else
        return branchTest32(cond, reg, mask);
#endif
    }

    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(cond));
#else
        return branchTest32(cond, address, mask);
#endif
    }

    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
#if PLATFORM(X86_64)
        if (mask.m_value == -1)
            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(cond));
#else
        return branchTest32(cond, address, mask);
#endif
    }

    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(cond));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }


    // Backwards, local control flow operations:
    //
    // These operations provide a shorter notation for local
    // backwards branches, which may be more convenient both
    // for the programmer and for the assembler (allowing
    // shorter values to be used in relative offsets).
    //
    // The code sequence:
    //
    //     Label topOfLoop(this);
    //     // ...
    //     branch32(NotEqual, reg1, reg2, topOfLoop);
    //
    // is equivalent to the longer, potentially less efficient form:
    //
    //     Label topOfLoop(this);
    //     // ...
    //     branch32(NotEqual, reg1, reg2).linkTo(topOfLoop, this);

    void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
    {
        branch16(cond, left, right).linkTo(target, this);
    }

    void branchTestPtr(Condition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

    void jump(Label target)
    {
        m_assembler.link(m_assembler.jmp(), target.m_label);
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to.
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * Zero (and NonZero) branches test whether the result is zero.
    // * Overflow branches if the (signed) arithmetic
    //   operation caused an overflow to occur.
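    //
    // For example (register name hypothetical):
    //
    //     Jump overflowed = branchAdd32(Overflow, Imm32(1), regT0);
    //     // regT0 has been incremented; 'overflowed' is taken on signed overflow.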

    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        addPtr(src, dest);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        subPtr(imm, dest);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jCC(cond));
    }

    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jCC(cond));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Jump call()
    {
        return Jump(m_assembler.call());
    }

    // FIXME: why does this return a Jump object? - it can't be linked.
    // This may be to get a reference to the return address of the call.
    //
    // This should probably be handled by a separate label type to a regular
    // jump. TODO: add a CallLabel type, for the regular call - can be linked
    // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
    // Also add a CallReturnLabel type for this to return (just a more JmpDst-like
    // form of label, can get the void* after the code has been linked, but can't
    // try to link it like a Jump object), and let the CallLabel be cast into a
    // CallReturnLabel.
    Jump call(RegisterID target)
    {
        return Jump(m_assembler.call(target));
    }

    Label label()
    {
        return Label(this);
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

    ptrdiff_t differenceBetween(Label from, Jump to)
    {
        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    }

    ptrdiff_t differenceBetween(Label from, Label to)
    {
        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
    {
        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    ptrdiff_t differenceBetween(Label from, DataLabel32 to)
    {
        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
    {
        return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h