Changeset 40660 in webkit for trunk/JavaScriptCore/assembler


Timestamp:
Feb 4, 2009, 7:40:39 PM
Author:
[email protected]
Message:

2009-02-04 Gavin Barraclough <[email protected]>

Rubber stamped by Sam 'Big Mac' Weinig.

  • assembler/AbstractMacroAssembler.h: Copied from assembler/MacroAssembler.h.
  • assembler/MacroAssemblerX86.h: Copied from assembler/MacroAssembler.h.
  • assembler/MacroAssemblerX86Common.h: Copied from assembler/MacroAssembler.h.
  • assembler/MacroAssemblerX86_64.h: Copied from assembler/MacroAssembler.h.
Location:
trunk/JavaScriptCore/assembler
Files:
1 edited
4 copied

  • trunk/JavaScriptCore/assembler/AbstractMacroAssembler.h

    r40656 r40660  
    24 24 */
    25 25
    26 #ifndef MacroAssembler_h
    27 #define MacroAssembler_h
     26#ifndef AbstractMacroAssembler_h
     27#define AbstractMacroAssembler_h
    28 28
    29 29 #include <wtf/Platform.h>
    30 30
    31 31 #if ENABLE(ASSEMBLER)
    32 
    33 #include "X86Assembler.h"
    34 32
    35 33 namespace JSC {
     
    460 458 };
    461 459
    462 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    463 public:
    464 
    465     typedef X86Assembler::Condition Condition;
    466     static const Condition Equal = X86Assembler::ConditionE;
    467     static const Condition NotEqual = X86Assembler::ConditionNE;
    468     static const Condition Above = X86Assembler::ConditionA;
    469     static const Condition AboveOrEqual = X86Assembler::ConditionAE;
    470     static const Condition Below = X86Assembler::ConditionB;
    471     static const Condition BelowOrEqual = X86Assembler::ConditionBE;
    472     static const Condition GreaterThan = X86Assembler::ConditionG;
    473     static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE;
    474     static const Condition LessThan = X86Assembler::ConditionL;
    475     static const Condition LessThanOrEqual = X86Assembler::ConditionLE;
    476     static const Condition Overflow = X86Assembler::ConditionO;
    477     static const Condition Zero = X86Assembler::ConditionE;
    478     static const Condition NonZero = X86Assembler::ConditionNE;
    479 
    480     static const RegisterID stackPointerRegister = X86::esp;
    481 
    482     // Integer arithmetic operations:
    483     //
    484     // Operations are typically two operand - operation(source, srcDst).
    485     // For many operations the source may be an Imm32, and the srcDst operand
    486     // may often be a memory location (explicitly described using an Address
    487     // object).
    488 
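For illustration, a sketch of the operation(source, srcDst) convention described above, assuming the calls appear inside a member function of a class deriving from MacroAssembler (so the nested Imm32/Address types are in scope):

    add32(Imm32(4), X86::eax);               // eax += 4
    add32(X86::ecx, X86::eax);               // eax += ecx
    add32(Imm32(1), Address(X86::esp, 8));   // 32-bit add to the word at esp + 8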
    489     void add32(RegisterID src, RegisterID dest)
    490     {
    491         m_assembler.addl_rr(src, dest);
    492     }
    493 
    494     void add32(Imm32 imm, Address address)
    495     {
    496         m_assembler.addl_im(imm.m_value, address.offset, address.base);
    497     }
    498 
    499     void add32(Imm32 imm, RegisterID dest)
    500     {
    501         m_assembler.addl_ir(imm.m_value, dest);
    502     }
    503    
    504     void add32(Address src, RegisterID dest)
    505     {
    506         m_assembler.addl_mr(src.offset, src.base, dest);
    507     }
    508    
    509     void and32(RegisterID src, RegisterID dest)
    510     {
    511         m_assembler.andl_rr(src, dest);
    512     }
    513 
    514     void and32(Imm32 imm, RegisterID dest)
    515     {
    516         m_assembler.andl_ir(imm.m_value, dest);
    517     }
    518 
    519     void lshift32(Imm32 imm, RegisterID dest)
    520     {
    521         m_assembler.shll_i8r(imm.m_value, dest);
    522     }
    523    
    524     void lshift32(RegisterID shift_amount, RegisterID dest)
    525     {
    526         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    527         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    528         if (shift_amount != X86::ecx) {
    529             swap(shift_amount, X86::ecx);
    530 
    531             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
    532             if (dest == shift_amount)
    533                 m_assembler.shll_CLr(X86::ecx);
    534             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
    535             else if (dest == X86::ecx)
    536                 m_assembler.shll_CLr(shift_amount);
    537             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
    538             else
    539                 m_assembler.shll_CLr(dest);
    540        
    541             swap(shift_amount, X86::ecx);
    542         } else
    543             m_assembler.shll_CLr(dest);
    544     }
    545    
    546     // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
    547     // For now, this operation has specific register requirements, and the three registers must
    548     // be unique.  It is unfortunate to expose this in the MacroAssembler interface, however
    549     // given the complexity to fix, the fact that it is not uncommon for processors to have
    550     // specific register requirements on this operation (e.g. MIPS places the result in 'hi'), or to not
    551     // support a hardware divide at all, it may not be worth fixing.
    552     void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
    553     {
    554 #ifdef NDEBUG
    555 #pragma unused(dividend,remainder)
    556 #else
    557         ASSERT((dividend == X86::eax) && (remainder == X86::edx));
    558         ASSERT((dividend != divisor) && (remainder != divisor));
    559 #endif
    560 
    561         m_assembler.cdq();
    562         m_assembler.idivl_r(divisor);
    563     }
    564 
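A hedged example of the constraint noted above: on x86 the dividend must already be in eax and the remainder is produced in edx, while the divisor (ebx here) may be any other register. As before, this assumes code inside a MacroAssembler-derived generator:

    mod32(X86::ebx, X86::eax, X86::edx);     // edx = eax % ebx; eax is clobbered by the quotient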
    565     void mul32(RegisterID src, RegisterID dest)
    566     {
    567         m_assembler.imull_rr(src, dest);
    568     }
    569    
    570     void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    571     {
    572         m_assembler.imull_i32r(src, imm.m_value, dest);
    573     }
    574    
    575     void not32(RegisterID srcDest)
    576     {
    577         m_assembler.notl_r(srcDest);
    578     }
    579    
    580     void or32(RegisterID src, RegisterID dest)
    581     {
    582         m_assembler.orl_rr(src, dest);
    583     }
    584 
    585     void or32(Imm32 imm, RegisterID dest)
    586     {
    587         m_assembler.orl_ir(imm.m_value, dest);
    588     }
    589 
    590     void rshift32(RegisterID shift_amount, RegisterID dest)
    591     {
    592         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    593         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    594         if (shift_amount != X86::ecx) {
    595             swap(shift_amount, X86::ecx);
    596 
    597             // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
    598             if (dest == shift_amount)
    599                 m_assembler.sarl_CLr(X86::ecx);
    600             // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
    601             else if (dest == X86::ecx)
    602                 m_assembler.sarl_CLr(shift_amount);
    603             // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
    604             else
    605                 m_assembler.sarl_CLr(dest);
    606        
    607             swap(shift_amount, X86::ecx);
    608         } else
    609             m_assembler.sarl_CLr(dest);
    610     }
    611 
    612     void rshift32(Imm32 imm, RegisterID dest)
    613     {
    614         m_assembler.sarl_i8r(imm.m_value, dest);
    615     }
    616 
    617     void sub32(RegisterID src, RegisterID dest)
    618     {
    619         m_assembler.subl_rr(src, dest);
    620     }
    621    
    622     void sub32(Imm32 imm, RegisterID dest)
    623     {
    624         m_assembler.subl_ir(imm.m_value, dest);
    625     }
    626    
    627     void sub32(Imm32 imm, Address address)
    628     {
    629         m_assembler.subl_im(imm.m_value, address.offset, address.base);
    630     }
    631 
    632     void sub32(Address src, RegisterID dest)
    633     {
    634         m_assembler.subl_mr(src.offset, src.base, dest);
    635     }
    636 
    637     void xor32(RegisterID src, RegisterID dest)
    638     {
    639         m_assembler.xorl_rr(src, dest);
    640     }
    641 
    642     void xor32(Imm32 imm, RegisterID srcDest)
    643     {
    644         m_assembler.xorl_ir(imm.m_value, srcDest);
    645     }
    646    
    647 
    648     // Memory access operations:
    649     //
    650     // Loads are of the form load(address, destination) and stores of the form
    651     // store(source, address).  The source for a store may be an Imm32.  Address
    652     // operand objects to loads and stores will be implicitly constructed if a
    653     // register is passed.
    654 
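A short sketch of the load/store forms and the implicit Address construction mentioned above (same MacroAssembler-derived context assumed):

    load32(Address(X86::ebp, -4), X86::eax);    // eax = *(ebp - 4)
    load32(X86::esi, X86::eax);                 // equivalent to load32(Address(esi), eax)
    store32(Imm32(0), Address(X86::ebp, -4));   // *(ebp - 4) = 0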
    655     void load32(ImplicitAddress address, RegisterID dest)
    656     {
    657         m_assembler.movl_mr(address.offset, address.base, dest);
    658     }
    659 
    660     void load32(BaseIndex address, RegisterID dest)
    661     {
    662         m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    663     }
    664 
    665     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    666     {
    667         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
    668         return DataLabel32(this);
    669     }
    670 
    671     void load16(BaseIndex address, RegisterID dest)
    672     {
    673         m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    674     }
    675 
    676     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    677     {
    678         m_assembler.movl_rm_disp32(src, address.offset, address.base);
    679         return DataLabel32(this);
    680     }
    681 
    682     void store32(RegisterID src, ImplicitAddress address)
    683     {
    684         m_assembler.movl_rm(src, address.offset, address.base);
    685     }
    686 
    687     void store32(RegisterID src, BaseIndex address)
    688     {
    689         m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    690     }
    691 
    692     void store32(Imm32 imm, ImplicitAddress address)
    693     {
    694         m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    695     }
    696    
    697 
    698     // Stack manipulation operations:
    699     //
    700     // The ABI is assumed to provide a stack abstraction to memory,
    701     // containing machine word sized units of data.  Push and pop
    702     // operations add and remove a single register sized unit of data
    703     // to or from the stack.  Peek and poke operations read or write
    704     // values on the stack, without moving the current stack position.
    705    
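A minimal illustration of the push/pop contract described above, each call moving one machine word on the stack (MacroAssembler-derived context assumed):

    push(X86::eax);
    push(Imm32(42));
    pop(X86::ecx);     // ecx = 42
    pop(X86::eax);     // original eax value restored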
    706     void pop(RegisterID dest)
    707     {
    708         m_assembler.pop_r(dest);
    709     }
    710 
    711     void push(RegisterID src)
    712     {
    713         m_assembler.push_r(src);
    714     }
    715 
    716     void push(Address address)
    717     {
    718         m_assembler.push_m(address.offset, address.base);
    719     }
    720 
    721     void push(Imm32 imm)
    722     {
    723         m_assembler.push_i32(imm.m_value);
    724     }
    725 
    726     // Register move operations:
    727     //
    728     // Move values in registers.
    729 
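For illustration (same assumed context), the move forms below behave as follows; note the zero-immediate special case handled inside move():

    move(Imm32(0), X86::eax);     // emitted as xorl %eax, %eax
    move(Imm32(7), X86::edx);     // movl $7, %edx
    move(X86::edx, X86::ecx);     // plain register-to-register move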
    730     void move(Imm32 imm, RegisterID dest)
    731     {
    732         // Note: on 64-bit the Imm32 value is zero extended into the register; it
    733         // may be useful to have a separate version that sign extends the value?
    734         if (!imm.m_value)
    735             m_assembler.xorl_rr(dest, dest);
    736         else
    737             m_assembler.movl_i32r(imm.m_value, dest);
    738     }
    739 
    740 #if PLATFORM(X86_64)
    741     void move(RegisterID src, RegisterID dest)
    742     {
    743         // Note: on 64-bit this is a full register move; perhaps it would be
    744         // useful to have separate move32 & movePtr, with move32 zero extending?
    745         m_assembler.movq_rr(src, dest);
    746     }
    747 
    748     void move(ImmPtr imm, RegisterID dest)
    749     {
    750         if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
    751             m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
    752         else
    753             m_assembler.movq_i64r(imm.asIntptr(), dest);
    754     }
    755 
    756     void swap(RegisterID reg1, RegisterID reg2)
    757     {
    758         m_assembler.xchgq_rr(reg1, reg2);
    759     }
    760 
    761     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    762     {
    763         m_assembler.movsxd_rr(src, dest);
    764     }
    765 
    766     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    767     {
    768         m_assembler.movl_rr(src, dest);
    769     }
    770 #else
    771     void move(RegisterID src, RegisterID dest)
    772     {
    773         m_assembler.movl_rr(src, dest);
    774     }
    775 
    776     void move(ImmPtr imm, RegisterID dest)
    777     {
    778         m_assembler.movl_i32r(imm.asIntptr(), dest);
    779     }
    780 
    781     void swap(RegisterID reg1, RegisterID reg2)
    782     {
    783         m_assembler.xchgl_rr(reg1, reg2);
    784     }
    785 
    786     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    787     {
    788         if (src != dest)
    789             move(src, dest);
    790     }
    791 
    792     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    793     {
    794         if (src != dest)
    795             move(src, dest);
    796     }
    797 #endif
    798 
    799 
    800     // Forwards / external control flow operations:
    801     //
    802     // This set of jump and conditional branch operations returns a Jump
    803     // object which may be linked at a later point, allowing forward jumps,
    804     // or jumps that will require external linkage (after the code has been
    805     // relocated).
    806     //
    807     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    808     // respectively; for unsigned comparisons the names b, a, be, and ae are
    809     // used (representing the names 'below' and 'above').
    810     //
    811     // Operands to the comparison are provided in the expected order, e.g.
    812     // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value held in reg1,
    813     // when treated as a signed 32-bit value, is less than or equal to 5.
    814     //
    815     // branchTest32 with the Zero and NonZero conditions tests whether the first operand
    816     // is equal to zero, and takes an optional second operand of a mask under which to perform the test.
    817 
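A hedged sketch of the branch conventions described above; the returned Jump objects are linked later, here to the current end of the code stream (MacroAssembler-derived context assumed):

    Jump belowLimit = branch32(LessThanOrEqual, X86::eax, Imm32(5));
    Jump lowBitSet = branchTest32(NonZero, X86::eax, Imm32(1));
    // ... emit the not-taken path, then:
    belowLimit.link(this);
    lowBitSet.link(this);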
    818 public:
    819     Jump branch32(Condition cond, RegisterID left, RegisterID right)
    820     {
    821         m_assembler.cmpl_rr(right, left);
    822         return Jump(m_assembler.jCC(cond));
    823     }
    824 
    825     Jump branch32(Condition cond, RegisterID left, Imm32 right)
    826     {
    827         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    828             m_assembler.testl_rr(left, left);
    829         else
    830             m_assembler.cmpl_ir(right.m_value, left);
    831         return Jump(m_assembler.jCC(cond));
    832     }
    833    
    834     Jump branch32(Condition cond, RegisterID left, Address right)
    835     {
    836         m_assembler.cmpl_mr(right.offset, right.base, left);
    837         return Jump(m_assembler.jCC(cond));
    838     }
    839    
    840     Jump branch32(Condition cond, Address left, RegisterID right)
    841     {
    842         m_assembler.cmpl_rm(right, left.offset, left.base);
    843         return Jump(m_assembler.jCC(cond));
    844     }
    845 
    846     Jump branch32(Condition cond, Address left, Imm32 right)
    847     {
    848         m_assembler.cmpl_im(right.m_value, left.offset, left.base);
    849         return Jump(m_assembler.jCC(cond));
    850     }
    851 
    852     Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    853     {
    854         m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
    855         return Jump(m_assembler.jCC(cond));
    856     }
    857 
    858     Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    859     {
    860         ASSERT((cond == Zero) || (cond == NonZero));
    861         m_assembler.testl_rr(reg, mask);
    862         return Jump(m_assembler.jCC(cond));
    863     }
    864 
    865     Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    866     {
    867         ASSERT((cond == Zero) || (cond == NonZero));
    868         // if we are only interested in the low seven bits, this can be tested with a testb
    869         if (mask.m_value == -1)
    870             m_assembler.testl_rr(reg, reg);
    871         else if ((mask.m_value & ~0x7f) == 0)
    872             m_assembler.testb_i8r(mask.m_value, reg);
    873         else
    874             m_assembler.testl_i32r(mask.m_value, reg);
    875         return Jump(m_assembler.jCC(cond));
    876     }
    877 
    878     Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    879     {
    880         ASSERT((cond == Zero) || (cond == NonZero));
    881         if (mask.m_value == -1)
    882             m_assembler.cmpl_im(0, address.offset, address.base);
    883         else
    884             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    885         return Jump(m_assembler.jCC(cond));
    886     }
    887 
    888     Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    889     {
    890         ASSERT((cond == Zero) || (cond == NonZero));
    891         if (mask.m_value == -1)
    892             m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
    893         else
    894             m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    895         return Jump(m_assembler.jCC(cond));
    896     }
    897 
    898     Jump jump()
    899     {
    900         return Jump(m_assembler.jmp());
    901     }
    902 
    903     void jump(RegisterID target)
    904     {
    905         m_assembler.jmp_r(target);
    906     }
    907 
    908     // Address is a memory location containing the address to jump to
    909     void jump(Address address)
    910     {
    911         m_assembler.jmp_m(address.offset, address.base);
    912     }
    913 
    914 
    915     // Arithmetic control flow operations:
    916     //
    917     // This set of conditional branch operations branches based
    918     // on the result of an arithmetic operation.  The operation
    919     // is performed as normal, storing the result.
    920     //
    921     // * Zero-condition operations branch if the result is zero.
    922     // * Overflow-condition operations branch if the (signed) arithmetic
    923     //   operation caused an overflow to occur.
    924    
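As a sketch of the pattern these operations support (same assumed context): an add that falls through on success and yields a Jump to a slow path on signed overflow:

    Jump overflowed = branchAdd32(Overflow, Imm32(1), X86::eax);
    // the fast path continues here with the incremented value in eax;
    // 'overflowed' is linked to the slow path later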
    925     Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    926     {
    927         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    928         add32(src, dest);
    929         return Jump(m_assembler.jCC(cond));
    930     }
    931    
    932     Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    933     {
    934         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    935         add32(imm, dest);
    936         return Jump(m_assembler.jCC(cond));
    937     }
    938    
    939     Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    940     {
    941         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    942         mul32(src, dest);
    943         return Jump(m_assembler.jCC(cond));
    944     }
    945    
    946     Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    947     {
    948         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    949         mul32(imm, src, dest);
    950         return Jump(m_assembler.jCC(cond));
    951     }
    952    
    953     Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    954     {
    955         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    956         sub32(src, dest);
    957         return Jump(m_assembler.jCC(cond));
    958     }
    959    
    960     Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    961     {
    962         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    963         sub32(imm, dest);
    964         return Jump(m_assembler.jCC(cond));
    965     }
    966    
    967 
    968     // Miscellaneous operations:
    969 
    970     void breakpoint()
    971     {
    972         m_assembler.int3();
    973     }
    974 
    975     Jump call()
    976     {
    977         return Jump(m_assembler.call());
    978     }
    979 
    980     // FIXME: why does this return a Jump object? - it can't be linked.
    981     // This may be to get a reference to the return address of the call.
    982     //
    983     // This should probably be handled by a separate label type to a regular
    984     // jump.  Todo: add a CallLabel type, for the regular call - can be linked
    985     // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
    986     // Also add a CallReturnLabel type for this to return (just a more JmpDsty
    987     // form of label, can get the void* after the code has been linked, but can't
    988     // try to link it like a Jump object), and let the CallLabel be cast into a
    989     // CallReturnLabel.
    990     Jump call(RegisterID target)
    991     {
    992         return Jump(m_assembler.call(target));
    993     }
    994 
    995     void ret()
    996     {
    997         m_assembler.ret();
    998     }
    999 
    1000     void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    1001     {
    1002         m_assembler.cmpl_rr(right, left);
    1003         m_assembler.setCC_r(cond, dest);
    1004         m_assembler.movzbl_rr(dest, dest);
    1005     }
    1006 
    1007     void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    1008     {
    1009         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    1010             m_assembler.testl_rr(left, left);
    1011         else
    1012             m_assembler.cmpl_ir(right.m_value, left);
    1013         m_assembler.setCC_r(cond, dest);
    1014         m_assembler.movzbl_rr(dest, dest);
    1015     }
    1016 
    1017     // FIXME:
    1018     // The mask should be optional... perhaps the argument order should be
    1019     // dest-src, operations always have a dest? ... possibly not true, considering
    1020     // asm ops like test, or pseudo ops like pop().
    1021     void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    1022     {
    1023         if (mask.m_value == -1)
    1024             m_assembler.cmpl_im(0, address.offset, address.base);
    1025         else
    1026             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    1027         m_assembler.setCC_r(cond, dest);
    1028         m_assembler.movzbl_rr(dest, dest);
    1029     }
    1030 };
    1031 
    1032 
    1033 #if PLATFORM(X86_64)
    1034 
    1035 class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
    1036 protected:
    1037     static const X86::RegisterID scratchRegister = X86::r11;
    1038 
    1039 public:
    1040     static const Scale ScalePtr = TimesEight;
    1041 
    1042     using MacroAssemblerX86Common::add32;
    1043     using MacroAssemblerX86Common::sub32;
    1044     using MacroAssemblerX86Common::load32;
    1045     using MacroAssemblerX86Common::store32;
    1046 
    1047     void add32(Imm32 imm, AbsoluteAddress address)
    1048     {
    1049         move(ImmPtr(address.m_ptr), scratchRegister);
    1050         add32(imm, Address(scratchRegister));
    1051     }
    1052    
    1053     void sub32(Imm32 imm, AbsoluteAddress address)
    1054     {
    1055         move(ImmPtr(address.m_ptr), scratchRegister);
    1056         sub32(imm, Address(scratchRegister));
    1057     }
    1058 
    1059     void load32(void* address, RegisterID dest)
    1060     {
    1061         if (dest == X86::eax)
    1062             m_assembler.movl_mEAX(address);
    1063         else {
    1064             move(X86::eax, dest);
    1065             m_assembler.movl_mEAX(address);
    1066             swap(X86::eax, dest);
    1067         }
    1068     }
    1069 
    1070     void store32(Imm32 imm, void* address)
    1071     {
    1072         move(X86::eax, scratchRegister);
    1073         move(imm, X86::eax);
    1074         m_assembler.movl_EAXm(address);
    1075         move(scratchRegister, X86::eax);
    1076     }
    1077 
    1078 
    1079 
    1080     void addPtr(RegisterID src, RegisterID dest)
    1081     {
    1082         m_assembler.addq_rr(src, dest);
    1083     }
    1084 
    1085     void addPtr(Imm32 imm, RegisterID srcDest)
    1086     {
    1087         m_assembler.addq_ir(imm.m_value, srcDest);
    1088     }
    1089 
    1090     void addPtr(ImmPtr imm, RegisterID dest)
    1091     {
    1092         move(imm, scratchRegister);
    1093         m_assembler.addq_rr(scratchRegister, dest);
    1094     }
    1095 
    1096     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1097     {
    1098         m_assembler.leal_mr(imm.m_value, src, dest);
    1099     }
    1100 
    1101     void andPtr(RegisterID src, RegisterID dest)
    1102     {
    1103         m_assembler.andq_rr(src, dest);
    1104     }
    1105 
    1106     void andPtr(Imm32 imm, RegisterID srcDest)
    1107     {
    1108         m_assembler.andq_ir(imm.m_value, srcDest);
    1109     }
    1110 
    1111     void orPtr(RegisterID src, RegisterID dest)
    1112     {
    1113         m_assembler.orq_rr(src, dest);
    1114     }
    1115 
    1116     void orPtr(ImmPtr imm, RegisterID dest)
    1117     {
    1118         move(imm, scratchRegister);
    1119         m_assembler.orq_rr(scratchRegister, dest);
    1120     }
    1121 
    1122     void orPtr(Imm32 imm, RegisterID dest)
    1123     {
    1124         m_assembler.orq_ir(imm.m_value, dest);
    1125     }
    1126 
    1127     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1128     {
    1129         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    1130         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    1131         if (shift_amount != X86::ecx) {
    1132             swap(shift_amount, X86::ecx);
    1133 
    1134             // E.g. transform "sarq %rax, %rax" -> "xchgq %rax, %rcx; sarq %rcx, %rcx; xchgq %rax, %rcx"
    1135             if (dest == shift_amount)
    1136                 m_assembler.sarq_CLr(X86::ecx);
    1137             // E.g. transform "sarq %rax, %rcx" -> "xchgq %rax, %rcx; sarq %rcx, %rax; xchgq %rax, %rcx"
    1138             else if (dest == X86::ecx)
    1139                 m_assembler.sarq_CLr(shift_amount);
    1140             // E.g. transform "sarq %rax, %rbx" -> "xchgq %rax, %rcx; sarq %rcx, %rbx; xchgq %rax, %rcx"
    1141             else
    1142                 m_assembler.sarq_CLr(dest);
    1143        
    1144             swap(shift_amount, X86::ecx);
    1145         } else
    1146             m_assembler.sarq_CLr(dest);
    1147     }
    1148 
    1149     void rshiftPtr(Imm32 imm, RegisterID dest)
    1150     {
    1151         m_assembler.sarq_i8r(imm.m_value, dest);
    1152     }
    1153 
    1154     void subPtr(RegisterID src, RegisterID dest)
    1155     {
    1156         m_assembler.subq_rr(src, dest);
    1157     }
    1158    
    1159     void subPtr(Imm32 imm, RegisterID dest)
    1160     {
    1161         m_assembler.subq_ir(imm.m_value, dest);
    1162     }
    1163    
    1164     void subPtr(ImmPtr imm, RegisterID dest)
    1165     {
    1166         move(imm, scratchRegister);
    1167         m_assembler.subq_rr(scratchRegister, dest);
    1168     }
    1169 
    1170     void xorPtr(RegisterID src, RegisterID dest)
    1171     {
    1172         m_assembler.xorq_rr(src, dest);
    1173     }
    1174 
    1175     void xorPtr(Imm32 imm, RegisterID srcDest)
    1176     {
    1177         m_assembler.xorq_ir(imm.m_value, srcDest);
    1178     }
    1179 
    1180 
    1181     void loadPtr(ImplicitAddress address, RegisterID dest)
    1182     {
    1183         m_assembler.movq_mr(address.offset, address.base, dest);
    1184     }
    1185 
    1186     void loadPtr(BaseIndex address, RegisterID dest)
    1187     {
    1188         m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    1189     }
    1190 
    1191     void loadPtr(void* address, RegisterID dest)
    1192     {
    1193         if (dest == X86::eax)
    1194             m_assembler.movq_mEAX(address);
    1195         else {
    1196             move(X86::eax, dest);
    1197             m_assembler.movq_mEAX(address);
    1198             swap(X86::eax, dest);
    1199         }
    1200     }
    1201 
    1202     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1203     {
    1204         m_assembler.movq_mr_disp32(address.offset, address.base, dest);
    1205         return DataLabel32(this);
    1206     }
    1207 
    1208     void storePtr(RegisterID src, ImplicitAddress address)
    1209     {
    1210         m_assembler.movq_rm(src, address.offset, address.base);
    1211     }
    1212 
    1213     void storePtr(RegisterID src, BaseIndex address)
    1214     {
    1215         m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    1216     }
    1217 
    1218     void storePtr(ImmPtr imm, ImplicitAddress address)
    1219     {
    1220         move(imm, scratchRegister);
    1221         storePtr(scratchRegister, address);
    1222     }
    1223 
    1224     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1225     {
    1226         m_assembler.movq_rm_disp32(src, address.offset, address.base);
    1227         return DataLabel32(this);
    1228     }
    1229 
    1230 
    1231     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1232     {
    1233         m_assembler.cmpq_rr(right, left);
    1234         return Jump(m_assembler.jCC(cond));
    1235     }
    1236 
    1237     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1238     {
    1239         intptr_t imm = right.asIntptr();
    1240         if (CAN_SIGN_EXTEND_32_64(imm)) {
    1241             if (!imm)
    1242                 m_assembler.testq_rr(left, left);
    1243             else
    1244                 m_assembler.cmpq_ir(imm, left);
    1245             return Jump(m_assembler.jCC(cond));
    1246         } else {
    1247             move(right, scratchRegister);
    1248             return branchPtr(cond, left, scratchRegister);
    1249         }
    1250     }
    1251 
    1252     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1253     {
    1254         m_assembler.cmpq_mr(right.offset, right.base, left);
    1255         return Jump(m_assembler.jCC(cond));
    1256     }
    1257 
    1258     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1259     {
    1260         move(ImmPtr(left.m_ptr), scratchRegister);
    1261         return branchPtr(cond, Address(scratchRegister), right);
    1262     }
    1263 
    1264     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1265     {
    1266         m_assembler.cmpq_rm(right, left.offset, left.base);
    1267         return Jump(m_assembler.jCC(cond));
    1268     }
    1269 
    1270     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1271     {
    1272         move(right, scratchRegister);
    1273         return branchPtr(cond, left, scratchRegister);
    1274     }
    1275 
    1276     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1277     {
    1278         m_assembler.testq_rr(reg, mask);
    1279         return Jump(m_assembler.jCC(cond));
    1280     }
    1281 
    1282     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1283     {
    1284         // if we are only interested in the low seven bits, this can be tested with a testb
    1285         if (mask.m_value == -1)
    1286             m_assembler.testq_rr(reg, reg);
    1287         else if ((mask.m_value & ~0x7f) == 0)
    1288             m_assembler.testb_i8r(mask.m_value, reg);
    1289         else
    1290             m_assembler.testq_i32r(mask.m_value, reg);
    1291         return Jump(m_assembler.jCC(cond));
    1292     }
    1293 
    1294     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1295     {
    1296         if (mask.m_value == -1)
    1297             m_assembler.cmpq_im(0, address.offset, address.base);
    1298         else
    1299             m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
    1300         return Jump(m_assembler.jCC(cond));
    1301     }
    1302 
    1303     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1304     {
    1305         if (mask.m_value == -1)
    1306             m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
    1307         else
    1308             m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    1309         return Jump(m_assembler.jCC(cond));
    1310     }
    1311 
    1312 
    1313     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1314     {
    1315         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1316         addPtr(src, dest);
    1317         return Jump(m_assembler.jCC(cond));
    1318     }
    1319 
    1320     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1321     {
    1322         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1323         subPtr(imm, dest);
    1324         return Jump(m_assembler.jCC(cond));
    1325     }
    1326 
    1327     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1328     {
    1329         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1330         dataLabel = DataLabelPtr(this);
    1331         return branchPtr(cond, left, scratchRegister);
    1332     }
    1333 
    1334     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1335     {
    1336         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1337         dataLabel = DataLabelPtr(this);
    1338         return branchPtr(cond, left, scratchRegister);
    1339     }
    1340 
    1341     DataLabelPtr storePtrWithPatch(Address address)
    1342     {
    1343         m_assembler.movq_i64r(0, scratchRegister);
    1344         DataLabelPtr label(this);
    1345         storePtr(scratchRegister, address);
    1346         return label;
    1347     }
    1348 };
    1349 
    1350 typedef MacroAssemblerX86_64 MacroAssemblerBase;
    1351 
    1352 #else
    1353 
    1354 class MacroAssemblerX86 : public MacroAssemblerX86Common {
    1355 public:
    1356     static const Scale ScalePtr = TimesFour;
    1357 
    1358     using MacroAssemblerX86Common::add32;
    1359     using MacroAssemblerX86Common::sub32;
    1360     using MacroAssemblerX86Common::load32;
    1361     using MacroAssemblerX86Common::store32;
    1362     using MacroAssemblerX86Common::branch32;
    1363 
    1364     void add32(Imm32 imm, RegisterID src, RegisterID dest)
    1365     {
    1366         m_assembler.leal_mr(imm.m_value, src, dest);
    1367     }
    1368 
    1369     void add32(Imm32 imm, AbsoluteAddress address)
    1370     {
    1371         m_assembler.addl_im(imm.m_value, address.m_ptr);
    1372     }
    1373    
    1374     void sub32(Imm32 imm, AbsoluteAddress address)
    1375     {
    1376         m_assembler.subl_im(imm.m_value, address.m_ptr);
    1377     }
    1378 
    1379     void load32(void* address, RegisterID dest)
    1380     {
    1381         m_assembler.movl_mr(address, dest);
    1382     }
    1383 
    1384     void store32(Imm32 imm, void* address)
    1385     {
    1386         m_assembler.movl_i32m(imm.m_value, address);
    1387     }
    1388 
    1389     Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    1390     {
    1391         m_assembler.cmpl_rm(right, left.m_ptr);
    1392         return Jump(m_assembler.jCC(cond));
    1393     }
    1394 
    1395     Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    1396     {
    1397         m_assembler.cmpl_im(right.m_value, left.m_ptr);
    1398         return Jump(m_assembler.jCC(cond));
    1399     }
    1400 
    1401     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1402     {
    1403         m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
    1404         dataLabel = DataLabelPtr(this);
    1405         return Jump(m_assembler.jCC(cond));
    1406     }
    1407 
    1408     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1409     {
    1410         m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
    1411         dataLabel = DataLabelPtr(this);
    1412         return Jump(m_assembler.jCC(cond));
    1413     }
    1414 
    1415     DataLabelPtr storePtrWithPatch(Address address)
    1416     {
    1417         m_assembler.movl_i32m(0, address.offset, address.base);
    1418         return DataLabelPtr(this);
    1419     }
    1420 };
    1421 
    1422 typedef MacroAssemblerX86 MacroAssemblerBase;
    1423 
    1424 #endif
    1425 
    1426 
    1427 class MacroAssembler : public MacroAssemblerBase {
    1428 public:
    1429 
    1430     using MacroAssemblerBase::pop;
    1431     using MacroAssemblerBase::jump;
    1432     using MacroAssemblerBase::branch32;
    1433     using MacroAssemblerBase::branch16;
    1434 #if PLATFORM(X86_64)
    1435     using MacroAssemblerBase::branchPtr;
    1436     using MacroAssemblerBase::branchTestPtr;
    1437 #endif
    1438 
    1439 
    1440     // Platform-agnostic convenience functions,
    1441     // described in terms of other macro assembly methods.
    1442     void pop()
    1443     {
    1444         addPtr(Imm32(sizeof(void*)), stackPointerRegister);
    1445     }
    1446    
    1447     void peek(RegisterID dest, int index = 0)
    1448     {
    1449         loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    1450     }
    1451 
    1452     void poke(RegisterID src, int index = 0)
    1453     {
    1454         storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
    1455     }
    1456 
    1457     void poke(Imm32 value, int index = 0)
    1458     {
    1459         store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
    1460     }
    1461 
    1462     void poke(ImmPtr imm, int index = 0)
    1463     {
    1464         storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
    1465     }
    1466 
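A brief illustration of the stack convenience helpers above; the index argument is in machine words relative to the stack pointer (MacroAssembler-derived context assumed):

    poke(Imm32(7), 1);       // write 7 one word above the stack pointer
    peek(X86::eax, 1);       // read it back into eax
    pop();                   // discard the top word without loading it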
    1467 
    1468     // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
    1469     void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
    1470     {
    1471         branchPtr(cond, op1, imm).linkTo(target, this);
    1472     }
    1473 
    1474     void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
    1475     {
    1476         branch32(cond, op1, op2).linkTo(target, this);
    1477     }
    1478 
    1479     void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
    1480     {
    1481         branch32(cond, op1, imm).linkTo(target, this);
    1482     }
    1483 
    1484     void branch32(Condition cond, RegisterID left, Address right, Label target)
    1485     {
    1486         branch32(cond, left, right).linkTo(target, this);
    1487     }
    1488 
    1489     void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
    1490     {
    1491         branch16(cond, left, right).linkTo(target, this);
    1492     }
    1493    
    1494     void branchTestPtr(Condition cond, RegisterID reg, Label target)
    1495     {
    1496         branchTestPtr(cond, reg).linkTo(target, this);
    1497     }
    1498 
    1499     void jump(Label target)
    1500     {
    1501         jump().linkTo(target, this);
    1502     }
    1503 
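A hedged sketch of a simple counted loop built from Label and the backward-branch helpers above (same assumed context):

    move(Imm32(10), X86::ecx);
    Label loopTop(this);
    sub32(Imm32(1), X86::ecx);
    branchTest32(NonZero, X86::ecx).linkTo(loopTop, this);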
    1504 
    1505     // Ptr methods
    1506     // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    1507 #if !PLATFORM(X86_64)
    1508     void addPtr(RegisterID src, RegisterID dest)
    1509     {
    1510         add32(src, dest);
    1511     }
    1512 
    1513     void addPtr(Imm32 imm, RegisterID srcDest)
    1514     {
    1515         add32(imm, srcDest);
    1516     }
    1517 
    1518     void addPtr(ImmPtr imm, RegisterID dest)
    1519     {
    1520         add32(Imm32(imm), dest);
    1521     }
    1522 
    1523     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1524     {
    1525         add32(imm, src, dest);
    1526     }
    1527 
    1528     void andPtr(RegisterID src, RegisterID dest)
    1529     {
    1530         and32(src, dest);
    1531     }
    1532 
    1533     void andPtr(Imm32 imm, RegisterID srcDest)
    1534     {
    1535         and32(imm, srcDest);
    1536     }
    1537 
    1538     void orPtr(RegisterID src, RegisterID dest)
    1539     {
    1540         or32(src, dest);
    1541     }
    1542 
    1543     void orPtr(ImmPtr imm, RegisterID dest)
    1544     {
    1545         or32(Imm32(imm), dest);
    1546     }
    1547 
    1548     void orPtr(Imm32 imm, RegisterID dest)
    1549     {
    1550         or32(imm, dest);
    1551     }
    1552 
    1553     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1554     {
    1555         rshift32(shift_amount, dest);
    1556     }
    1557 
    1558     void rshiftPtr(Imm32 imm, RegisterID dest)
    1559     {
    1560         rshift32(imm, dest);
    1561     }
    1562 
    1563     void subPtr(RegisterID src, RegisterID dest)
    1564     {
    1565         sub32(src, dest);
    1566     }
    1567    
    1568     void subPtr(Imm32 imm, RegisterID dest)
    1569     {
    1570         sub32(imm, dest);
    1571     }
    1572    
    1573     void subPtr(ImmPtr imm, RegisterID dest)
    1574     {
    1575         sub32(Imm32(imm), dest);
    1576     }
    1577 
    1578     void xorPtr(RegisterID src, RegisterID dest)
    1579     {
    1580         xor32(src, dest);
    1581     }
    1582 
    1583     void xorPtr(Imm32 imm, RegisterID srcDest)
    1584     {
    1585         xor32(imm, srcDest);
    1586     }
    1587 
    1588 
    1589     void loadPtr(ImplicitAddress address, RegisterID dest)
    1590     {
    1591         load32(address, dest);
    1592     }
    1593 
    1594     void loadPtr(BaseIndex address, RegisterID dest)
    1595     {
    1596         load32(address, dest);
    1597     }
    1598 
    1599     void loadPtr(void* address, RegisterID dest)
    1600     {
    1601         load32(address, dest);
    1602     }
    1603 
    1604     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1605     {
    1606         return load32WithAddressOffsetPatch(address, dest);
    1607     }
    1608 
    1609     void storePtr(RegisterID src, ImplicitAddress address)
    1610     {
    1611         store32(src, address);
    1612     }
    1613 
    1614     void storePtr(RegisterID src, BaseIndex address)
    1615     {
    1616         store32(src, address);
    1617     }
    1618 
    1619     void storePtr(ImmPtr imm, ImplicitAddress address)
    1620     {
    1621         store32(Imm32(imm), address);
    1622     }
    1623 
    1624     void storePtr(ImmPtr imm, void* address)
    1625     {
    1626         store32(Imm32(imm), address);
    1627     }
    1628 
    1629     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1630     {
    1631         return store32WithAddressOffsetPatch(src, address);
    1632     }
    1633 
    1634 
    1635     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1636     {
    1637         return branch32(cond, left, right);
    1638     }
    1639 
    1640     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1641     {
    1642         return branch32(cond, left, Imm32(right));
    1643     }
    1644 
    1645     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1646     {
    1647         return branch32(cond, left, right);
    1648     }
    1649 
    1650     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1651     {
    1652         return branch32(cond, left, right);
    1653     }
    1654 
    1655     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1656     {
    1657         return branch32(cond, left, right);
    1658     }
    1659 
    1660     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1661     {
    1662         return branch32(cond, left, Imm32(right));
    1663     }
    1664 
    1665     Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
    1666     {
    1667         return branch32(cond, left, Imm32(right));
    1668     }
    1669 
    1670     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1671     {
    1672         return branchTest32(cond, reg, mask);
    1673     }
    1674 
    1675     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1676     {
    1677         return branchTest32(cond, reg, mask);
    1678     }
    1679 
    1680     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1681     {
    1682         return branchTest32(cond, address, mask);
    1683     }
    1684 
    1685     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1686     {
    1687         return branchTest32(cond, address, mask);
    1688     }
    1689 
    1690 
    1691     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1692     {
    1693         return branchAdd32(cond, src, dest);
    1694     }
    1695 
    1696     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1697     {
    1698         return branchSub32(cond, imm, dest);
    1699     }
    1700 #endif
    1701 
    1702 };
    1703 
    1704 460 } // namespace JSC
    1705 461
    1706 462 #endif // ENABLE(ASSEMBLER)
    1707 463
    1708 #endif // MacroAssembler_h
     464#endif // AbstractMacroAssembler_h
  • trunk/JavaScriptCore/assembler/MacroAssembler.h

    r40656 r40660  
    31 31 #if ENABLE(ASSEMBLER)
    32 32
    33 #include "X86Assembler.h"
     33#if PLATFORM(X86)
     34#include "MacroAssemblerX86.h"
     35namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
     36
     37#elif PLATFORM(X86_64)
     38#include "MacroAssemblerX86_64.h"
     39namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
     40
     41#else
     42#error "The MacroAssembler is not supported on this platform."
     43#endif
     44
    34 45
    35 46 namespace JSC {
    36 
    37 template <class AssemblerType>
    38 class AbstractMacroAssembler {
    39 protected:
    40     AssemblerType m_assembler;
    41 
    42 public:
    43     typedef typename AssemblerType::RegisterID RegisterID;
    44     typedef typename AssemblerType::JmpSrc JmpSrc;
    45     typedef typename AssemblerType::JmpDst JmpDst;
    46 
    47     enum Scale {
    48         TimesOne,
    49         TimesTwo,
    50         TimesFour,
    51         TimesEight,
    52     };
    53 
    54     // Address:
    55     //
    56     // Describes a simple base-offset address.
    57     struct Address {
    58         explicit Address(RegisterID base, int32_t offset = 0)
    59             : base(base)
    60             , offset(offset)
    61         {
    62         }
    63 
    64         RegisterID base;
    65         int32_t offset;
    66     };
    67 
    68     // ImplicitAddress:
    69     //
    70     // This class is used for explicit 'load' and 'store' operations
    71     // (as opposed to situations in which a memory operand is provided
    72     // to a generic operation, such as an integer arithmetic instruction).
    73     //
    74     // In the case of a load (or store) operation we want to permit
    75     // addresses to be implicitly constructed, e.g. the two calls:
    76     //
    77     //     load32(Address(addrReg), destReg);
    78     //     load32(addrReg, destReg);
    79     //
    80     // Are equivalent, and the explicit wrapping of the Address in the former
    81     // is unnecessary.
    82     struct ImplicitAddress {
    83         ImplicitAddress(RegisterID base)
    84             : base(base)
    85             , offset(0)
    86         {
    87         }
    88 
    89         ImplicitAddress(Address address)
    90             : base(address.base)
    91             , offset(address.offset)
    92         {
    93         }
    94 
    95         RegisterID base;
    96         int32_t offset;
    97     };
    98 
    99     // BaseIndex:
    100     //
    101     // Describes a complex addressing mode of the form base + (index * scale) + offset.
    102     struct BaseIndex {
    103         BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
    104             : base(base)
    105             , index(index)
    106             , scale(scale)
    107             , offset(offset)
    108         {
    109         }
    110 
    111         RegisterID base;
    112         RegisterID index;
    113         Scale scale;
    114         int32_t offset;
    115     };
    116 
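A hedged example of the BaseIndex form: load a 32-bit array element whose base address is in edx and whose element index is in ecx (MacroAssembler-derived context assumed; load32 itself is provided by the platform classes):

    load32(BaseIndex(X86::edx, X86::ecx, TimesFour), X86::eax);   // eax = base[index], 4-byte elements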
    117     // AbsoluteAddress:
    118     //
    119     // Describes a memory operand given by a pointer.  For regular load & store
    120     // operations an unwrapped void* will be used, rather than using this.
    121     struct AbsoluteAddress {
    122         explicit AbsoluteAddress(void* ptr)
    123             : m_ptr(ptr)
    124         {
    125         }
    126 
    127         void* m_ptr;
    128     };
    129 
    130 
    131     class Jump;
    132     class PatchBuffer;
    133 
    134     // DataLabelPtr:
    135     //
    136     // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    137     // patched after the code has been generated.
    138     class DataLabelPtr {
    139         template<class AssemblerType_T>
    140         friend class AbstractMacroAssembler;
    141         friend class PatchBuffer;
    142 
    143     public:
    144         DataLabelPtr()
    145         {
    146         }
    147 
    148         DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
    149             : m_label(masm->m_assembler.label())
    150         {
    151         }
    152 
    153         static void patch(void* address, void* value)
    154         {
    155             AssemblerType::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
    156         }
    157        
    158     private:
    159         JmpDst m_label;
    160     };
    161 
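A sketch of how a DataLabelPtr is typically produced and later patched; branchPtrWithPatch is provided by the platform classes, patchBuffer is a PatchBuffer assumed to have been built over the copied code, and expectedStructure is a hypothetical pointer used only for illustration:

    DataLabelPtr pointerSlot;
    Jump mismatch = branchPtrWithPatch(NotEqual, X86::eax, pointerSlot);
    // ... once the code has been copied to its final location:
    DataLabelPtr::patch(patchBuffer.addressOf(pointerSlot), expectedStructure);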
    162     // DataLabel32:
    163     //
    164     // A DataLabel32 is used to refer to a location in the code containing a 32-bit value
    165     // to be patched after the code has been generated.
    166     class DataLabel32 {
    167         template<class AssemblerType_T>
    168         friend class AbstractMacroAssembler;
    169         friend class PatchBuffer;
    170 
    171     public:
    172         DataLabel32()
    173         {
    174         }
    175 
    176         DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
    177             : m_label(masm->m_assembler.label())
    178         {
    179         }
    180 
    181         static void patch(void* address, int32_t value)
    182         {
    183             AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(address), value);
    184         }
    185 
    186     private:
    187         JmpDst m_label;
    188     };
    189 
    190     // Label:
    191     //
    192     // A Label records a point in the generated instruction stream, typically such that
    193     // it may be used as a destination for a jump.
    194     class Label {
    195         friend class Jump;
    196         template<class AssemblerType_T>
    197         friend class AbstractMacroAssembler;
    198         friend class PatchBuffer;
    199 
    200     public:
    201         Label()
    202         {
    203         }
    204 
    205         Label(AbstractMacroAssembler<AssemblerType>* masm)
    206             : m_label(masm->m_assembler.label())
    207         {
    208         }
    209        
    210     private:
    211         JmpDst m_label;
    212     };
    213 
    214 
    215     // Jump:
    216     //
    217     // A jump object is a reference to a jump instruction that has been planted
    218     // into the code buffer - it is typically used to link the jump, setting the
    219     // relative offset such that when executed it will jump to the desired
    220     // destination.
    221     //
    222     // Jump objects do not themselves retain a pointer to the assembler; the assembler
    223     // is passed in when the jump is linked, e.g.:
    224     //
    225     //     Jump forwardsBranch = branch32(NotEqual, reg1, Imm32(0));
    226     //     // ...
    227     //     forwardsBranch.link(this);
    228     //
    229     // Jumps may also be linked to a Label.
    230     class Jump {
    231         friend class PatchBuffer;
    232         template<class AssemblerType_T>
    233         friend class AbstractMacroAssembler;
    234 
    235     public:
    236         Jump()
    237         {
    238         }
    239        
    240         Jump(JmpSrc jmp)
    241             : m_jmp(jmp)
    242         {
    243         }
    244        
    245         void link(AbstractMacroAssembler<AssemblerType>* masm)
    246         {
    247             masm->m_assembler.link(m_jmp, masm->m_assembler.label());
    248         }
    249        
    250         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    251         {
    252             masm->m_assembler.link(m_jmp, label.m_label);
    253         }
    254        
    255         static void patch(void* address, void* destination)
    256         {
    257             AssemblerType::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
    258         }
    259 
    260     private:
    261         JmpSrc m_jmp;
    262     };
    263 
    264     // JumpList:
    265     //
    266     // A JumpList is a set of Jump objects.
    267     // All jumps in the set will be linked to the same destination.
    268     class JumpList {
    269         friend class PatchBuffer;
    270 
    271     public:
    272         void link(AbstractMacroAssembler<AssemblerType>* masm)
    273         {
    274             size_t size = m_jumps.size();
    275             for (size_t i = 0; i < size; ++i)
    276                 m_jumps[i].link(masm);
    277             m_jumps.clear();
    278         }
    279        
    280         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    281         {
    282             size_t size = m_jumps.size();
    283             for (size_t i = 0; i < size; ++i)
    284                 m_jumps[i].linkTo(label, masm);
    285             m_jumps.clear();
    286         }
    287        
    288         void append(Jump jump)
    289         {
    290             m_jumps.append(jump);
    291         }
    292        
    293         void append(JumpList& other)
    294         {
    295             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
    296         }
    297 
    298         bool empty()
    299         {
    300             return !m_jumps.size();
    301         }
    302 
    303     private:
    304         Vector<Jump, 16> m_jumps;
    305     };
    306 
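As a sketch only (written as it would appear inside a class deriving from the macro assembler, and using the branch operations defined further below), a JumpList lets several failure branches share one destination:

        JumpList failures;
        failures.append(branchTest32(Zero, X86::eax));            // taken if eax == 0
        failures.append(branch32(NotEqual, X86::edx, Imm32(0)));  // taken if edx != 0
        // ... fast path ...
        failures.link(this);    // every jump collected so far now targets this point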
    307 
    308     // PatchBuffer:
    309     //
    310     // This class assists in linking code generated by the macro assembler, once code generation
    311     // has been completed, and the code has been copied to its final location in memory.  At this
    312     // time pointers to labels within the code may be resolved, and relative offsets to external
    313     // addresses may be fixed.
    314     //
    315     // Specifically:
    316     //   * Jump objects may be linked to external targets,
    317     //   * The address of a Jump object may be taken, such that it can later be relinked.
    318     //   * The return address of a Jump object representing a call may be acquired.
    319     //   * The address of a Label pointing into the code may be resolved.
    320     //   * The value referenced by a DataLabel may be fixed.
    321     //
    322     // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
    323     // address of calls, as opposed to a point that can be used to later relink a Jump -
    324     // possibly wrap the latter up in an object that can do just that).
    325     class PatchBuffer {
    326     public:
    327         PatchBuffer(void* code)
    328             : m_code(code)
    329         {
    330         }
    331 
    332         void link(Jump jump, void* target)
    333         {
    334             AssemblerType::link(m_code, jump.m_jmp, target);
    335         }
    336 
    337         void link(JumpList list, void* target)
    338         {
    339             for (unsigned i = 0; i < list.m_jumps.size(); ++i)
    340                 AssemblerType::link(m_code, list.m_jumps[i].m_jmp, target);
    341         }
    342 
    343         void* addressOf(Jump jump)
    344         {
    345             return AssemblerType::getRelocatedAddress(m_code, jump.m_jmp);
    346         }
    347 
    348         void* addressOf(Label label)
    349         {
    350             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    351         }
    352 
    353         void* addressOf(DataLabelPtr label)
    354         {
    355             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    356         }
    357 
    358         void* addressOf(DataLabel32 label)
    359         {
    360             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    361         }
    362 
    363         void setPtr(DataLabelPtr label, void* value)
    364         {
    365             AssemblerType::patchAddress(m_code, label.m_label, value);
    366         }
    367 
    368     private:
    369         void* m_code;
    370     };
    371  
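A minimal end-to-end sketch of the PatchBuffer flow, assuming an ExecutablePool* named pool and an external slow-path function slowPath (both hypothetical):

        Jump toSlowPath = branchTest32(NonZero, X86::eax);  // emitted during code generation
        Label fastPath = label();
        // ... generate the remainder of the code ...
        void* code = copyCode(pool);                        // copy to its final, executable location
        PatchBuffer patchBuffer(code);
        patchBuffer.link(toSlowPath, reinterpret_cast<void*>(slowPath));  // link to an external target
        void* fastPathAddress = patchBuffer.addressOf(fastPath);          // resolve a label's address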
    372 
    373     // ImmPtr:
    374     //
    375     // A pointer-sized immediate operand to an instruction - this is wrapped
    376     // in a class requiring explicit construction in order to differentiate
    377     // it from pointers used as absolute addresses to memory operations.
    378     struct ImmPtr {
    379         explicit ImmPtr(void* value)
    380             : m_value(value)
    381         {
    382         }
    383 
    384         intptr_t asIntptr()
    385         {
    386             return reinterpret_cast<intptr_t>(m_value);
    387         }
    388 
    389         void* m_value;
    390     };
    391 
    392     // Imm32:
    393     //
    394     // A 32-bit immediate operand to an instruction - this is wrapped in a
    395     // class requiring explicit construction in order to prevent RegisterIDs
    396     // (which are implemented as an enum) from accidentally being passed as
    397     // immediate values.
    398     struct Imm32 {
    399         explicit Imm32(int32_t value)
    400             : m_value(value)
    401         {
    402         }
    403 
    404 #if !PLATFORM(X86_64)
    405         explicit Imm32(ImmPtr ptr)
    406             : m_value(ptr.asIntptr())
    407         {
    408         }
    409 #endif
    410 
    411         int32_t m_value;
    412     };
    413 
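For illustration (a sketch; globalAddress is a hypothetical void*), the explicit wrappers keep constants, pointers and registers distinct at call sites:

        add32(Imm32(4), X86::eax);              // add the constant 4 to eax
        add32(X86::edx, X86::eax);              // add the value held in edx to eax
        move(ImmPtr(globalAddress), X86::eax);  // load a pointer constant into eax
        // add32(4, X86::eax);                  // would not compile: immediates must be wrapped explicitly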
    414     size_t size()
    415     {
    416         return m_assembler.size();
    417     }
    418 
    419     void* copyCode(ExecutablePool* allocator)
    420     {
    421         return m_assembler.executableCopy(allocator);
    422     }
    423 
    424     Label label()
    425     {
    426         return Label(this);
    427     }
    428    
    429     Label align()
    430     {
    431         m_assembler.align(16);
    432         return Label(this);
    433     }
    434 
    435     ptrdiff_t differenceBetween(Label from, Jump to)
    436     {
    437         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    438     }
    439 
    440     ptrdiff_t differenceBetween(Label from, Label to)
    441     {
    442         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    443     }
    444 
    445     ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
    446     {
    447         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    448     }
    449 
    450     ptrdiff_t differenceBetween(Label from, DataLabel32 to)
    451     {
    452         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    453     }
    454 
    455     ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
    456     {
    457         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    458     }
    459 
    460 };
    461 
    462 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    463 public:
    464 
    465     typedef X86Assembler::Condition Condition;
    466     static const Condition Equal = X86Assembler::ConditionE;
    467     static const Condition NotEqual = X86Assembler::ConditionNE;
    468     static const Condition Above = X86Assembler::ConditionA;
    469     static const Condition AboveOrEqual = X86Assembler::ConditionAE;
    470     static const Condition Below = X86Assembler::ConditionB;
    471     static const Condition BelowOrEqual = X86Assembler::ConditionBE;
    472     static const Condition GreaterThan = X86Assembler::ConditionG;
    473     static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE;
    474     static const Condition LessThan = X86Assembler::ConditionL;
    475     static const Condition LessThanOrEqual = X86Assembler::ConditionLE;
    476     static const Condition Overflow = X86Assembler::ConditionO;
    477     static const Condition Zero = X86Assembler::ConditionE;
    478     static const Condition NonZero = X86Assembler::ConditionNE;
    479 
    480     static const RegisterID stackPointerRegister = X86::esp;
    481 
    482     // Integer arithmetic operations:
    483     //
    484     // Operations are typically two operand - operation(source, srcDst).
    485     // For many operations the source may be an Imm32, and the srcDst operand
    486     // may often be a memory location (explicitly described using an Address
    487     // object).
    488 
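For example (a sketch; the register choices are arbitrary):

        add32(Imm32(1), X86::eax);               // eax = eax + 1
        add32(Imm32(1), Address(X86::edx, 8));   // increment the 32-bit word at edx + 8
        sub32(X86::ecx, X86::eax);               // eax = eax - ecx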
    489     void add32(RegisterID src, RegisterID dest)
    490     {
    491         m_assembler.addl_rr(src, dest);
    492     }
    493 
    494     void add32(Imm32 imm, Address address)
    495     {
    496         m_assembler.addl_im(imm.m_value, address.offset, address.base);
    497     }
    498 
    499     void add32(Imm32 imm, RegisterID dest)
    500     {
    501         m_assembler.addl_ir(imm.m_value, dest);
    502     }
    503    
    504     void add32(Address src, RegisterID dest)
    505     {
    506         m_assembler.addl_mr(src.offset, src.base, dest);
    507     }
    508    
    509     void and32(RegisterID src, RegisterID dest)
    510     {
    511         m_assembler.andl_rr(src, dest);
    512     }
    513 
    514     void and32(Imm32 imm, RegisterID dest)
    515     {
    516         m_assembler.andl_ir(imm.m_value, dest);
    517     }
    518 
    519     void lshift32(Imm32 imm, RegisterID dest)
    520     {
    521         m_assembler.shll_i8r(imm.m_value, dest);
    522     }
    523    
    524     void lshift32(RegisterID shift_amount, RegisterID dest)
    525     {
    526         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    527         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    528         if (shift_amount != X86::ecx) {
    529             swap(shift_amount, X86::ecx);
    530 
    531             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
    532             if (dest == shift_amount)
    533                 m_assembler.shll_CLr(X86::ecx);
    534             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
    535             else if (dest == X86::ecx)
    536                 m_assembler.shll_CLr(shift_amount);
    537             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
    538             else
    539                 m_assembler.shll_CLr(dest);
    540        
    541             swap(shift_amount, X86::ecx);
    542         } else
    543             m_assembler.shll_CLr(dest);
    544     }
    545    
    546     // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
    547     // For now, this operation has specific register requirements, and the three registers must
    548     // be unique.  It is unfortunate to expose this in the MacroAssembler interface, however
    549     // given the complexity to fix, the fact that it is not uncommon for processors to have
    550     // specific register requirements on this operation (e.g. MIPS leaves the result in 'hi'), or to not
    551     // support a hardware divide at all, it may not be worth fixing.
    552     void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
    553     {
    554 #ifdef NDEBUG
    555 #pragma unused(dividend,remainder)
    556 #else
    557         ASSERT((dividend == X86::eax) && (remainder == X86::edx));
    558         ASSERT((dividend != divisor) && (remainder != divisor));
    559 #endif
    560 
    561         m_assembler.cdq();
    562         m_assembler.idivl_r(divisor);
    563     }
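A sketch of the register discipline this imposes (operand values chosen arbitrarily):

        move(Imm32(10), X86::eax);             // the dividend must be in eax
        move(Imm32(3), X86::ecx);              // the divisor may be any other register
        mod32(X86::ecx, X86::eax, X86::edx);   // edx now holds 10 % 3, i.e. 1 (eax holds the quotient)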
    564 
    565     void mul32(RegisterID src, RegisterID dest)
    566     {
    567         m_assembler.imull_rr(src, dest);
    568     }
    569    
    570     void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    571     {
    572         m_assembler.imull_i32r(src, imm.m_value, dest);
    573     }
    574    
    575     void not32(RegisterID srcDest)
    576     {
    577         m_assembler.notl_r(srcDest);
    578     }
    579    
    580     void or32(RegisterID src, RegisterID dest)
    581     {
    582         m_assembler.orl_rr(src, dest);
    583     }
    584 
    585     void or32(Imm32 imm, RegisterID dest)
    586     {
    587         m_assembler.orl_ir(imm.m_value, dest);
    588     }
    589 
    590     void rshift32(RegisterID shift_amount, RegisterID dest)
    591     {
    592         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    593         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    594         if (shift_amount != X86::ecx) {
    595             swap(shift_amount, X86::ecx);
    596 
    597             // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
    598             if (dest == shift_amount)
    599                 m_assembler.sarl_CLr(X86::ecx);
    600             // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
    601             else if (dest == X86::ecx)
    602                 m_assembler.sarl_CLr(shift_amount);
    603             // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
    604             else
    605                 m_assembler.sarl_CLr(dest);
    606        
    607             swap(shift_amount, X86::ecx);
    608         } else
    609             m_assembler.sarl_CLr(dest);
    610     }
    611 
    612     void rshift32(Imm32 imm, RegisterID dest)
    613     {
    614         m_assembler.sarl_i8r(imm.m_value, dest);
    615     }
    616 
    617     void sub32(RegisterID src, RegisterID dest)
    618     {
    619         m_assembler.subl_rr(src, dest);
    620     }
    621    
    622     void sub32(Imm32 imm, RegisterID dest)
    623     {
    624         m_assembler.subl_ir(imm.m_value, dest);
    625     }
    626    
    627     void sub32(Imm32 imm, Address address)
    628     {
    629         m_assembler.subl_im(imm.m_value, address.offset, address.base);
    630     }
    631 
    632     void sub32(Address src, RegisterID dest)
    633     {
    634         m_assembler.subl_mr(src.offset, src.base, dest);
    635     }
    636 
    637     void xor32(RegisterID src, RegisterID dest)
    638     {
    639         m_assembler.xorl_rr(src, dest);
    640     }
    641 
    642     void xor32(Imm32 imm, RegisterID srcDest)
    643     {
    644         m_assembler.xorl_ir(imm.m_value, srcDest);
    645     }
    646    
    647 
    648     // Memory access operations:
    649     //
    650     // Loads are of the form load(address, destination) and stores of the form
    651     // store(source, address).  The source for a store may be an Imm32.  Address
    652     // operand objects to loads and stores will be implicitly constructed if a
    653     // register is passed.
    654 
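For example (a sketch):

        load32(Address(X86::eax, 4), X86::edx);    // edx = *(int32_t*)(eax + 4)
        load32(X86::eax, X86::edx);                // same as load32(Address(X86::eax), X86::edx)
        store32(Imm32(0), Address(X86::eax, 4));   // *(int32_t*)(eax + 4) = 0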
    655     void load32(ImplicitAddress address, RegisterID dest)
    656     {
    657         m_assembler.movl_mr(address.offset, address.base, dest);
    658     }
    659 
    660     void load32(BaseIndex address, RegisterID dest)
    661     {
    662         m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    663     }
    664 
    665     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    666     {
    667         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
    668         return DataLabel32(this);
    669     }
    670 
    671     void load16(BaseIndex address, RegisterID dest)
    672     {
    673         m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    674     }
    675 
    676     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    677     {
    678         m_assembler.movl_rm_disp32(src, address.offset, address.base);
    679         return DataLabel32(this);
    680     }
    681 
    682     void store32(RegisterID src, ImplicitAddress address)
    683     {
    684         m_assembler.movl_rm(src, address.offset, address.base);
    685     }
    686 
    687     void store32(RegisterID src, BaseIndex address)
    688     {
    689         m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    690     }
    691 
    692     void store32(Imm32 imm, ImplicitAddress address)
    693     {
    694         m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    695     }
    696    
    697 
    698     // Stack manipulation operations:
    699     //
    700     // The ABI is assumed to provide a stack abstraction to memory,
    701     // containing machine word sized units of data.  Push and pop
    702     // operations add and remove a single register sized unit of data
    703     // to or from the stack.  Peek and poke operations read or write
    704     // values on the stack, without moving the current stack position.
    705    
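For example (a sketch):

        push(X86::eax);     // decrement the stack pointer and store eax
        push(Imm32(42));    // push a constant
        pop(X86::edx);      // edx = 42; the stack pointer moves back up
        pop(X86::eax);      // restore eax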
    706     void pop(RegisterID dest)
    707     {
    708         m_assembler.pop_r(dest);
    709     }
    710 
    711     void push(RegisterID src)
    712     {
    713         m_assembler.push_r(src);
    714     }
    715 
    716     void push(Address address)
    717     {
    718         m_assembler.push_m(address.offset, address.base);
    719     }
    720 
    721     void push(Imm32 imm)
    722     {
    723         m_assembler.push_i32(imm.m_value);
    724     }
    725 
    726     // Register move operations:
    727     //
    728     // Move values in registers.
    729 
    730     void move(Imm32 imm, RegisterID dest)
    731     {
    732         // Note: on 64-bit the Imm32 value is zero extended into the register; it
    733         // may be useful to have a separate version that sign extends the value?
    734         if (!imm.m_value)
    735             m_assembler.xorl_rr(dest, dest);
    736         else
    737             m_assembler.movl_i32r(imm.m_value, dest);
    738     }
    739 
    740 #if PLATFORM(X86_64)
    741     void move(RegisterID src, RegisterID dest)
    742     {
    743         // Note: on 64-bit this is a full register move; perhaps it would be
    744         // useful to have separate move32 & movePtr, with move32 zero extending?
    745         m_assembler.movq_rr(src, dest);
    746     }
    747 
    748     void move(ImmPtr imm, RegisterID dest)
    749     {
    750         if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
    751             m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
    752         else
    753             m_assembler.movq_i64r(imm.asIntptr(), dest);
    754     }
    755 
    756     void swap(RegisterID reg1, RegisterID reg2)
    757     {
    758         m_assembler.xchgq_rr(reg1, reg2);
    759     }
    760 
    761     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    762     {
    763         m_assembler.movsxd_rr(src, dest);
    764     }
    765 
    766     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    767     {
    768         m_assembler.movl_rr(src, dest);
    769     }
    770 #else
    771     void move(RegisterID src, RegisterID dest)
    772     {
    773         m_assembler.movl_rr(src, dest);
    774     }
    775 
    776     void move(ImmPtr imm, RegisterID dest)
    777     {
    778         m_assembler.movl_i32r(imm.asIntptr(), dest);
    779     }
    780 
    781     void swap(RegisterID reg1, RegisterID reg2)
    782     {
    783         m_assembler.xchgl_rr(reg1, reg2);
    784     }
    785 
    786     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    787     {
    788         if (src != dest)
    789             move(src, dest);
    790     }
    791 
    792     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    793     {
    794         if (src != dest)
    795             move(src, dest);
    796     }
    797 #endif
    798 
    799 
    800     // Forwards / external control flow operations:
    801     //
    802     // This set of jump and conditional branch operations return a Jump
    803     // object which may be linked at a later point, allowing forwards jumps,
    804     // or jumps that will require external linkage (after the code has been
    805     // relocated).
    806     //
    807     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    808     // respectively; for unsigned comparisons the names b, a, be, and ae are
    809     // used (representing the names 'below' and 'above').
    810     //
    811     // Operands to the comparison are provided in the expected order, e.g.
    812     // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
    813     // treated as a signed 32-bit value, is less than or equal to 5.
    814     //
    815     // jz and jnz test whether the first operand is equal to zero, and take
    816     // an optional second operand of a mask under which to perform the test.
    817 
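A sketch of a forwards branch resolved against a label, written as it would appear inside the assembler (hence the explicit 'this'):

        Jump notFive = branch32(NotEqual, X86::eax, Imm32(5));  // taken if eax != 5
        // ... code for the eax == 5 case ...
        Label done = label();
        notFive.linkTo(done, this);                             // resolve the forwards branch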
    818 public:
    819     Jump branch32(Condition cond, RegisterID left, RegisterID right)
    820     {
    821         m_assembler.cmpl_rr(right, left);
    822         return Jump(m_assembler.jCC(cond));
    823     }
    824 
    825     Jump branch32(Condition cond, RegisterID left, Imm32 right)
    826     {
    827         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    828             m_assembler.testl_rr(left, left);
    829         else
    830             m_assembler.cmpl_ir(right.m_value, left);
    831         return Jump(m_assembler.jCC(cond));
    832     }
    833    
    834     Jump branch32(Condition cond, RegisterID left, Address right)
    835     {
    836         m_assembler.cmpl_mr(right.offset, right.base, left);
    837         return Jump(m_assembler.jCC(cond));
    838     }
    839    
    840     Jump branch32(Condition cond, Address left, RegisterID right)
    841     {
    842         m_assembler.cmpl_rm(right, left.offset, left.base);
    843         return Jump(m_assembler.jCC(cond));
    844     }
    845 
    846     Jump branch32(Condition cond, Address left, Imm32 right)
    847     {
    848         m_assembler.cmpl_im(right.m_value, left.offset, left.base);
    849         return Jump(m_assembler.jCC(cond));
    850     }
    851 
    852     Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    853     {
    854         m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
    855         return Jump(m_assembler.jCC(cond));
    856     }
    857 
    858     Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    859     {
    860         ASSERT((cond == Zero) || (cond == NonZero));
    861         m_assembler.testl_rr(reg, mask);
    862         return Jump(m_assembler.jCC(cond));
    863     }
    864 
    865     Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    866     {
    867         ASSERT((cond == Zero) || (cond == NonZero));
    868         // if we are only interested in the low seven bits, this can be tested with a testb
    869         if (mask.m_value == -1)
    870             m_assembler.testl_rr(reg, reg);
    871         else if ((mask.m_value & ~0x7f) == 0)
    872             m_assembler.testb_i8r(mask.m_value, reg);
    873         else
    874             m_assembler.testl_i32r(mask.m_value, reg);
    875         return Jump(m_assembler.jCC(cond));
    876     }
    877 
    878     Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    879     {
    880         ASSERT((cond == Zero) || (cond == NonZero));
    881         if (mask.m_value == -1)
    882             m_assembler.cmpl_im(0, address.offset, address.base);
    883         else
    884             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    885         return Jump(m_assembler.jCC(cond));
    886     }
    887 
    888     Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    889     {
    890         ASSERT((cond == Zero) || (cond == NonZero));
    891         if (mask.m_value == -1)
    892             m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
    893         else
    894             m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    895         return Jump(m_assembler.jCC(cond));
    896     }
    897 
    898     Jump jump()
    899     {
    900         return Jump(m_assembler.jmp());
    901     }
    902 
    903     void jump(RegisterID target)
    904     {
    905         m_assembler.jmp_r(target);
    906     }
    907 
    908     // Address is a memory location containing the address to jump to
    909     void jump(Address address)
    910     {
    911         m_assembler.jmp_m(address.offset, address.base);
    912     }
    913 
    914 
    915     // Arithmetic control flow operations:
    916     //
    917     // This set of conditional branch operations branch based
    918     // on the result of an arithmetic operation.  The operation
    919     // is performed as normal, storing the result.
    920     //
    921     // * jz operations branch if the result is zero.
    922     // * jo operations branch if the (signed) arithmetic
    923     //   operation caused an overflow to occur.
    924    
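For example, an overflow-checked addition (a sketch; the slow-case target would be resolved later via a label or a PatchBuffer):

        JumpList slowCases;
        slowCases.append(branchAdd32(Overflow, X86::edx, X86::eax));  // eax += edx, branch on overflow
        // ... fast path continues with the sum in eax ...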
    925     Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    926     {
    927         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    928         add32(src, dest);
    929         return Jump(m_assembler.jCC(cond));
    930     }
    931    
    932     Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    933     {
    934         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    935         add32(imm, dest);
    936         return Jump(m_assembler.jCC(cond));
    937     }
    938    
    939     Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    940     {
    941         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    942         mul32(src, dest);
    943         return Jump(m_assembler.jCC(cond));
    944     }
    945    
    946     Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    947     {
    948         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    949         mul32(imm, src, dest);
    950         return Jump(m_assembler.jCC(cond));
    951     }
    952    
    953     Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    954     {
    955         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    956         sub32(src, dest);
    957         return Jump(m_assembler.jCC(cond));
    958     }
    959    
    960     Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    961     {
    962         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    963         sub32(imm, dest);
    964         return Jump(m_assembler.jCC(cond));
    965     }
    966    
    967 
    968     // Miscellaneous operations:
    969 
    970     void breakpoint()
    971     {
    972         m_assembler.int3();
    973     }
    974 
    975     Jump call()
    976     {
    977         return Jump(m_assembler.call());
    978     }
    979 
    980     // FIXME: why does this return a Jump object? - it can't be linked.
    981     // This may be to get a reference to the return address of the call.
    982     //
    983     // This should probably be handled by a separate label type to a regular
    984     // jump.  Todo: add a CallLabel type, for the regular call - can be linked
    985     // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
    986     // Also add a CallReturnLabel type for this to return (just a more JmpDsty
    987     // form of label, can get the void* after the code has been linked, but can't
    988     // try to link it like a Jump object), and let the CallLabel be cast into a
    989     // CallReturnLabel.
    990     Jump call(RegisterID target)
    991     {
    992         return Jump(m_assembler.call(target));
    993     }
    994 
    995     void ret()
    996     {
    997         m_assembler.ret();
    998     }
    999 
    1000     void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    1001     {
    1002         m_assembler.cmpl_rr(right, left);
    1003         m_assembler.setCC_r(cond, dest);
    1004         m_assembler.movzbl_rr(dest, dest);
    1005     }
    1006 
    1007     void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    1008     {
    1009         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    1010             m_assembler.testl_rr(left, left);
    1011         else
    1012             m_assembler.cmpl_ir(right.m_value, left);
    1013         m_assembler.setCC_r(cond, dest);
    1014         m_assembler.movzbl_rr(dest, dest);
    1015     }
    1016 
    1017     // FIXME:
    1018     // The mask should be optional... perhaps the argument order should be
    1019     // dest-src, operations always have a dest? ... possibly not true, considering
    1020     // asm ops like test, or pseudo ops like pop().
    1021     void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    1022     {
    1023         if (mask.m_value == -1)
    1024             m_assembler.cmpl_im(0, address.offset, address.base);
    1025         else
    1026             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    1027         m_assembler.setCC_r(cond, dest);
    1028         m_assembler.movzbl_rr(dest, dest);
    1029     }
    1030 };
    1031 
    1032 
    1033 #if PLATFORM(X86_64)
    1034 
    1035 class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
    1036 protected:
    1037     static const X86::RegisterID scratchRegister = X86::r11;
    1038 
    1039 public:
    1040     static const Scale ScalePtr = TimesEight;
    1041 
    1042     using MacroAssemblerX86Common::add32;
    1043     using MacroAssemblerX86Common::sub32;
    1044     using MacroAssemblerX86Common::load32;
    1045     using MacroAssemblerX86Common::store32;
    1046 
    1047     void add32(Imm32 imm, AbsoluteAddress address)
    1048     {
    1049         move(ImmPtr(address.m_ptr), scratchRegister);
    1050         add32(imm, Address(scratchRegister));
    1051     }
    1052    
    1053     void sub32(Imm32 imm, AbsoluteAddress address)
    1054     {
    1055         move(ImmPtr(address.m_ptr), scratchRegister);
    1056         sub32(imm, Address(scratchRegister));
    1057     }
    1058 
    1059     void load32(void* address, RegisterID dest)
    1060     {
    1061         if (dest == X86::eax)
    1062             m_assembler.movl_mEAX(address);
    1063         else {
    1064             move(X86::eax, dest);
    1065             m_assembler.movl_mEAX(address);
    1066             swap(X86::eax, dest);
    1067         }
    1068     }
    1069 
    1070     void store32(Imm32 imm, void* address)
    1071     {
    1072         move(X86::eax, scratchRegister);
    1073         move(imm, X86::eax);
    1074         m_assembler.movl_EAXm(address);
    1075         move(scratchRegister, X86::eax);
    1076     }
    1077 
    1078 
    1079 
    1080     void addPtr(RegisterID src, RegisterID dest)
    1081     {
    1082         m_assembler.addq_rr(src, dest);
    1083     }
    1084 
    1085     void addPtr(Imm32 imm, RegisterID srcDest)
    1086     {
    1087         m_assembler.addq_ir(imm.m_value, srcDest);
    1088     }
    1089 
    1090     void addPtr(ImmPtr imm, RegisterID dest)
    1091     {
    1092         move(imm, scratchRegister);
    1093         m_assembler.addq_rr(scratchRegister, dest);
    1094     }
    1095 
    1096     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1097     {
    1098         m_assembler.leal_mr(imm.m_value, src, dest);
    1099     }
    1100 
    1101     void andPtr(RegisterID src, RegisterID dest)
    1102     {
    1103         m_assembler.andq_rr(src, dest);
    1104     }
    1105 
    1106     void andPtr(Imm32 imm, RegisterID srcDest)
    1107     {
    1108         m_assembler.andq_ir(imm.m_value, srcDest);
    1109     }
    1110 
    1111     void orPtr(RegisterID src, RegisterID dest)
    1112     {
    1113         m_assembler.orq_rr(src, dest);
    1114     }
    1115 
    1116     void orPtr(ImmPtr imm, RegisterID dest)
    1117     {
    1118         move(imm, scratchRegister);
    1119         m_assembler.orq_rr(scratchRegister, dest);
    1120     }
    1121 
    1122     void orPtr(Imm32 imm, RegisterID dest)
    1123     {
    1124         m_assembler.orq_ir(imm.m_value, dest);
    1125     }
    1126 
    1127     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1128     {
    1129         // On x86-64 we can only shift by ecx (rcx); if asked to shift by another register we'll
    1130         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    1131         if (shift_amount != X86::ecx) {
    1132             swap(shift_amount, X86::ecx);
    1133 
    1134             // E.g. transform "sarq %rax, %rax" -> "xchgq %rax, %rcx; sarq %rcx, %rcx; xchgq %rax, %rcx"
    1135             if (dest == shift_amount)
    1136                 m_assembler.sarq_CLr(X86::ecx);
    1137             // E.g. transform "sarq %rax, %rcx" -> "xchgq %rax, %rcx; sarq %rcx, %rax; xchgq %rax, %rcx"
    1138             else if (dest == X86::ecx)
    1139                 m_assembler.sarq_CLr(shift_amount);
    1140             // E.g. transform "sarq %rax, %rbx" -> "xchgq %rax, %rcx; sarq %rcx, %rbx; xchgq %rax, %rcx"
    1141             else
    1142                 m_assembler.sarq_CLr(dest);
    1143        
    1144             swap(shift_amount, X86::ecx);
    1145         } else
    1146             m_assembler.sarq_CLr(dest);
    1147     }
    1148 
    1149     void rshiftPtr(Imm32 imm, RegisterID dest)
    1150     {
    1151         m_assembler.sarq_i8r(imm.m_value, dest);
    1152     }
    1153 
    1154     void subPtr(RegisterID src, RegisterID dest)
    1155     {
    1156         m_assembler.subq_rr(src, dest);
    1157     }
    1158    
    1159     void subPtr(Imm32 imm, RegisterID dest)
    1160     {
    1161         m_assembler.subq_ir(imm.m_value, dest);
    1162     }
    1163    
    1164     void subPtr(ImmPtr imm, RegisterID dest)
    1165     {
    1166         move(imm, scratchRegister);
    1167         m_assembler.subq_rr(scratchRegister, dest);
    1168     }
    1169 
    1170     void xorPtr(RegisterID src, RegisterID dest)
    1171     {
    1172         m_assembler.xorq_rr(src, dest);
    1173     }
    1174 
    1175     void xorPtr(Imm32 imm, RegisterID srcDest)
    1176     {
    1177         m_assembler.xorq_ir(imm.m_value, srcDest);
    1178     }
    1179 
    1180 
    1181     void loadPtr(ImplicitAddress address, RegisterID dest)
    1182     {
    1183         m_assembler.movq_mr(address.offset, address.base, dest);
    1184     }
    1185 
    1186     void loadPtr(BaseIndex address, RegisterID dest)
    1187     {
    1188         m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    1189     }
    1190 
    1191     void loadPtr(void* address, RegisterID dest)
    1192     {
    1193         if (dest == X86::eax)
    1194             m_assembler.movq_mEAX(address);
    1195         else {
    1196             move(X86::eax, dest);
    1197             m_assembler.movq_mEAX(address);
    1198             swap(X86::eax, dest);
    1199         }
    1200     }
    1201 
    1202     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1203     {
    1204         m_assembler.movq_mr_disp32(address.offset, address.base, dest);
    1205         return DataLabel32(this);
    1206     }
    1207 
    1208     void storePtr(RegisterID src, ImplicitAddress address)
    1209     {
    1210         m_assembler.movq_rm(src, address.offset, address.base);
    1211     }
    1212 
    1213     void storePtr(RegisterID src, BaseIndex address)
    1214     {
    1215         m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    1216     }
    1217 
    1218     void storePtr(ImmPtr imm, ImplicitAddress address)
    1219     {
    1220         move(imm, scratchRegister);
    1221         storePtr(scratchRegister, address);
    1222     }
    1223 
    1224     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1225     {
    1226         m_assembler.movq_rm_disp32(src, address.offset, address.base);
    1227         return DataLabel32(this);
    1228     }
    1229 
    1230 
    1231     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1232     {
    1233         m_assembler.cmpq_rr(right, left);
    1234         return Jump(m_assembler.jCC(cond));
    1235     }
    1236 
    1237     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1238     {
    1239         intptr_t imm = right.asIntptr();
    1240         if (CAN_SIGN_EXTEND_32_64(imm)) {
    1241             if (!imm)
    1242                 m_assembler.testq_rr(left, left);
    1243             else
    1244                 m_assembler.cmpq_ir(imm, left);
    1245             return Jump(m_assembler.jCC(cond));
    1246         } else {
    1247             move(right, scratchRegister);
    1248             return branchPtr(cond, left, scratchRegister);
    1249         }
    1250     }
    1251 
    1252     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1253     {
    1254         m_assembler.cmpq_mr(right.offset, right.base, left);
    1255         return Jump(m_assembler.jCC(cond));
    1256     }
    1257 
    1258     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1259     {
    1260         move(ImmPtr(left.m_ptr), scratchRegister);
    1261         return branchPtr(cond, Address(scratchRegister), right);
    1262     }
    1263 
    1264     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1265     {
    1266         m_assembler.cmpq_rm(right, left.offset, left.base);
    1267         return Jump(m_assembler.jCC(cond));
    1268     }
    1269 
    1270     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1271     {
    1272         move(right, scratchRegister);
    1273         return branchPtr(cond, left, scratchRegister);
    1274     }
    1275 
    1276     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1277     {
    1278         m_assembler.testq_rr(reg, mask);
    1279         return Jump(m_assembler.jCC(cond));
    1280     }
    1281 
    1282     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1283     {
    1284         // if we are only interested in the low seven bits, this can be tested with a testb
    1285         if (mask.m_value == -1)
    1286             m_assembler.testq_rr(reg, reg);
    1287         else if ((mask.m_value & ~0x7f) == 0)
    1288             m_assembler.testb_i8r(mask.m_value, reg);
    1289         else
    1290             m_assembler.testq_i32r(mask.m_value, reg);
    1291         return Jump(m_assembler.jCC(cond));
    1292     }
    1293 
    1294     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1295     {
    1296         if (mask.m_value == -1)
    1297             m_assembler.cmpq_im(0, address.offset, address.base);
    1298         else
    1299             m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
    1300         return Jump(m_assembler.jCC(cond));
    1301     }
    1302 
    1303     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1304     {
    1305         if (mask.m_value == -1)
    1306             m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
    1307         else
    1308             m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    1309         return Jump(m_assembler.jCC(cond));
    1310     }
    1311 
    1312 
    1313     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1314     {
    1315         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1316         addPtr(src, dest);
    1317         return Jump(m_assembler.jCC(cond));
    1318     }
    1319 
    1320     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1321     {
    1322         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1323         subPtr(imm, dest);
    1324         return Jump(m_assembler.jCC(cond));
    1325     }
    1326 
    1327     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1328     {
    1329         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1330         dataLabel = DataLabelPtr(this);
    1331         return branchPtr(cond, left, scratchRegister);
    1332     }
    1333 
    1334     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1335     {
    1336         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1337         dataLabel = DataLabelPtr(this);
    1338         return branchPtr(cond, left, scratchRegister);
    1339     }
    1340 
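A sketch of how the patchable comparison is typically used; code, expectedStructure and slowCase are hypothetical names standing in for the linked code buffer, the pointer patched in later, and the out-of-line target:

        DataLabelPtr structureCheck;
        Jump mismatch = branchPtrWithPatch(NotEqual, X86::eax, structureCheck);
        // ... once the code has been copied and a PatchBuffer constructed:
        //     PatchBuffer patchBuffer(code);
        //     patchBuffer.setPtr(structureCheck, expectedStructure);
        //     patchBuffer.link(mismatch, reinterpret_cast<void*>(slowCase));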
    1341     DataLabelPtr storePtrWithPatch(Address address)
    1342     {
    1343         m_assembler.movq_i64r(0, scratchRegister);
    1344         DataLabelPtr label(this);
    1345         storePtr(scratchRegister, address);
    1346         return label;
    1347     }
    1348 };
    1349 
    1350 typedef MacroAssemblerX86_64 MacroAssemblerBase;
    1351 
    1352 #else
    1353 
    1354 class MacroAssemblerX86 : public MacroAssemblerX86Common {
    1355 public:
    1356     static const Scale ScalePtr = TimesFour;
    1357 
    1358     using MacroAssemblerX86Common::add32;
    1359     using MacroAssemblerX86Common::sub32;
    1360     using MacroAssemblerX86Common::load32;
    1361     using MacroAssemblerX86Common::store32;
    1362     using MacroAssemblerX86Common::branch32;
    1363 
    1364     void add32(Imm32 imm, RegisterID src, RegisterID dest)
    1365     {
    1366         m_assembler.leal_mr(imm.m_value, src, dest);
    1367     }
    1368 
    1369     void add32(Imm32 imm, AbsoluteAddress address)
    1370     {
    1371         m_assembler.addl_im(imm.m_value, address.m_ptr);
    1372     }
    1373    
    1374     void sub32(Imm32 imm, AbsoluteAddress address)
    1375     {
    1376         m_assembler.subl_im(imm.m_value, address.m_ptr);
    1377     }
    1378 
    1379     void load32(void* address, RegisterID dest)
    1380     {
    1381         m_assembler.movl_mr(address, dest);
    1382     }
    1383 
    1384     void store32(Imm32 imm, void* address)
    1385     {
    1386         m_assembler.movl_i32m(imm.m_value, address);
    1387     }
    1388 
    1389     Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    1390     {
    1391         m_assembler.cmpl_rm(right, left.m_ptr);
    1392         return Jump(m_assembler.jCC(cond));
    1393     }
    1394 
    1395     Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    1396     {
    1397         m_assembler.cmpl_im(right.m_value, left.m_ptr);
    1398         return Jump(m_assembler.jCC(cond));
    1399     }
    1400 
    1401     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1402     {
    1403         m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
    1404         dataLabel = DataLabelPtr(this);
    1405         return Jump(m_assembler.jCC(cond));
    1406     }
    1407 
    1408     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1409     {
    1410         m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
    1411         dataLabel = DataLabelPtr(this);
    1412         return Jump(m_assembler.jCC(cond));
    1413     }
    1414 
    1415     DataLabelPtr storePtrWithPatch(Address address)
    1416     {
    1417         m_assembler.movl_i32m(0, address.offset, address.base);
    1418         return DataLabelPtr(this);
    1419     }
    1420 };
    1421 
    1422 typedef MacroAssemblerX86 MacroAssemblerBase;
    1423 
    1424 #endif
    1425 
    142647
    142748class MacroAssembler : public MacroAssemblerBase {
  • trunk/JavaScriptCore/assembler/MacroAssemblerX86.h

    r40656 r40660  
    2424 */
    2525
    26 #ifndef MacroAssembler_h
    27 #define MacroAssembler_h
     26#ifndef MacroAssemblerX86_h
     27#define MacroAssemblerX86_h
    2828
    2929#include <wtf/Platform.h>
    3030
    31 #if ENABLE(ASSEMBLER)
     31#if ENABLE(ASSEMBLER) && PLATFORM(X86)
    3232
    33 #include "X86Assembler.h"
     33#include "MacroAssemblerX86Common.h"
    3434
    3535namespace JSC {
    36 
    37 template <class AssemblerType>
    38 class AbstractMacroAssembler {
    39 protected:
    40     AssemblerType m_assembler;
    41 
    42 public:
    43     typedef typename AssemblerType::RegisterID RegisterID;
    44     typedef typename AssemblerType::JmpSrc JmpSrc;
    45     typedef typename AssemblerType::JmpDst JmpDst;
    46 
    47     enum Scale {
    48         TimesOne,
    49         TimesTwo,
    50         TimesFour,
    51         TimesEight,
    52     };
    53 
    54     // Address:
    55     //
    56     // Describes a simple base-offset address.
    57     struct Address {
    58         explicit Address(RegisterID base, int32_t offset = 0)
    59             : base(base)
    60             , offset(offset)
    61         {
    62         }
    63 
    64         RegisterID base;
    65         int32_t offset;
    66     };
    67 
    68     // ImplicitAddress:
    69     //
    70     // This class is used for explicit 'load' and 'store' operations
    71     // (as opposed to situations in which a memory operand is provided
    72     // to a generic operation, such as an integer arithmetic instruction).
    73     //
    74     // In the case of a load (or store) operation we want to permit
    75     // addresses to be implicitly constructed, e.g. the two calls:
    76     //
    77     //     load32(Address(addrReg), destReg);
    78     //     load32(addrReg, destReg);
    79     //
    80     // Are equivalent, and the explicit wrapping of the Address in the former
    81     // is unnecessary.
    82     struct ImplicitAddress {
    83         ImplicitAddress(RegisterID base)
    84             : base(base)
    85             , offset(0)
    86         {
    87         }
    88 
    89         ImplicitAddress(Address address)
    90             : base(address.base)
    91             , offset(address.offset)
    92         {
    93         }
    94 
    95         RegisterID base;
    96         int32_t offset;
    97     };
    98 
    99     // BaseIndex:
    100     //
    101     // Describes a complex addressing mode.
    102     struct BaseIndex {
    103         BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
    104             : base(base)
    105             , index(index)
    106             , scale(scale)
    107             , offset(offset)
    108         {
    109         }
    110 
    111         RegisterID base;
    112         RegisterID index;
    113         Scale scale;
    114         int32_t offset;
    115     };
    116 
    117     // AbsoluteAddress:
    118     //
    119     // Describes a memory operand given by a pointer.  For regular load & store
    120     // operations an unwrapped void* will be used, rather than using this.
    121     struct AbsoluteAddress {
    122         explicit AbsoluteAddress(void* ptr)
    123             : m_ptr(ptr)
    124         {
    125         }
    126 
    127         void* m_ptr;
    128     };
    129 
    130 
    131     class Jump;
    132     class PatchBuffer;
    133 
    134     // DataLabelPtr:
    135     //
    136     // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    137     // patched after the code has been generated.
    138     class DataLabelPtr {
    139         template<class AssemblerType_T>
    140         friend class AbstractMacroAssembler;
    141         friend class PatchBuffer;
    142 
    143     public:
    144         DataLabelPtr()
    145         {
    146         }
    147 
    148         DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
    149             : m_label(masm->m_assembler.label())
    150         {
    151         }
    152 
    153         static void patch(void* address, void* value)
    154         {
    155             AssemblerType::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
    156         }
    157        
    158     private:
    159         JmpDst m_label;
    160     };
    161 
    162     // DataLabel32:
    163     //
    164     // A DataLabel32 is used to refer to a location in the code containing a 32-bit value to be
    165     // patched after the code has been generated.
    166     class DataLabel32 {
    167         template<class AssemblerType_T>
    168         friend class AbstractMacroAssembler;
    169         friend class PatchBuffer;
    170 
    171     public:
    172         DataLabel32()
    173         {
    174         }
    175 
    176         DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
    177             : m_label(masm->m_assembler.label())
    178         {
    179         }
    180 
    181         static void patch(void* address, int32_t value)
    182         {
    183             AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(address), value);
    184         }
    185 
    186     private:
    187         JmpDst m_label;
    188     };
    189 
    190     // Label:
    191     //
    192     // A Label records a point in the generated instruction stream, typically such that
    193     // it may be used as a destination for a jump.
    194     class Label {
    195         friend class Jump;
    196         template<class AssemblerType_T>
    197         friend class AbstractMacroAssembler;
    198         friend class PatchBuffer;
    199 
    200     public:
    201         Label()
    202         {
    203         }
    204 
    205         Label(AbstractMacroAssembler<AssemblerType>* masm)
    206             : m_label(masm->m_assembler.label())
    207         {
    208         }
    209        
    210     private:
    211         JmpDst m_label;
    212     };
    213 
    214 
    215     // Jump:
    216     //
    217     // A jump object is a reference to a jump instruction that has been planted
    218     // into the code buffer - it is typically used to link the jump, setting the
    219     // relative offset such that when executed it will jump to the desired
    220     // destination.
    221     //
    222     // Jump objects do not retain a pointer to the assembler; to link a jump, the
    223     // assembler on which it was planted must be passed in explicitly, e.g.:
    224     //
    225     //     Jump forwardsBranch = jne32(Imm32(0), reg1);
    226     //     // ...
    227     //     forwardsBranch.link(this);
    228     //
    229     // Jumps may also be linked to a Label.
    230     class Jump {
    231         friend class PatchBuffer;
    232         template<class AssemblerType_T>
    233         friend class AbstractMacroAssembler;
    234 
    235     public:
    236         Jump()
    237         {
    238         }
    239        
    240         Jump(JmpSrc jmp)
    241             : m_jmp(jmp)
    242         {
    243         }
    244        
    245         void link(AbstractMacroAssembler<AssemblerType>* masm)
    246         {
    247             masm->m_assembler.link(m_jmp, masm->m_assembler.label());
    248         }
    249        
    250         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    251         {
    252             masm->m_assembler.link(m_jmp, label.m_label);
    253         }
    254        
    255         static void patch(void* address, void* destination)
    256         {
    257             AssemblerType::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
    258         }
    259 
    260     private:
    261         JmpSrc m_jmp;
    262     };
    263 
    264     // JumpList:
    265     //
    266     // A JumpList is a set of Jump objects.
    267     // All jumps in the set will be linked to the same destination.
    268     class JumpList {
    269         friend class PatchBuffer;
    270 
    271     public:
    272         void link(AbstractMacroAssembler<AssemblerType>* masm)
    273         {
    274             size_t size = m_jumps.size();
    275             for (size_t i = 0; i < size; ++i)
    276                 m_jumps[i].link(masm);
    277             m_jumps.clear();
    278         }
    279        
    280         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    281         {
    282             size_t size = m_jumps.size();
    283             for (size_t i = 0; i < size; ++i)
    284                 m_jumps[i].linkTo(label, masm);
    285             m_jumps.clear();
    286         }
    287        
    288         void append(Jump jump)
    289         {
    290             m_jumps.append(jump);
    291         }
    292        
    293         void append(JumpList& other)
    294         {
    295             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
    296         }
    297 
    298         bool empty()
    299         {
    300             return !m_jumps.size();
    301         }
    302 
    303     private:
    304         Vector<Jump, 16> m_jumps;
    305     };
    306 
    307 
    308     // PatchBuffer:
    309     //
    310     // This class assists in linking code generated by the macro assembler, once code generation
    311     // has been completed, and the code has been copied to its final location in memory.  At this
    312     // time pointers to labels within the code may be resolved, and relative offsets to external
    313     // addresses may be fixed.
    314     //
    315     // Specifically:
    316     //   * Jump objects may be linked to external targets,
    317     //   * The address of a Jump object may be taken, such that it can later be relinked.
    318     //   * The return address of a Jump object representing a call may be acquired.
    319     //   * The address of a Label pointing into the code may be resolved.
    320     //   * The value referenced by a DataLabel may be fixed.
    321     //
    322     // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
    323     // address of calls, as opposed to a point that can be used to later relink a Jump -
    324     // possibly wrap the latter up in an object that can do just that).
    325     class PatchBuffer {
    326     public:
    327         PatchBuffer(void* code)
    328             : m_code(code)
    329         {
    330         }
    331 
    332         void link(Jump jump, void* target)
    333         {
    334             AssemblerType::link(m_code, jump.m_jmp, target);
    335         }
    336 
    337         void link(JumpList list, void* target)
    338         {
    339             for (unsigned i = 0; i < list.m_jumps.size(); ++i)
    340                 AssemblerType::link(m_code, list.m_jumps[i].m_jmp, target);
    341         }
    342 
    343         void* addressOf(Jump jump)
    344         {
    345             return AssemblerType::getRelocatedAddress(m_code, jump.m_jmp);
    346         }
    347 
    348         void* addressOf(Label label)
    349         {
    350             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    351         }
    352 
    353         void* addressOf(DataLabelPtr label)
    354         {
    355             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    356         }
    357 
    358         void* addressOf(DataLabel32 label)
    359         {
    360             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    361         }
    362 
    363         void setPtr(DataLabelPtr label, void* value)
    364         {
    365             AssemblerType::patchAddress(m_code, label.m_label, value);
    366         }
    367 
    368     private:
    369         void* m_code;
    370     };
    371  
    372 
    373     // ImmPtr:
    374     //
    375     // A pointer sized immediate operand to an instruction - this is wrapped
    376     // in a class requiring explicit construction in order to differentiate
    377     // from pointers used as absolute addresses to memory operations
    378     struct ImmPtr {
    379         explicit ImmPtr(void* value)
    380             : m_value(value)
    381         {
    382         }
    383 
    384         intptr_t asIntptr()
    385         {
    386             return reinterpret_cast<intptr_t>(m_value);
    387         }
    388 
    389         void* m_value;
    390     };
    391 
    392     // Imm32:
    393     //
    394     // A 32bit immediate operand to an instruction - this is wrapped in a
    395     // class requiring explicit construction in order to prevent RegisterIDs
    396     // (which are implemented as an enum) from accidentally being passed as
    397     // immediate values.
    398     struct Imm32 {
    399         explicit Imm32(int32_t value)
    400             : m_value(value)
    401         {
    402         }
    403 
    404 #if !PLATFORM(X86_64)
    405         explicit Imm32(ImmPtr ptr)
    406             : m_value(ptr.asIntptr())
    407         {
    408         }
    409 #endif
    410 
    411         int32_t m_value;
    412     };
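        // The explicit constructors make immediates unambiguous at call sites,
        // e.g. (illustrative only; 'someGlobal' is an assumed name):
        //
        //     masm.add32(Imm32(4), X86::eax);           // 32-bit immediate operand
        //     masm.move(ImmPtr(&someGlobal), X86::edx); // pointer-sized immediate
        //     // masm.add32(4, X86::eax);               // would not compile - no implicit Imm32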
    413 
    414     size_t size()
    415     {
    416         return m_assembler.size();
    417     }
    418 
    419     void* copyCode(ExecutablePool* allocator)
    420     {
    421         return m_assembler.executableCopy(allocator);
    422     }
    423 
    424     Label label()
    425     {
    426         return Label(this);
    427     }
    428    
    429     Label align()
    430     {
    431         m_assembler.align(16);
    432         return Label(this);
    433     }
    434 
    435     ptrdiff_t differenceBetween(Label from, Jump to)
    436     {
    437         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    438     }
    439 
    440     ptrdiff_t differenceBetween(Label from, Label to)
    441     {
    442         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    443     }
    444 
    445     ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
    446     {
    447         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    448     }
    449 
    450     ptrdiff_t differenceBetween(Label from, DataLabel32 to)
    451     {
    452         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    453     }
    454 
    455     ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
    456     {
    457         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    458     }
    459 
    460 };
    461 
    462 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    463 public:
    464 
    465     typedef X86Assembler::Condition Condition;
    466     static const Condition Equal = X86Assembler::ConditionE;
    467     static const Condition NotEqual = X86Assembler::ConditionNE;
    468     static const Condition Above = X86Assembler::ConditionA;
    469     static const Condition AboveOrEqual = X86Assembler::ConditionAE;
    470     static const Condition Below = X86Assembler::ConditionB;
    471     static const Condition BelowOrEqual = X86Assembler::ConditionBE;
    472     static const Condition GreaterThan = X86Assembler::ConditionG;
    473     static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE;
    474     static const Condition LessThan = X86Assembler::ConditionL;
    475     static const Condition LessThanOrEqual = X86Assembler::ConditionLE;
    476     static const Condition Overflow = X86Assembler::ConditionO;
    477     static const Condition Zero = X86Assembler::ConditionE;
    478     static const Condition NonZero = X86Assembler::ConditionNE;
    479 
    480     static const RegisterID stackPointerRegister = X86::esp;
    481 
    482     // Integer arithmetic operations:
    483     //
    484     // Operations are typically two operand - operation(source, srcDst)
    485     // For many operations the source may be an Imm32, the srcDst operand
    486     // may often be a memory location (explicitly described using an Address
    487     // object).
    488 
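        // For example (a usage sketch; 'masm' names a macro assembler instance):
        //
        //     masm.add32(Imm32(1), X86::eax);              // eax += 1
        //     masm.add32(X86::edx, X86::eax);              // eax += edx
        //     masm.and32(Imm32(0xffff), X86::eax);         // keep the low 16 bits
        //     masm.add32(Imm32(8), Address(X86::esi, 4));  // add to a word in memory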
    489     void add32(RegisterID src, RegisterID dest)
    490     {
    491         m_assembler.addl_rr(src, dest);
    492     }
    493 
    494     void add32(Imm32 imm, Address address)
    495     {
    496         m_assembler.addl_im(imm.m_value, address.offset, address.base);
    497     }
    498 
    499     void add32(Imm32 imm, RegisterID dest)
    500     {
    501         m_assembler.addl_ir(imm.m_value, dest);
    502     }
    503    
    504     void add32(Address src, RegisterID dest)
    505     {
    506         m_assembler.addl_mr(src.offset, src.base, dest);
    507     }
    508    
    509     void and32(RegisterID src, RegisterID dest)
    510     {
    511         m_assembler.andl_rr(src, dest);
    512     }
    513 
    514     void and32(Imm32 imm, RegisterID dest)
    515     {
    516         m_assembler.andl_ir(imm.m_value, dest);
    517     }
    518 
    519     void lshift32(Imm32 imm, RegisterID dest)
    520     {
    521         m_assembler.shll_i8r(imm.m_value, dest);
    522     }
    523    
    524     void lshift32(RegisterID shift_amount, RegisterID dest)
    525     {
    526         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    527         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    528         if (shift_amount != X86::ecx) {
    529             swap(shift_amount, X86::ecx);
    530 
    531             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
    532             if (dest == shift_amount)
    533                 m_assembler.shll_CLr(X86::ecx);
    534             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
    535             else if (dest == X86::ecx)
    536                 m_assembler.shll_CLr(shift_amount);
    537             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
    538             else
    539                 m_assembler.shll_CLr(dest);
    540        
    541             swap(shift_amount, X86::ecx);
    542         } else
    543             m_assembler.shll_CLr(dest);
    544     }
    545    
    546     // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
    547     // For now, this operation has specific register requirements, and the three registers must
    548     // be unique.  It is unfortunate to expose this in the MacroAssembler interface; however,
    549     // given the complexity of fixing it, and the fact that it is not uncommon for processors
    550     // to have specific register requirements on this operation (e.g. MIPS places the result in
    551     // 'hi') or to not support a hardware divide at all, it may not be worth changing.
    552     void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
    553     {
    554 #ifdef NDEBUG
    555 #pragma unused(dividend,remainder)
    556 #else
    557         ASSERT((dividend == X86::eax) && (remainder == X86::edx));
    558         ASSERT((dividend != divisor) && (remainder != divisor));
    559 #endif
    560 
    561         m_assembler.cdq();
    562         m_assembler.idivl_r(divisor);
    563     }
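        // For example, given the constraints above (a sketch; 'numeratorReg' and
        // 'divisorReg' are illustrative, with divisorReg being neither eax nor edx):
        //
        //     masm.move(numeratorReg, X86::eax);
        //     masm.mod32(divisorReg, X86::eax, X86::edx);   // edx = numerator % divisor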
    564 
    565     void mul32(RegisterID src, RegisterID dest)
    566     {
    567         m_assembler.imull_rr(src, dest);
    568     }
    569    
    570     void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    571     {
    572         m_assembler.imull_i32r(src, imm.m_value, dest);
    573     }
    574    
    575     void not32(RegisterID srcDest)
    576     {
    577         m_assembler.notl_r(srcDest);
    578     }
    579    
    580     void or32(RegisterID src, RegisterID dest)
    581     {
    582         m_assembler.orl_rr(src, dest);
    583     }
    584 
    585     void or32(Imm32 imm, RegisterID dest)
    586     {
    587         m_assembler.orl_ir(imm.m_value, dest);
    588     }
    589 
    590     void rshift32(RegisterID shift_amount, RegisterID dest)
    591     {
    592         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    593         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    594         if (shift_amount != X86::ecx) {
    595             swap(shift_amount, X86::ecx);
    596 
    597             // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
    598             if (dest == shift_amount)
    599                 m_assembler.sarl_CLr(X86::ecx);
    600             // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
    601             else if (dest == X86::ecx)
    602                 m_assembler.sarl_CLr(shift_amount);
    603             // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
    604             else
    605                 m_assembler.sarl_CLr(dest);
    606        
    607             swap(shift_amount, X86::ecx);
    608         } else
    609             m_assembler.sarl_CLr(dest);
    610     }
    611 
    612     void rshift32(Imm32 imm, RegisterID dest)
    613     {
    614         m_assembler.sarl_i8r(imm.m_value, dest);
    615     }
    616 
    617     void sub32(RegisterID src, RegisterID dest)
    618     {
    619         m_assembler.subl_rr(src, dest);
    620     }
    621    
    622     void sub32(Imm32 imm, RegisterID dest)
    623     {
    624         m_assembler.subl_ir(imm.m_value, dest);
    625     }
    626    
    627     void sub32(Imm32 imm, Address address)
    628     {
    629         m_assembler.subl_im(imm.m_value, address.offset, address.base);
    630     }
    631 
    632     void sub32(Address src, RegisterID dest)
    633     {
    634         m_assembler.subl_mr(src.offset, src.base, dest);
    635     }
    636 
    637     void xor32(RegisterID src, RegisterID dest)
    638     {
    639         m_assembler.xorl_rr(src, dest);
    640     }
    641 
    642     void xor32(Imm32 imm, RegisterID srcDest)
    643     {
    644         m_assembler.xorl_ir(imm.m_value, srcDest);
    645     }
    646    
    647 
    648     // Memory access operations:
    649     //
    650     // Loads are of the form load(address, destination) and stores of the form
    651     // store(source, address).  The source for a store may be an Imm32.  Address
    652     // operand objects to loads and stores will be implicitly constructed if a
    653     // register is passed.
    654 
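        // For example (illustrative only):
        //
        //     masm.load32(X86::esi, X86::eax);                  // ImplicitAddress from a register
        //     masm.load32(Address(X86::esi, 8), X86::eax);      // base + offset
        //     masm.load32(BaseIndex(X86::esi, X86::ecx, TimesFour), X86::eax);
        //     masm.store32(Imm32(0), Address(X86::esi, 8));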
    655     void load32(ImplicitAddress address, RegisterID dest)
    656     {
    657         m_assembler.movl_mr(address.offset, address.base, dest);
    658     }
    659 
    660     void load32(BaseIndex address, RegisterID dest)
    661     {
    662         m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    663     }
    664 
    665     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    666     {
    667         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
    668         return DataLabel32(this);
    669     }
    670 
    671     void load16(BaseIndex address, RegisterID dest)
    672     {
    673         m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    674     }
    675 
    676     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    677     {
    678         m_assembler.movl_rm_disp32(src, address.offset, address.base);
    679         return DataLabel32(this);
    680     }
    681 
    682     void store32(RegisterID src, ImplicitAddress address)
    683     {
    684         m_assembler.movl_rm(src, address.offset, address.base);
    685     }
    686 
    687     void store32(RegisterID src, BaseIndex address)
    688     {
    689         m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    690     }
    691 
    692     void store32(Imm32 imm, ImplicitAddress address)
    693     {
    694         m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    695     }
    696    
    697 
    698     // Stack manipulation operations:
    699     //
    700     // The ABI is assumed to provide a stack abstraction to memory,
    701     // containing machine word sized units of data.  Push and pop
    702     // operations add and remove a single register sized unit of data
    703     // to or from the stack.  Peek and poke operations read or write
    704     // values on the stack, without moving the current stack position.
    705    
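        // For example (illustrative only):
        //
        //     masm.push(X86::eax);       // store eax in a new stack slot
        //     masm.push(Imm32(42));      // push a constant
        //     masm.pop(X86::edx);        // edx = 42
        //     masm.pop(X86::ecx);        // ecx = the saved eax value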
    706     void pop(RegisterID dest)
    707     {
    708         m_assembler.pop_r(dest);
    709     }
    710 
    711     void push(RegisterID src)
    712     {
    713         m_assembler.push_r(src);
    714     }
    715 
    716     void push(Address address)
    717     {
    718         m_assembler.push_m(address.offset, address.base);
    719     }
    720 
    721     void push(Imm32 imm)
    722     {
    723         m_assembler.push_i32(imm.m_value);
    724     }
    725 
    726     // Register move operations:
    727     //
    728     // Move values in registers.
    729 
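        // For example (illustrative only):
        //
        //     masm.move(Imm32(0), X86::eax);    // emitted as "xorl %eax, %eax"
        //     masm.move(X86::eax, X86::edx);    // register-to-register copy
        //     masm.swap(X86::eax, X86::edx);    // exchange the two registers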
    730     void move(Imm32 imm, RegisterID dest)
    731     {
    732         // Note: on 64-bit the Imm32 value is zero extended into the register; it
    733         // may be useful to have a separate version that sign extends the value?
    734         if (!imm.m_value)
    735             m_assembler.xorl_rr(dest, dest);
    736         else
    737             m_assembler.movl_i32r(imm.m_value, dest);
    738     }
    739 
    740 #if PLATFORM(X86_64)
    741     void move(RegisterID src, RegisterID dest)
    742     {
    743         // Note: on 64-bit this is a full register move; perhaps it would be
    744         // useful to have separate move32 & movePtr, with move32 zero extending?
    745         m_assembler.movq_rr(src, dest);
    746     }
    747 
    748     void move(ImmPtr imm, RegisterID dest)
    749     {
    750         if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
    751             m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
    752         else
    753             m_assembler.movq_i64r(imm.asIntptr(), dest);
    754     }
    755 
    756     void swap(RegisterID reg1, RegisterID reg2)
    757     {
    758         m_assembler.xchgq_rr(reg1, reg2);
    759     }
    760 
    761     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    762     {
    763         m_assembler.movsxd_rr(src, dest);
    764     }
    765 
    766     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    767     {
    768         m_assembler.movl_rr(src, dest);
    769     }
    770 #else
    771     void move(RegisterID src, RegisterID dest)
    772     {
    773         m_assembler.movl_rr(src, dest);
    774     }
    775 
    776     void move(ImmPtr imm, RegisterID dest)
    777     {
    778         m_assembler.movl_i32r(imm.asIntptr(), dest);
    779     }
    780 
    781     void swap(RegisterID reg1, RegisterID reg2)
    782     {
    783         m_assembler.xchgl_rr(reg1, reg2);
    784     }
    785 
    786     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    787     {
    788         if (src != dest)
    789             move(src, dest);
    790     }
    791 
    792     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    793     {
    794         if (src != dest)
    795             move(src, dest);
    796     }
    797 #endif
    798 
    799 
    800     // Forwards / external control flow operations:
    801     //
    802     // This set of jump and conditional branch operations returns a Jump
    803     // object which may be linked at a later point, allowing forwards jumps,
    804     // or jumps that will require external linkage (after the code has been
    805     // relocated).
    806     //
    807     // For branches, signed <, >, <= and >= are denoted by the conditions LessThan,
    808     // GreaterThan, LessThanOrEqual, and GreaterThanOrEqual respectively; for unsigned
    809     // comparisons the conditions Below, Above, BelowOrEqual, and AboveOrEqual are used.
    810     //
    811     // Operands to the comparison are provided in the expected order, e.g.
    812     // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value held in reg1,
    813     // when treated as a signed 32bit value, is less than or equal to 5.
    814     //
    815     // The Zero and NonZero conditions on branchTest32 test whether the first operand is
    816     // equal to zero, and take an optional second operand of a mask under which to perform the test.
    817 
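        // For example, a forwards branch over a short code sequence, and a jump
        // to be resolved externally (a sketch; 'runtimeFunction' is illustrative):
        //
        //     Jump skip = masm.branch32(Equal, X86::eax, Imm32(0));
        //     // ... code emitted here executes only when eax != 0 ...
        //     skip.link(&masm);                  // the branch now lands here
        //
        //     Jump toRuntime = masm.jump();      // left unlinked for now
        //     // ... after copying the code: patchBuffer.link(toRuntime, runtimeFunction);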
    818 public:
    819     Jump branch32(Condition cond, RegisterID left, RegisterID right)
    820     {
    821         m_assembler.cmpl_rr(right, left);
    822         return Jump(m_assembler.jCC(cond));
    823     }
    824 
    825     Jump branch32(Condition cond, RegisterID left, Imm32 right)
    826     {
    827         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    828             m_assembler.testl_rr(left, left);
    829         else
    830             m_assembler.cmpl_ir(right.m_value, left);
    831         return Jump(m_assembler.jCC(cond));
    832     }
    833    
    834     Jump branch32(Condition cond, RegisterID left, Address right)
    835     {
    836         m_assembler.cmpl_mr(right.offset, right.base, left);
    837         return Jump(m_assembler.jCC(cond));
    838     }
    839    
    840     Jump branch32(Condition cond, Address left, RegisterID right)
    841     {
    842         m_assembler.cmpl_rm(right, left.offset, left.base);
    843         return Jump(m_assembler.jCC(cond));
    844     }
    845 
    846     Jump branch32(Condition cond, Address left, Imm32 right)
    847     {
    848         m_assembler.cmpl_im(right.m_value, left.offset, left.base);
    849         return Jump(m_assembler.jCC(cond));
    850     }
    851 
    852     Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    853     {
    854         m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
    855         return Jump(m_assembler.jCC(cond));
    856     }
    857 
    858     Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    859     {
    860         ASSERT((cond == Zero) || (cond == NonZero));
    861         m_assembler.testl_rr(reg, mask);
    862         return Jump(m_assembler.jCC(cond));
    863     }
    864 
    865     Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    866     {
    867         ASSERT((cond == Zero) || (cond == NonZero));
    868         // if we are only interested in the low seven bits, this can be tested with a testb
    869         if (mask.m_value == -1)
    870             m_assembler.testl_rr(reg, reg);
    871         else if ((mask.m_value & ~0x7f) == 0)
    872             m_assembler.testb_i8r(mask.m_value, reg);
    873         else
    874             m_assembler.testl_i32r(mask.m_value, reg);
    875         return Jump(m_assembler.jCC(cond));
    876     }
    877 
    878     Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    879     {
    880         ASSERT((cond == Zero) || (cond == NonZero));
    881         if (mask.m_value == -1)
    882             m_assembler.cmpl_im(0, address.offset, address.base);
    883         else
    884             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    885         return Jump(m_assembler.jCC(cond));
    886     }
    887 
    888     Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    889     {
    890         ASSERT((cond == Zero) || (cond == NonZero));
    891         if (mask.m_value == -1)
    892             m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
    893         else
    894             m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    895         return Jump(m_assembler.jCC(cond));
    896     }
    897 
    898     Jump jump()
    899     {
    900         return Jump(m_assembler.jmp());
    901     }
    902 
    903     void jump(RegisterID target)
    904     {
    905         m_assembler.jmp_r(target);
    906     }
    907 
    908     // Address is a memory location containing the address to jump to
    909     void jump(Address address)
    910     {
    911         m_assembler.jmp_m(address.offset, address.base);
    912     }
    913 
    914 
    915     // Arithmetic control flow operations:
    916     //
    917     // This set of conditional branch operations branch based
    918     // on the result of an arithmetic operation.  The operation
    919     // is performed as normal, storing the result.
    920     //
    921     // * The Zero condition branches if the result is zero.
    922     // * The Overflow condition branches if the (signed) arithmetic
    923     //   operation caused an overflow to occur.
    924    
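        // For example, an overflow-checked addition (illustrative only):
        //
        //     Jump overflowed = masm.branchAdd32(Overflow, X86::edx, X86::eax);
        //     // ... the fast path continues with the sum in eax; 'overflowed' is
        //     // linked to a slow path elsewhere.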
    925     Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    926     {
    927         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    928         add32(src, dest);
    929         return Jump(m_assembler.jCC(cond));
    930     }
    931    
    932     Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    933     {
    934         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    935         add32(imm, dest);
    936         return Jump(m_assembler.jCC(cond));
    937     }
    938    
    939     Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    940     {
    941         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    942         mul32(src, dest);
    943         return Jump(m_assembler.jCC(cond));
    944     }
    945    
    946     Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    947     {
    948         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    949         mul32(imm, src, dest);
    950         return Jump(m_assembler.jCC(cond));
    951     }
    952    
    953     Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    954     {
    955         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    956         sub32(src, dest);
    957         return Jump(m_assembler.jCC(cond));
    958     }
    959    
    960     Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    961     {
    962         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    963         sub32(imm, dest);
    964         return Jump(m_assembler.jCC(cond));
    965     }
    966    
    967 
    968     // Miscellaneous operations:
    969 
    970     void breakpoint()
    971     {
    972         m_assembler.int3();
    973     }
    974 
    975     Jump call()
    976     {
    977         return Jump(m_assembler.call());
    978     }
    979 
    980     // FIXME: why does this return a Jump object? - it can't be linked.
    981     // This may be to get a reference to the return address of the call.
    982     //
    983     // This should probably be handled by a separate label type to a regular
    984     // jump.  Todo: add a CallLabel type, for the regular call - can be linked
    985     // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
    986     // Also add a CallReturnLabel type for this to return (just a more JmpDsty
    987     // form of label, can get the void* after the code has been linked, but can't
    988     // try to link it like a Jump object), and let the CallLabel be cast into a
    989     // CallReturnLabel.
    990     Jump call(RegisterID target)
    991     {
    992         return Jump(m_assembler.call(target));
    993     }
    994 
    995     void ret()
    996     {
    997         m_assembler.ret();
    998     }
    999 
    1000     void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    1001     {
    1002         m_assembler.cmpl_rr(right, left);
    1003         m_assembler.setCC_r(cond, dest);
    1004         m_assembler.movzbl_rr(dest, dest);
    1005     }
    1006 
    1007     void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    1008     {
    1009         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    1010             m_assembler.testl_rr(left, left);
    1011         else
    1012             m_assembler.cmpl_ir(right.m_value, left);
    1013         m_assembler.setCC_r(cond, dest);
    1014         m_assembler.movzbl_rr(dest, dest);
    1015     }
    1016 
    1017     // FIXME:
    1018     // The mask should be optional... perhaps the argument order should be
    1019     // dest-src, operations always have a dest? ... possibly not true, considering
    1020     // asm ops like test, or pseudo ops like pop().
    1021     void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    1022     {
    1023         if (mask.m_value == -1)
    1024             m_assembler.cmpl_im(0, address.offset, address.base);
    1025         else
    1026             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    1027         m_assembler.setCC_r(cond, dest);
    1028         m_assembler.movzbl_rr(dest, dest);
    1029     }
    1030 };
    1031 
    1032 
    1033 #if PLATFORM(X86_64)
    1034 
    1035 class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
    1036 protected:
    1037     static const X86::RegisterID scratchRegister = X86::r11;
    1038 
    1039 public:
    1040     static const Scale ScalePtr = TimesEight;
    1041 
    1042     using MacroAssemblerX86Common::add32;
    1043     using MacroAssemblerX86Common::sub32;
    1044     using MacroAssemblerX86Common::load32;
    1045     using MacroAssemblerX86Common::store32;
    1046 
    1047     void add32(Imm32 imm, AbsoluteAddress address)
    1048     {
    1049         move(ImmPtr(address.m_ptr), scratchRegister);
    1050         add32(imm, Address(scratchRegister));
    1051     }
    1052    
    1053     void sub32(Imm32 imm, AbsoluteAddress address)
    1054     {
    1055         move(ImmPtr(address.m_ptr), scratchRegister);
    1056         sub32(imm, Address(scratchRegister));
    1057     }
    1058 
    1059     void load32(void* address, RegisterID dest)
    1060     {
    1061         if (dest == X86::eax)
    1062             m_assembler.movl_mEAX(address);
    1063         else {
    1064             move(X86::eax, dest);
    1065             m_assembler.movl_mEAX(address);
    1066             swap(X86::eax, dest);
    1067         }
    1068     }
    1069 
    1070     void store32(Imm32 imm, void* address)
    1071     {
    1072         move(X86::eax, scratchRegister);
    1073         move(imm, X86::eax);
    1074         m_assembler.movl_EAXm(address);
    1075         move(scratchRegister, X86::eax);
    1076     }
    1077 
    1078 
    1079 
    1080     void addPtr(RegisterID src, RegisterID dest)
    1081     {
    1082         m_assembler.addq_rr(src, dest);
    1083     }
    1084 
    1085     void addPtr(Imm32 imm, RegisterID srcDest)
    1086     {
    1087         m_assembler.addq_ir(imm.m_value, srcDest);
    1088     }
    1089 
    1090     void addPtr(ImmPtr imm, RegisterID dest)
    1091     {
    1092         move(imm, scratchRegister);
    1093         m_assembler.addq_rr(scratchRegister, dest);
    1094     }
    1095 
    1096     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1097     {
    1098         m_assembler.leal_mr(imm.m_value, src, dest);
    1099     }
    1100 
    1101     void andPtr(RegisterID src, RegisterID dest)
    1102     {
    1103         m_assembler.andq_rr(src, dest);
    1104     }
    1105 
    1106     void andPtr(Imm32 imm, RegisterID srcDest)
    1107     {
    1108         m_assembler.andq_ir(imm.m_value, srcDest);
    1109     }
    1110 
    1111     void orPtr(RegisterID src, RegisterID dest)
    1112     {
    1113         m_assembler.orq_rr(src, dest);
    1114     }
    1115 
    1116     void orPtr(ImmPtr imm, RegisterID dest)
    1117     {
    1118         move(imm, scratchRegister);
    1119         m_assembler.orq_rr(scratchRegister, dest);
    1120     }
    1121 
    1122     void orPtr(Imm32 imm, RegisterID dest)
    1123     {
    1124         m_assembler.orq_ir(imm.m_value, dest);
    1125     }
    1126 
    1127     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1128     {
    1129         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    1130         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    1131         if (shift_amount != X86::ecx) {
    1132             swap(shift_amount, X86::ecx);
    1133 
    1134             // E.g. transform "sarq %rax, %rax" -> "xchgq %rax, %rcx; sarq %rcx, %rcx; xchgq %rax, %rcx"
    1135             if (dest == shift_amount)
    1136                 m_assembler.sarq_CLr(X86::ecx);
    1137             // E.g. transform "sarq %rax, %rcx" -> "xchgq %rax, %rcx; sarq %rcx, %rax; xchgq %rax, %rcx"
    1138             else if (dest == X86::ecx)
    1139                 m_assembler.sarq_CLr(shift_amount);
    1140             // E.g. transform "sarq %rax, %rbx" -> "xchgq %rax, %rcx; sarq %rcx, %rbx; xchgq %rax, %rcx"
    1141             else
    1142                 m_assembler.sarq_CLr(dest);
    1143        
    1144             swap(shift_amount, X86::ecx);
    1145         } else
    1146             m_assembler.sarq_CLr(dest);
    1147     }
    1148 
    1149     void rshiftPtr(Imm32 imm, RegisterID dest)
    1150     {
    1151         m_assembler.sarq_i8r(imm.m_value, dest);
    1152     }
    1153 
    1154     void subPtr(RegisterID src, RegisterID dest)
    1155     {
    1156         m_assembler.subq_rr(src, dest);
    1157     }
    1158    
    1159     void subPtr(Imm32 imm, RegisterID dest)
    1160     {
    1161         m_assembler.subq_ir(imm.m_value, dest);
    1162     }
    1163    
    1164     void subPtr(ImmPtr imm, RegisterID dest)
    1165     {
    1166         move(imm, scratchRegister);
    1167         m_assembler.subq_rr(scratchRegister, dest);
    1168     }
    1169 
    1170     void xorPtr(RegisterID src, RegisterID dest)
    1171     {
    1172         m_assembler.xorq_rr(src, dest);
    1173     }
    1174 
    1175     void xorPtr(Imm32 imm, RegisterID srcDest)
    1176     {
    1177         m_assembler.xorq_ir(imm.m_value, srcDest);
    1178     }
    1179 
    1180 
    1181     void loadPtr(ImplicitAddress address, RegisterID dest)
    1182     {
    1183         m_assembler.movq_mr(address.offset, address.base, dest);
    1184     }
    1185 
    1186     void loadPtr(BaseIndex address, RegisterID dest)
    1187     {
    1188         m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    1189     }
    1190 
    1191     void loadPtr(void* address, RegisterID dest)
    1192     {
    1193         if (dest == X86::eax)
    1194             m_assembler.movq_mEAX(address);
    1195         else {
    1196             move(X86::eax, dest);
    1197             m_assembler.movq_mEAX(address);
    1198             swap(X86::eax, dest);
    1199         }
    1200     }
    1201 
    1202     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1203     {
    1204         m_assembler.movq_mr_disp32(address.offset, address.base, dest);
    1205         return DataLabel32(this);
    1206     }
    1207 
    1208     void storePtr(RegisterID src, ImplicitAddress address)
    1209     {
    1210         m_assembler.movq_rm(src, address.offset, address.base);
    1211     }
    1212 
    1213     void storePtr(RegisterID src, BaseIndex address)
    1214     {
    1215         m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    1216     }
    1217 
    1218     void storePtr(ImmPtr imm, ImplicitAddress address)
    1219     {
    1220         move(imm, scratchRegister);
    1221         storePtr(scratchRegister, address);
    1222     }
    1223 
    1224     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1225     {
    1226         m_assembler.movq_rm_disp32(src, address.offset, address.base);
    1227         return DataLabel32(this);
    1228     }
    1229 
    1230 
    1231     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1232     {
    1233         m_assembler.cmpq_rr(right, left);
    1234         return Jump(m_assembler.jCC(cond));
    1235     }
    1236 
    1237     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1238     {
    1239         intptr_t imm = right.asIntptr();
    1240         if (CAN_SIGN_EXTEND_32_64(imm)) {
    1241             if (!imm)
    1242                 m_assembler.testq_rr(left, left);
    1243             else
    1244                 m_assembler.cmpq_ir(imm, left);
    1245             return Jump(m_assembler.jCC(cond));
    1246         } else {
    1247             move(right, scratchRegister);
    1248             return branchPtr(cond, left, scratchRegister);
    1249         }
    1250     }
    1251 
    1252     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1253     {
    1254         m_assembler.cmpq_mr(right.offset, right.base, left);
    1255         return Jump(m_assembler.jCC(cond));
    1256     }
    1257 
    1258     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1259     {
    1260         move(ImmPtr(left.m_ptr), scratchRegister);
    1261         return branchPtr(cond, Address(scratchRegister), right);
    1262     }
    1263 
    1264     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1265     {
    1266         m_assembler.cmpq_rm(right, left.offset, left.base);
    1267         return Jump(m_assembler.jCC(cond));
    1268     }
    1269 
    1270     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1271     {
    1272         move(right, scratchRegister);
    1273         return branchPtr(cond, left, scratchRegister);
    1274     }
    1275 
    1276     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1277     {
    1278         m_assembler.testq_rr(reg, mask);
    1279         return Jump(m_assembler.jCC(cond));
    1280     }
    1281 
    1282     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1283     {
    1284         // if we are only interested in the low seven bits, this can be tested with a testb
    1285         if (mask.m_value == -1)
    1286             m_assembler.testq_rr(reg, reg);
    1287         else if ((mask.m_value & ~0x7f) == 0)
    1288             m_assembler.testb_i8r(mask.m_value, reg);
    1289         else
    1290             m_assembler.testq_i32r(mask.m_value, reg);
    1291         return Jump(m_assembler.jCC(cond));
    1292     }
    1293 
    1294     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1295     {
    1296         if (mask.m_value == -1)
    1297             m_assembler.cmpq_im(0, address.offset, address.base);
    1298         else
    1299             m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
    1300         return Jump(m_assembler.jCC(cond));
    1301     }
    1302 
    1303     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1304     {
    1305         if (mask.m_value == -1)
    1306             m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
    1307         else
    1308             m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    1309         return Jump(m_assembler.jCC(cond));
    1310     }
    1311 
    1312 
    1313     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1314     {
    1315         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1316         addPtr(src, dest);
    1317         return Jump(m_assembler.jCC(cond));
    1318     }
    1319 
    1320     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1321     {
    1322         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1323         subPtr(imm, dest);
    1324         return Jump(m_assembler.jCC(cond));
    1325     }
    1326 
    1327     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1328     {
    1329         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1330         dataLabel = DataLabelPtr(this);
    1331         return branchPtr(cond, left, scratchRegister);
    1332     }
    1333 
    1334     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1335     {
    1336         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1337         dataLabel = DataLabelPtr(this);
    1338         return branchPtr(cond, left, scratchRegister);
    1339     }
    1340 
    1341     DataLabelPtr storePtrWithPatch(Address address)
    1342     {
    1343         m_assembler.movq_i64r(0, scratchRegister);
    1344         DataLabelPtr label(this);
    1345         storePtr(scratchRegister, address);
    1346         return label;
    1347     }
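        // For example, comparing against a pointer to be supplied at link time
        // (a sketch; 'expectedStructure' is an illustrative value):
        //
        //     DataLabelPtr structureCheck;
        //     Jump mismatch = masm.branchPtrWithPatch(NotEqual, X86::eax, structureCheck);
        //     // ... after copying the code:
        //     //     patchBuffer.setPtr(structureCheck, expectedStructure);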
    1348 };
    1349 
    1350 typedef MacroAssemblerX86_64 MacroAssemblerBase;
    1351 
    1352 #else
    135336
    135437class MacroAssemblerX86 : public MacroAssemblerX86Common {
     
    1420103};
    1421104
    1422 typedef MacroAssemblerX86 MacroAssemblerBase;
    1423 
    1424 #endif
    1425 
    1426 
    1427 class MacroAssembler : public MacroAssemblerBase {
    1428 public:
    1429 
    1430     using MacroAssemblerBase::pop;
    1431     using MacroAssemblerBase::jump;
    1432     using MacroAssemblerBase::branch32;
    1433     using MacroAssemblerBase::branch16;
    1434 #if PLATFORM(X86_64)
    1435     using MacroAssemblerBase::branchPtr;
    1436     using MacroAssemblerBase::branchTestPtr;
    1437 #endif
    1438 
    1439 
    1440     // Platform agnostic convenience functions,
    1441     // described in terms of other macro assembly methods.
    1442     void pop()
    1443     {
    1444         addPtr(Imm32(sizeof(void*)), stackPointerRegister);
    1445     }
    1446    
    1447     void peek(RegisterID dest, int index = 0)
    1448     {
    1449         loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    1450     }
    1451 
    1452     void poke(RegisterID src, int index = 0)
    1453     {
    1454         storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
    1455     }
    1456 
    1457     void poke(Imm32 value, int index = 0)
    1458     {
    1459         store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
    1460     }
    1461 
    1462     void poke(ImmPtr imm, int index = 0)
    1463     {
    1464         storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
    1465     }
    1466 
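        // For example (illustrative only):
        //
        //     masm.poke(X86::eax, 1);    // write eax into the second slot on the stack
        //     masm.peek(X86::edx, 1);    // read it back into edx
        //     masm.pop();                // discard the top stack slot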
    1467 
    1468     // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
    1469     void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
    1470     {
    1471         branchPtr(cond, op1, imm).linkTo(target, this);
    1472     }
    1473 
    1474     void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
    1475     {
    1476         branch32(cond, op1, op2).linkTo(target, this);
    1477     }
    1478 
    1479     void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
    1480     {
    1481         branch32(cond, op1, imm).linkTo(target, this);
    1482     }
    1483 
    1484     void branch32(Condition cond, RegisterID left, Address right, Label target)
    1485     {
    1486         branch32(cond, left, right).linkTo(target, this);
    1487     }
    1488 
    1489     void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
    1490     {
    1491         branch16(cond, left, right).linkTo(target, this);
    1492     }
    1493    
    1494     void branchTestPtr(Condition cond, RegisterID reg, Label target)
    1495     {
    1496         branchTestPtr(cond, reg).linkTo(target, this);
    1497     }
    1498 
    1499     void jump(Label target)
    1500     {
    1501         jump().linkTo(target, this);
    1502     }
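        // For example, a simple countdown loop using a backwards branch
        // (illustrative only):
        //
        //     Label top = masm.label();
        //     masm.sub32(Imm32(1), X86::ecx);
        //     masm.branch32(NotEqual, X86::ecx, Imm32(0), top);   // loop while ecx != 0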
    1503 
    1504 
    1505     // Ptr methods
    1506     // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    1507 #if !PLATFORM(X86_64)
    1508     void addPtr(RegisterID src, RegisterID dest)
    1509     {
    1510         add32(src, dest);
    1511     }
    1512 
    1513     void addPtr(Imm32 imm, RegisterID srcDest)
    1514     {
    1515         add32(imm, srcDest);
    1516     }
    1517 
    1518     void addPtr(ImmPtr imm, RegisterID dest)
    1519     {
    1520         add32(Imm32(imm), dest);
    1521     }
    1522 
    1523     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1524     {
    1525         add32(imm, src, dest);
    1526     }
    1527 
    1528     void andPtr(RegisterID src, RegisterID dest)
    1529     {
    1530         and32(src, dest);
    1531     }
    1532 
    1533     void andPtr(Imm32 imm, RegisterID srcDest)
    1534     {
    1535         and32(imm, srcDest);
    1536     }
    1537 
    1538     void orPtr(RegisterID src, RegisterID dest)
    1539     {
    1540         or32(src, dest);
    1541     }
    1542 
    1543     void orPtr(ImmPtr imm, RegisterID dest)
    1544     {
    1545         or32(Imm32(imm), dest);
    1546     }
    1547 
    1548     void orPtr(Imm32 imm, RegisterID dest)
    1549     {
    1550         or32(imm, dest);
    1551     }
    1552 
    1553     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1554     {
    1555         rshift32(shift_amount, dest);
    1556     }
    1557 
    1558     void rshiftPtr(Imm32 imm, RegisterID dest)
    1559     {
    1560         rshift32(imm, dest);
    1561     }
    1562 
    1563     void subPtr(RegisterID src, RegisterID dest)
    1564     {
    1565         sub32(src, dest);
    1566     }
    1567    
    1568     void subPtr(Imm32 imm, RegisterID dest)
    1569     {
    1570         sub32(imm, dest);
    1571     }
    1572    
    1573     void subPtr(ImmPtr imm, RegisterID dest)
    1574     {
    1575         sub32(Imm32(imm), dest);
    1576     }
    1577 
    1578     void xorPtr(RegisterID src, RegisterID dest)
    1579     {
    1580         xor32(src, dest);
    1581     }
    1582 
    1583     void xorPtr(Imm32 imm, RegisterID srcDest)
    1584     {
    1585         xor32(imm, srcDest);
    1586     }
    1587 
    1588 
    1589     void loadPtr(ImplicitAddress address, RegisterID dest)
    1590     {
    1591         load32(address, dest);
    1592     }
    1593 
    1594     void loadPtr(BaseIndex address, RegisterID dest)
    1595     {
    1596         load32(address, dest);
    1597     }
    1598 
    1599     void loadPtr(void* address, RegisterID dest)
    1600     {
    1601         load32(address, dest);
    1602     }
    1603 
    1604     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1605     {
    1606         return load32WithAddressOffsetPatch(address, dest);
    1607     }
    1608 
    1609     void storePtr(RegisterID src, ImplicitAddress address)
    1610     {
    1611         store32(src, address);
    1612     }
    1613 
    1614     void storePtr(RegisterID src, BaseIndex address)
    1615     {
    1616         store32(src, address);
    1617     }
    1618 
    1619     void storePtr(ImmPtr imm, ImplicitAddress address)
    1620     {
    1621         store32(Imm32(imm), address);
    1622     }
    1623 
    1624     void storePtr(ImmPtr imm, void* address)
    1625     {
    1626         store32(Imm32(imm), address);
    1627     }
    1628 
    1629     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1630     {
    1631         return store32WithAddressOffsetPatch(src, address);
    1632     }
    1633 
    1634 
    1635     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1636     {
    1637         return branch32(cond, left, right);
    1638     }
    1639 
    1640     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1641     {
    1642         return branch32(cond, left, Imm32(right));
    1643     }
    1644 
    1645     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1646     {
    1647         return branch32(cond, left, right);
    1648     }
    1649 
    1650     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1651     {
    1652         return branch32(cond, left, right);
    1653     }
    1654 
    1655     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1656     {
    1657         return branch32(cond, left, right);
    1658     }
    1659 
    1660     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1661     {
    1662         return branch32(cond, left, Imm32(right));
    1663     }
    1664 
    1665     Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
    1666     {
    1667         return branch32(cond, left, Imm32(right));
    1668     }
    1669 
    1670     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1671     {
    1672         return branchTest32(cond, reg, mask);
    1673     }
    1674 
    1675     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1676     {
    1677         return branchTest32(cond, reg, mask);
    1678     }
    1679 
    1680     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1681     {
    1682         return branchTest32(cond, address, mask);
    1683     }
    1684 
    1685     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1686     {
    1687         return branchTest32(cond, address, mask);
    1688     }
    1689 
    1690 
    1691     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1692     {
    1693         return branchAdd32(cond, src, dest);
    1694     }
    1695 
    1696     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1697     {
    1698         return branchSub32(cond, imm, dest);
    1699     }
    1700 #endif
    1701 
    1702 };
    1703 
    1704105} // namespace JSC
    1705106
    1706107#endif // ENABLE(ASSEMBLER)
    1707108
    1708 #endif // MacroAssembler_h
     109#endif // MacroAssemblerX86_h
  • trunk/JavaScriptCore/assembler/MacroAssemblerX86Common.h

    r40656 r40660  
    2424 */
    2525
    26 #ifndef MacroAssembler_h
    27 #define MacroAssembler_h
     26#ifndef MacroAssemblerX86Common_h
     27#define MacroAssemblerX86Common_h
    2828
    2929#include <wtf/Platform.h>
     
    3232
    3333#include "X86Assembler.h"
     34#include "AbstractMacroAssembler.h"
    3435
    3536namespace JSC {
    36 
    37 template <class AssemblerType>
    38 class AbstractMacroAssembler {
    39 protected:
    40     AssemblerType m_assembler;
    41 
    42 public:
    43     typedef typename AssemblerType::RegisterID RegisterID;
    44     typedef typename AssemblerType::JmpSrc JmpSrc;
    45     typedef typename AssemblerType::JmpDst JmpDst;
    46 
    47     enum Scale {
    48         TimesOne,
    49         TimesTwo,
    50         TimesFour,
    51         TimesEight,
    52     };
    53 
    54     // Address:
    55     //
    56     // Describes a simple base-offset address.
    57     struct Address {
    58         explicit Address(RegisterID base, int32_t offset = 0)
    59             : base(base)
    60             , offset(offset)
    61         {
    62         }
    63 
    64         RegisterID base;
    65         int32_t offset;
    66     };
    67 
    68     // ImplicitAddress:
    69     //
    70     // This class is used for explicit 'load' and 'store' operations
    71     // (as opposed to situations in which a memory operand is provided
    72     // to a generic operation, such as an integer arithmetic instruction).
    73     //
    74     // In the case of a load (or store) operation we want to permit
    75     // addresses to be implicitly constructed, e.g. the two calls:
    76     //
    77     //     load32(Address(addrReg), destReg);
    78     //     load32(addrReg, destReg);
    79     //
    80     // Are equivalent, and the explicit wrapping of the Address in the former
    81     // is unnecessary.
    82     struct ImplicitAddress {
    83         ImplicitAddress(RegisterID base)
    84             : base(base)
    85             , offset(0)
    86         {
    87         }
    88 
    89         ImplicitAddress(Address address)
    90             : base(address.base)
    91             , offset(address.offset)
    92         {
    93         }
    94 
    95         RegisterID base;
    96         int32_t offset;
    97     };
    98 
    99     // BaseIndex:
    100     //
    101     // Describes a complex addressing mode.
    102     struct BaseIndex {
    103         BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
    104             : base(base)
    105             , index(index)
    106             , scale(scale)
    107             , offset(offset)
    108         {
    109         }
    110 
    111         RegisterID base;
    112         RegisterID index;
    113         Scale scale;
    114         int32_t offset;
    115     };
    116 
    117     // AbsoluteAddress:
    118     //
    119     // Describes a memory operand given by a pointer.  For regular load & store
    120     // operations an unwrapped void* will be used, rather than using this.
    121     struct AbsoluteAddress {
    122         explicit AbsoluteAddress(void* ptr)
    123             : m_ptr(ptr)
    124         {
    125         }
    126 
    127         void* m_ptr;
    128     };
    129 
    130 
    131     class Jump;
    132     class PatchBuffer;
    133 
    134     // DataLabelPtr:
    135     //
    136     // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    137     // patched after the code has been generated.
    138     class DataLabelPtr {
    139         template<class AssemblerType_T>
    140         friend class AbstractMacroAssembler;
    141         friend class PatchBuffer;
    142 
    143     public:
    144         DataLabelPtr()
    145         {
    146         }
    147 
    148         DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
    149             : m_label(masm->m_assembler.label())
    150         {
    151         }
    152 
    153         static void patch(void* address, void* value)
    154         {
    155             AssemblerType::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
    156         }
    157        
    158     private:
    159         JmpDst m_label;
    160     };
    161 
    162     // DataLabel32:
    163     //
    164     // A DataLabel32 is used to refer to a location in the code containing a 32-bit value to be
    165     // patched after the code has been generated.
    166     class DataLabel32 {
    167         template<class AssemblerType_T>
    168         friend class AbstractMacroAssembler;
    169         friend class PatchBuffer;
    170 
    171     public:
    172         DataLabel32()
    173         {
    174         }
    175 
    176         DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
    177             : m_label(masm->m_assembler.label())
    178         {
    179         }
    180 
    181         static void patch(void* address, int32_t value)
    182         {
    183             AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(address), value);
    184         }
    185 
    186     private:
    187         JmpDst m_label;
    188     };
    189 
    190     // Label:
    191     //
    192     // A Label records a point in the generated instruction stream, typically such that
    193     // it may be used as a destination for a jump.
    194     class Label {
    195         friend class Jump;
    196         template<class AssemblerType_T>
    197         friend class AbstractMacroAssembler;
    198         friend class PatchBuffer;
    199 
    200     public:
    201         Label()
    202         {
    203         }
    204 
    205         Label(AbstractMacroAssembler<AssemblerType>* masm)
    206             : m_label(masm->m_assembler.label())
    207         {
    208         }
    209        
    210     private:
    211         JmpDst m_label;
    212     };
    213 
    214 
    215     // Jump:
    216     //
    217     // A jump object is a reference to a jump instruction that has been planted
    218     // into the code buffer - it is typically used to link the jump, setting the
    219     // relative offset such that when executed it will jump to the desired
    220     // destination.
    221     //
    222     // Jump objects retain a pointer to the assembler for syntactic purposes -
    223     // to allow the jump object to be able to link itself, e.g.:
    224     //
    225     //     Jump forwardsBranch = jne32(Imm32(0), reg1);
    226     //     // ...
    227     //     forwardsBranch.link();
    228     //
    229     // Jumps may also be linked to a Label.
    230     class Jump {
    231         friend class PatchBuffer;
    232         template<class AssemblerType_T>
    233         friend class AbstractMacroAssembler;
    234 
    235     public:
    236         Jump()
    237         {
    238         }
    239        
    240         Jump(JmpSrc jmp)
    241             : m_jmp(jmp)
    242         {
    243         }
    244        
    245         void link(AbstractMacroAssembler<AssemblerType>* masm)
    246         {
    247             masm->m_assembler.link(m_jmp, masm->m_assembler.label());
    248         }
    249        
    250         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    251         {
    252             masm->m_assembler.link(m_jmp, label.m_label);
    253         }
    254        
    255         static void patch(void* address, void* destination)
    256         {
    257             AssemblerType::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
    258         }
    259 
    260     private:
    261         JmpSrc m_jmp;
    262     };
    263 
    264     // JumpList:
    265     //
    266     // A JumpList is a set of Jump objects.
    267     // All jumps in the set will be linked to the same destination.
    268     class JumpList {
    269         friend class PatchBuffer;
    270 
    271     public:
    272         void link(AbstractMacroAssembler<AssemblerType>* masm)
    273         {
    274             size_t size = m_jumps.size();
    275             for (size_t i = 0; i < size; ++i)
    276                 m_jumps[i].link(masm);
    277             m_jumps.clear();
    278         }
    279        
    280         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    281         {
    282             size_t size = m_jumps.size();
    283             for (size_t i = 0; i < size; ++i)
    284                 m_jumps[i].linkTo(label, masm);
    285             m_jumps.clear();
    286         }
    287        
    288         void append(Jump jump)
    289         {
    290             m_jumps.append(jump);
    291         }
    292        
    293         void append(JumpList& other)
    294         {
    295             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
    296         }
    297 
    298         bool empty()
    299         {
    300             return !m_jumps.size();
    301         }
    302 
    303     private:
    304         Vector<Jump, 16> m_jumps;
    305     };
    306 
    307 
    308     // PatchBuffer:
    309     //
    310     // This class assists in linking code generated by the macro assembler, once code generation
    311     // has been completed, and the code has been copied to its final location in memory.  At this
    312     // time pointers to labels within the code may be resolved, and relative offsets to external
    313     // addresses may be fixed.
    314     //
    315     // Specifically:
    316     //   * Jump objects may be linked to external targets,
    317     //   * The address of Jump objects may be taken, such that they can later be relinked.
    318     //   * The return address of a Jump object representing a call may be acquired.
    319     //   * The address of a Label pointing into the code may be resolved.
    320     //   * The value referenced by a DataLabel may be fixed.
    321     //
    322     // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
    323     // address of calls, as opposed to a point that can be used to later relink a Jump -
    324     // possibly wrap the latter up in an object that can do just that).
    325     class PatchBuffer {
    326     public:
    327         PatchBuffer(void* code)
    328             : m_code(code)
    329         {
    330         }
    331 
    332         void link(Jump jump, void* target)
    333         {
    334             AssemblerType::link(m_code, jump.m_jmp, target);
    335         }
    336 
    337         void link(JumpList list, void* target)
    338         {
    339             for (unsigned i = 0; i < list.m_jumps.size(); ++i)
    340                 AssemblerType::link(m_code, list.m_jumps[i].m_jmp, target);
    341         }
    342 
    343         void* addressOf(Jump jump)
    344         {
    345             return AssemblerType::getRelocatedAddress(m_code, jump.m_jmp);
    346         }
    347 
    348         void* addressOf(Label label)
    349         {
    350             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    351         }
    352 
    353         void* addressOf(DataLabelPtr label)
    354         {
    355             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    356         }
    357 
    358         void* addressOf(DataLabel32 label)
    359         {
    360             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    361         }
    362 
    363         void setPtr(DataLabelPtr label, void* value)
    364         {
    365             AssemblerType::patchAddress(m_code, label.m_label, value);
    366         }
    367 
    368     private:
    369         void* m_code;
    370     };
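As a hedged sketch of the linking flow this enables (pool, failureJump, slowCaseStub and entryLabel are hypothetical names; copyCode and PatchBuffer are the pieces defined above):

    void* code = masm.copyCode(pool);                 // pool: an ExecutablePool* owned by the caller
    MacroAssembler::PatchBuffer patchBuffer(code);
    patchBuffer.link(failureJump, slowCaseStub);      // resolve a Jump against an external target
    void* entry = patchBuffer.addressOf(entryLabel);  // relocated address of a Label in the new copy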
    371  
    372 
    373     // ImmPtr:
    374     //
    375     // A pointer-sized immediate operand to an instruction - this is wrapped
    376     // in a class requiring explicit construction in order to differentiate
    377     // from pointers used as absolute addresses to memory operations.
    378     struct ImmPtr {
    379         explicit ImmPtr(void* value)
    380             : m_value(value)
    381         {
    382         }
    383 
    384         intptr_t asIntptr()
    385         {
    386             return reinterpret_cast<intptr_t>(m_value);
    387         }
    388 
    389         void* m_value;
    390     };
    391 
    392     // Imm32:
    393     //
    394     // A 32-bit immediate operand to an instruction - this is wrapped in a
    395     // class requiring explicit construction in order to prevent RegisterIDs
    396     // (which are implemented as an enum) from accidentally being passed as
    397     // immediate values.
    398     struct Imm32 {
    399         explicit Imm32(int32_t value)
    400             : m_value(value)
    401         {
    402         }
    403 
    404 #if !PLATFORM(X86_64)
    405         explicit Imm32(ImmPtr ptr)
    406             : m_value(ptr.asIntptr())
    407         {
    408         }
    409 #endif
    410 
    411         int32_t m_value;
    412     };
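A small sketch of why the explicit constructors matter (masm and someGlobal are placeholders); a bare integer or pointer will not silently become an operand:

    masm.add32(MacroAssembler::Imm32(4), X86::eax);            // explicit 32-bit immediate
    // masm.add32(4, X86::eax);                                // would not compile: Imm32 is explicit
    masm.move(MacroAssembler::ImmPtr(&someGlobal), X86::edx);  // pointer-sized immediate, not an address operand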
    413 
    414     size_t size()
    415     {
    416         return m_assembler.size();
    417     }
    418 
    419     void* copyCode(ExecutablePool* allocator)
    420     {
    421         return m_assembler.executableCopy(allocator);
    422     }
    423 
    424     Label label()
    425     {
    426         return Label(this);
    427     }
    428    
    429     Label align()
    430     {
    431         m_assembler.align(16);
    432         return Label(this);
    433     }
    434 
    435     ptrdiff_t differenceBetween(Label from, Jump to)
    436     {
    437         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    438     }
    439 
    440     ptrdiff_t differenceBetween(Label from, Label to)
    441     {
    442         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    443     }
    444 
    445     ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
    446     {
    447         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    448     }
    449 
    450     ptrdiff_t differenceBetween(Label from, DataLabel32 to)
    451     {
    452         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    453     }
    454 
    455     ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
    456     {
    457         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    458     }
    459 
    460 };
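A sketch of how the differenceBetween helpers are typically used: record where a patchable slot sits relative to a known label, so the offset can be reapplied once the code has moved (names below are hypothetical):

    MacroAssembler::Label begin = masm.label();
    MacroAssembler::DataLabelPtr pointerSlot;
    MacroAssembler::Jump mismatch = masm.branchPtrWithPatch(MacroAssembler::NotEqual, X86::eax, pointerSlot);
    ptrdiff_t slotOffset = masm.differenceBetween(begin, pointerSlot);  // stable across relocation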
    46137
    46238class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
     
    1030606};
    1031607
    1032 
    1033 #if PLATFORM(X86_64)
    1034 
    1035 class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
    1036 protected:
    1037     static const X86::RegisterID scratchRegister = X86::r11;
    1038 
    1039 public:
    1040     static const Scale ScalePtr = TimesEight;
    1041 
    1042     using MacroAssemblerX86Common::add32;
    1043     using MacroAssemblerX86Common::sub32;
    1044     using MacroAssemblerX86Common::load32;
    1045     using MacroAssemblerX86Common::store32;
    1046 
    1047     void add32(Imm32 imm, AbsoluteAddress address)
    1048     {
    1049         move(ImmPtr(address.m_ptr), scratchRegister);
    1050         add32(imm, Address(scratchRegister));
    1051     }
    1052    
    1053     void sub32(Imm32 imm, AbsoluteAddress address)
    1054     {
    1055         move(ImmPtr(address.m_ptr), scratchRegister);
    1056         sub32(imm, Address(scratchRegister));
    1057     }
    1058 
    1059     void load32(void* address, RegisterID dest)
    1060     {
    1061         if (dest == X86::eax)
    1062             m_assembler.movl_mEAX(address);
    1063         else {
    1064             move(X86::eax, dest);
    1065             m_assembler.movl_mEAX(address);
    1066             swap(X86::eax, dest);
    1067         }
    1068     }
    1069 
    1070     void store32(Imm32 imm, void* address)
    1071     {
    1072         move(X86::eax, scratchRegister);
    1073         move(imm, X86::eax);
    1074         m_assembler.movl_EAXm(address);
    1075         move(scratchRegister, X86::eax);
    1076     }
    1077 
    1078 
    1079 
    1080     void addPtr(RegisterID src, RegisterID dest)
    1081     {
    1082         m_assembler.addq_rr(src, dest);
    1083     }
    1084 
    1085     void addPtr(Imm32 imm, RegisterID srcDest)
    1086     {
    1087         m_assembler.addq_ir(imm.m_value, srcDest);
    1088     }
    1089 
    1090     void addPtr(ImmPtr imm, RegisterID dest)
    1091     {
    1092         move(imm, scratchRegister);
    1093         m_assembler.addq_rr(scratchRegister, dest);
    1094     }
    1095 
    1096     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1097     {
    1098         m_assembler.leal_mr(imm.m_value, src, dest);
    1099     }
    1100 
    1101     void andPtr(RegisterID src, RegisterID dest)
    1102     {
    1103         m_assembler.andq_rr(src, dest);
    1104     }
    1105 
    1106     void andPtr(Imm32 imm, RegisterID srcDest)
    1107     {
    1108         m_assembler.andq_ir(imm.m_value, srcDest);
    1109     }
    1110 
    1111     void orPtr(RegisterID src, RegisterID dest)
    1112     {
    1113         m_assembler.orq_rr(src, dest);
    1114     }
    1115 
    1116     void orPtr(ImmPtr imm, RegisterID dest)
    1117     {
    1118         move(imm, scratchRegister);
    1119         m_assembler.orq_rr(scratchRegister, dest);
    1120     }
    1121 
    1122     void orPtr(Imm32 imm, RegisterID dest)
    1123     {
    1124         m_assembler.orq_ir(imm.m_value, dest);
    1125     }
    1126 
    1127     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1128     {
    1129         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    1130         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    1131         if (shift_amount != X86::ecx) {
    1132             swap(shift_amount, X86::ecx);
    1133 
    1134             // E.g. transform "sarq %rax, %rax" -> "xchgq %rax, %rcx; sarq %rcx, %rcx; xchgq %rax, %rcx"
    1135             if (dest == shift_amount)
    1136                 m_assembler.sarq_CLr(X86::ecx);
    1137             // E.g. transform "sarq %rax, %rcx" -> "xchgq %rax, %rcx; sarq %rcx, %rax; xchgq %rax, %rcx"
    1138             else if (dest == X86::ecx)
    1139                 m_assembler.sarq_CLr(shift_amount);
    1140             // E.g. transform "sarq %rax, %rbx" -> "xchgq %rax, %rcx; sarq %rcx, %rbx; xchgq %rax, %rcx"
    1141             else
    1142                 m_assembler.sarq_CLr(dest);
    1143        
    1144             swap(shift_amount, X86::ecx);
    1145         } else
    1146             m_assembler.sarq_CLr(dest);
    1147     }
    1148 
    1149     void rshiftPtr(Imm32 imm, RegisterID dest)
    1150     {
    1151         m_assembler.sarq_i8r(imm.m_value, dest);
    1152     }
    1153 
    1154     void subPtr(RegisterID src, RegisterID dest)
    1155     {
    1156         m_assembler.subq_rr(src, dest);
    1157     }
    1158    
    1159     void subPtr(Imm32 imm, RegisterID dest)
    1160     {
    1161         m_assembler.subq_ir(imm.m_value, dest);
    1162     }
    1163    
    1164     void subPtr(ImmPtr imm, RegisterID dest)
    1165     {
    1166         move(imm, scratchRegister);
    1167         m_assembler.subq_rr(scratchRegister, dest);
    1168     }
    1169 
    1170     void xorPtr(RegisterID src, RegisterID dest)
    1171     {
    1172         m_assembler.xorq_rr(src, dest);
    1173     }
    1174 
    1175     void xorPtr(Imm32 imm, RegisterID srcDest)
    1176     {
    1177         m_assembler.xorq_ir(imm.m_value, srcDest);
    1178     }
    1179 
    1180 
    1181     void loadPtr(ImplicitAddress address, RegisterID dest)
    1182     {
    1183         m_assembler.movq_mr(address.offset, address.base, dest);
    1184     }
    1185 
    1186     void loadPtr(BaseIndex address, RegisterID dest)
    1187     {
    1188         m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    1189     }
    1190 
    1191     void loadPtr(void* address, RegisterID dest)
    1192     {
    1193         if (dest == X86::eax)
    1194             m_assembler.movq_mEAX(address);
    1195         else {
    1196             move(X86::eax, dest);
    1197             m_assembler.movq_mEAX(address);
    1198             swap(X86::eax, dest);
    1199         }
    1200     }
    1201 
    1202     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1203     {
    1204         m_assembler.movq_mr_disp32(address.offset, address.base, dest);
    1205         return DataLabel32(this);
    1206     }
    1207 
    1208     void storePtr(RegisterID src, ImplicitAddress address)
    1209     {
    1210         m_assembler.movq_rm(src, address.offset, address.base);
    1211     }
    1212 
    1213     void storePtr(RegisterID src, BaseIndex address)
    1214     {
    1215         m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    1216     }
    1217 
    1218     void storePtr(ImmPtr imm, ImplicitAddress address)
    1219     {
    1220         move(imm, scratchRegister);
    1221         storePtr(scratchRegister, address);
    1222     }
    1223 
    1224     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1225     {
    1226         m_assembler.movq_rm_disp32(src, address.offset, address.base);
    1227         return DataLabel32(this);
    1228     }
    1229 
    1230 
    1231     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1232     {
    1233         m_assembler.cmpq_rr(right, left);
    1234         return Jump(m_assembler.jCC(cond));
    1235     }
    1236 
    1237     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1238     {
    1239         intptr_t imm = right.asIntptr();
    1240         if (CAN_SIGN_EXTEND_32_64(imm)) {
    1241             if (!imm)
    1242                 m_assembler.testq_rr(left, left);
    1243             else
    1244                 m_assembler.cmpq_ir(imm, left);
    1245             return Jump(m_assembler.jCC(cond));
    1246         } else {
    1247             move(right, scratchRegister);
    1248             return branchPtr(cond, left, scratchRegister);
    1249         }
    1250     }
    1251 
    1252     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1253     {
    1254         m_assembler.cmpq_mr(right.offset, right.base, left);
    1255         return Jump(m_assembler.jCC(cond));
    1256     }
    1257 
    1258     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1259     {
    1260         move(ImmPtr(left.m_ptr), scratchRegister);
    1261         return branchPtr(cond, Address(scratchRegister), right);
    1262     }
    1263 
    1264     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1265     {
    1266         m_assembler.cmpq_rm(right, left.offset, left.base);
    1267         return Jump(m_assembler.jCC(cond));
    1268     }
    1269 
    1270     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1271     {
    1272         move(right, scratchRegister);
    1273         return branchPtr(cond, left, scratchRegister);
    1274     }
    1275 
    1276     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1277     {
    1278         m_assembler.testq_rr(reg, mask);
    1279         return Jump(m_assembler.jCC(cond));
    1280     }
    1281 
    1282     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1283     {
    1284         // if we are only interested in the low seven bits, this can be tested with a testb
    1285         if (mask.m_value == -1)
    1286             m_assembler.testq_rr(reg, reg);
    1287         else if ((mask.m_value & ~0x7f) == 0)
    1288             m_assembler.testb_i8r(mask.m_value, reg);
    1289         else
    1290             m_assembler.testq_i32r(mask.m_value, reg);
    1291         return Jump(m_assembler.jCC(cond));
    1292     }
    1293 
    1294     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1295     {
    1296         if (mask.m_value == -1)
    1297             m_assembler.cmpq_im(0, address.offset, address.base);
    1298         else
    1299             m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
    1300         return Jump(m_assembler.jCC(cond));
    1301     }
    1302 
    1303     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1304     {
    1305         if (mask.m_value == -1)
    1306             m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
    1307         else
    1308             m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    1309         return Jump(m_assembler.jCC(cond));
    1310     }
    1311 
    1312 
    1313     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1314     {
    1315         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1316         addPtr(src, dest);
    1317         return Jump(m_assembler.jCC(cond));
    1318     }
    1319 
    1320     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1321     {
    1322         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    1323         subPtr(imm, dest);
    1324         return Jump(m_assembler.jCC(cond));
    1325     }
    1326 
    1327     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1328     {
    1329         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1330         dataLabel = DataLabelPtr(this);
    1331         return branchPtr(cond, left, scratchRegister);
    1332     }
    1333 
    1334     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1335     {
    1336         m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
    1337         dataLabel = DataLabelPtr(this);
    1338         return branchPtr(cond, left, scratchRegister);
    1339     }
    1340 
    1341     DataLabelPtr storePtrWithPatch(Address address)
    1342     {
    1343         m_assembler.movq_i64r(0, scratchRegister);
    1344         DataLabelPtr label(this);
    1345         storePtr(scratchRegister, address);
    1346         return label;
    1347     }
    1348 };
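On x86-64 the operations above route absolute addresses and 64-bit immediates through r11 (scratchRegister); a hedged sketch of what that means for callers (the counter and value names are placeholders):

    // One macro-assembler call each; r11 is clobbered internally, so it must not
    // be holding a live value across these operations.
    masm.add32(MacroAssembler::Imm32(1), MacroAssembler::AbsoluteAddress(&hypotheticalCounter));
    masm.storePtr(MacroAssembler::ImmPtr(hypotheticalValue), MacroAssembler::Address(X86::edi, 8));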
    1349 
    1350 typedef MacroAssemblerX86_64 MacroAssemblerBase;
    1351 
    1352 #else
    1353 
    1354 class MacroAssemblerX86 : public MacroAssemblerX86Common {
    1355 public:
    1356     static const Scale ScalePtr = TimesFour;
    1357 
    1358     using MacroAssemblerX86Common::add32;
    1359     using MacroAssemblerX86Common::sub32;
    1360     using MacroAssemblerX86Common::load32;
    1361     using MacroAssemblerX86Common::store32;
    1362     using MacroAssemblerX86Common::branch32;
    1363 
    1364     void add32(Imm32 imm, RegisterID src, RegisterID dest)
    1365     {
    1366         m_assembler.leal_mr(imm.m_value, src, dest);
    1367     }
    1368 
    1369     void add32(Imm32 imm, AbsoluteAddress address)
    1370     {
    1371         m_assembler.addl_im(imm.m_value, address.m_ptr);
    1372     }
    1373    
    1374     void sub32(Imm32 imm, AbsoluteAddress address)
    1375     {
    1376         m_assembler.subl_im(imm.m_value, address.m_ptr);
    1377     }
    1378 
    1379     void load32(void* address, RegisterID dest)
    1380     {
    1381         m_assembler.movl_mr(address, dest);
    1382     }
    1383 
    1384     void store32(Imm32 imm, void* address)
    1385     {
    1386         m_assembler.movl_i32m(imm.m_value, address);
    1387     }
    1388 
    1389     Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    1390     {
    1391         m_assembler.cmpl_rm(right, left.m_ptr);
    1392         return Jump(m_assembler.jCC(cond));
    1393     }
    1394 
    1395     Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    1396     {
    1397         m_assembler.cmpl_im(right.m_value, left.m_ptr);
    1398         return Jump(m_assembler.jCC(cond));
    1399     }
    1400 
    1401     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1402     {
    1403         m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
    1404         dataLabel = DataLabelPtr(this);
    1405         return Jump(m_assembler.jCC(cond));
    1406     }
    1407 
    1408     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1409     {
    1410         m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
    1411         dataLabel = DataLabelPtr(this);
    1412         return Jump(m_assembler.jCC(cond));
    1413     }
    1414 
    1415     DataLabelPtr storePtrWithPatch(Address address)
    1416     {
    1417         m_assembler.movl_i32m(0, address.offset, address.base);
    1418         return DataLabelPtr(this);
    1419     }
    1420 };
    1421 
    1422 typedef MacroAssemblerX86 MacroAssemblerBase;
    1423 
    1424 #endif
    1425 
    1426 
    1427 class MacroAssembler : public MacroAssemblerBase {
    1428 public:
    1429 
    1430     using MacroAssemblerBase::pop;
    1431     using MacroAssemblerBase::jump;
    1432     using MacroAssemblerBase::branch32;
    1433     using MacroAssemblerBase::branch16;
    1434 #if PLATFORM(X86_64)
    1435     using MacroAssemblerBase::branchPtr;
    1436     using MacroAssemblerBase::branchTestPtr;
    1437 #endif
    1438 
    1439 
    1440     // Platform-agnostic convenience functions,
    1441     // described in terms of other macro assembly methods.
    1442     void pop()
    1443     {
    1444         addPtr(Imm32(sizeof(void*)), stackPointerRegister);
    1445     }
    1446    
    1447     void peek(RegisterID dest, int index = 0)
    1448     {
    1449         loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    1450     }
    1451 
    1452     void poke(RegisterID src, int index = 0)
    1453     {
    1454         storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
    1455     }
    1456 
    1457     void poke(Imm32 value, int index = 0)
    1458     {
    1459         store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
    1460     }
    1461 
    1462     void poke(ImmPtr imm, int index = 0)
    1463     {
    1464         storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
    1465     }
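A short sketch of the stack-slot helpers just defined (illustrative registers; the index is measured in machine words, not bytes):

    masm.poke(X86::eax, 0);                      // write eax into the top stack slot
    masm.poke(MacroAssembler::Imm32(42), 1);     // write a constant into the next slot
    masm.peek(X86::edx, 0);                      // read the top slot back into edx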
    1466 
    1467 
    1468     // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
    1469     void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
    1470     {
    1471         branchPtr(cond, op1, imm).linkTo(target, this);
    1472     }
    1473 
    1474     void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
    1475     {
    1476         branch32(cond, op1, op2).linkTo(target, this);
    1477     }
    1478 
    1479     void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
    1480     {
    1481         branch32(cond, op1, imm).linkTo(target, this);
    1482     }
    1483 
    1484     void branch32(Condition cond, RegisterID left, Address right, Label target)
    1485     {
    1486         branch32(cond, left, right).linkTo(target, this);
    1487     }
    1488 
    1489     void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
    1490     {
    1491         branch16(cond, left, right).linkTo(target, this);
    1492     }
    1493    
    1494     void branchTestPtr(Condition cond, RegisterID reg, Label target)
    1495     {
    1496         branchTestPtr(cond, reg).linkTo(target, this);
    1497     }
    1498 
    1499     void jump(Label target)
    1500     {
    1501         jump().linkTo(target, this);
    1502     }
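A sketch of a simple counted loop built from these backwards-branch forms (registers are illustrative):

    MacroAssembler::Label loopTop = masm.label();
    masm.sub32(MacroAssembler::Imm32(1), X86::eax);
    masm.branch32(MacroAssembler::NotEqual, X86::eax, MacroAssembler::Imm32(0), loopTop);  // loop while eax != 0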
    1503 
    1504 
    1505     // Ptr methods
    1506     // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    1507 #if !PLATFORM(X86_64)
    1508     void addPtr(RegisterID src, RegisterID dest)
    1509     {
    1510         add32(src, dest);
    1511     }
    1512 
    1513     void addPtr(Imm32 imm, RegisterID srcDest)
    1514     {
    1515         add32(imm, srcDest);
    1516     }
    1517 
    1518     void addPtr(ImmPtr imm, RegisterID dest)
    1519     {
    1520         add32(Imm32(imm), dest);
    1521     }
    1522 
    1523     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1524     {
    1525         add32(imm, src, dest);
    1526     }
    1527 
    1528     void andPtr(RegisterID src, RegisterID dest)
    1529     {
    1530         and32(src, dest);
    1531     }
    1532 
    1533     void andPtr(Imm32 imm, RegisterID srcDest)
    1534     {
    1535         and32(imm, srcDest);
    1536     }
    1537 
    1538     void orPtr(RegisterID src, RegisterID dest)
    1539     {
    1540         or32(src, dest);
    1541     }
    1542 
    1543     void orPtr(ImmPtr imm, RegisterID dest)
    1544     {
    1545         or32(Imm32(imm), dest);
    1546     }
    1547 
    1548     void orPtr(Imm32 imm, RegisterID dest)
    1549     {
    1550         or32(imm, dest);
    1551     }
    1552 
    1553     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1554     {
    1555         rshift32(shift_amount, dest);
    1556     }
    1557 
    1558     void rshiftPtr(Imm32 imm, RegisterID dest)
    1559     {
    1560         rshift32(imm, dest);
    1561     }
    1562 
    1563     void subPtr(RegisterID src, RegisterID dest)
    1564     {
    1565         sub32(src, dest);
    1566     }
    1567    
    1568     void subPtr(Imm32 imm, RegisterID dest)
    1569     {
    1570         sub32(imm, dest);
    1571     }
    1572    
    1573     void subPtr(ImmPtr imm, RegisterID dest)
    1574     {
    1575         sub32(Imm32(imm), dest);
    1576     }
    1577 
    1578     void xorPtr(RegisterID src, RegisterID dest)
    1579     {
    1580         xor32(src, dest);
    1581     }
    1582 
    1583     void xorPtr(Imm32 imm, RegisterID srcDest)
    1584     {
    1585         xor32(imm, srcDest);
    1586     }
    1587 
    1588 
    1589     void loadPtr(ImplicitAddress address, RegisterID dest)
    1590     {
    1591         load32(address, dest);
    1592     }
    1593 
    1594     void loadPtr(BaseIndex address, RegisterID dest)
    1595     {
    1596         load32(address, dest);
    1597     }
    1598 
    1599     void loadPtr(void* address, RegisterID dest)
    1600     {
    1601         load32(address, dest);
    1602     }
    1603 
    1604     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1605     {
    1606         return load32WithAddressOffsetPatch(address, dest);
    1607     }
    1608 
    1609     void storePtr(RegisterID src, ImplicitAddress address)
    1610     {
    1611         store32(src, address);
    1612     }
    1613 
    1614     void storePtr(RegisterID src, BaseIndex address)
    1615     {
    1616         store32(src, address);
    1617     }
    1618 
    1619     void storePtr(ImmPtr imm, ImplicitAddress address)
    1620     {
    1621         store32(Imm32(imm), address);
    1622     }
    1623 
    1624     void storePtr(ImmPtr imm, void* address)
    1625     {
    1626         store32(Imm32(imm), address);
    1627     }
    1628 
    1629     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1630     {
    1631         return store32WithAddressOffsetPatch(src, address);
    1632     }
    1633 
    1634 
    1635     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1636     {
    1637         return branch32(cond, left, right);
    1638     }
    1639 
    1640     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1641     {
    1642         return branch32(cond, left, Imm32(right));
    1643     }
    1644 
    1645     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1646     {
    1647         return branch32(cond, left, right);
    1648     }
    1649 
    1650     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1651     {
    1652         return branch32(cond, left, right);
    1653     }
    1654 
    1655     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1656     {
    1657         return branch32(cond, left, right);
    1658     }
    1659 
    1660     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1661     {
    1662         return branch32(cond, left, Imm32(right));
    1663     }
    1664 
    1665     Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
    1666     {
    1667         return branch32(cond, left, Imm32(right));
    1668     }
    1669 
    1670     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1671     {
    1672         return branchTest32(cond, reg, mask);
    1673     }
    1674 
    1675     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1676     {
    1677         return branchTest32(cond, reg, mask);
    1678     }
    1679 
    1680     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1681     {
    1682         return branchTest32(cond, address, mask);
    1683     }
    1684 
    1685     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1686     {
    1687         return branchTest32(cond, address, mask);
    1688     }
    1689 
    1690 
    1691     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1692     {
    1693         return branchAdd32(cond, src, dest);
    1694     }
    1695 
    1696     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1697     {
    1698         return branchSub32(cond, imm, dest);
    1699     }
    1700 #endif
    1701 
    1702 };
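The point of the Ptr layer is that JIT code can be written once for both register widths; a hedged sketch (offsetOfField is a placeholder constant):

    // Compiles to 32-bit moves on x86 and 64-bit moves on x86-64, with no #if in the caller.
    masm.loadPtr(MacroAssembler::Address(X86::esi, offsetOfField), X86::eax);
    masm.addPtr(MacroAssembler::Imm32(sizeof(void*)), X86::eax);
    masm.storePtr(X86::eax, MacroAssembler::Address(X86::esi, offsetOfField));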
    1703 
    1704608} // namespace JSC
    1705609
    1706610#endif // ENABLE(ASSEMBLER)
    1707611
    1708 #endif // MacroAssembler_h
     612#endif // MacroAssemblerX86Common_h
  • trunk/JavaScriptCore/assembler/MacroAssemblerX86_64.h

    r40656 r40660  
    2424 */
    2525
    26 #ifndef MacroAssembler_h
    27 #define MacroAssembler_h
     26#ifndef MacroAssemblerX86_64_h
     27#define MacroAssemblerX86_64_h
    2828
    2929#include <wtf/Platform.h>
    3030
    31 #if ENABLE(ASSEMBLER)
    32 
    33 #include "X86Assembler.h"
     31#if ENABLE(ASSEMBLER) && PLATFORM(X86_64)
     32
     33#include "MacroAssemblerX86Common.h"
    3434
    3535namespace JSC {
    36 
    37 template <class AssemblerType>
    38 class AbstractMacroAssembler {
    39 protected:
    40     AssemblerType m_assembler;
    41 
    42 public:
    43     typedef typename AssemblerType::RegisterID RegisterID;
    44     typedef typename AssemblerType::JmpSrc JmpSrc;
    45     typedef typename AssemblerType::JmpDst JmpDst;
    46 
    47     enum Scale {
    48         TimesOne,
    49         TimesTwo,
    50         TimesFour,
    51         TimesEight,
    52     };
    53 
    54     // Address:
    55     //
    56     // Describes a simple base-offset address.
    57     struct Address {
    58         explicit Address(RegisterID base, int32_t offset = 0)
    59             : base(base)
    60             , offset(offset)
    61         {
    62         }
    63 
    64         RegisterID base;
    65         int32_t offset;
    66     };
    67 
    68     // ImplicitAddress:
    69     //
    70     // This class is used for explicit 'load' and 'store' operations
    71     // (as opposed to situations in which a memory operand is provided
    72     // to a generic operation, such as an integer arithmetic instruction).
    73     //
    74     // In the case of a load (or store) operation we want to permit
    75     // addresses to be implicitly constructed, e.g. the two calls:
    76     //
    77     //     load32(Address(addrReg), destReg);
    78     //     load32(addrReg, destReg);
    79     //
    80     // Are equivalent, and the explicit wrapping of the Address in the former
    81     // is unnecessary.
    82     struct ImplicitAddress {
    83         ImplicitAddress(RegisterID base)
    84             : base(base)
    85             , offset(0)
    86         {
    87         }
    88 
    89         ImplicitAddress(Address address)
    90             : base(address.base)
    91             , offset(address.offset)
    92         {
    93         }
    94 
    95         RegisterID base;
    96         int32_t offset;
    97     };
    98 
    99     // BaseIndex:
    100     //
    101     // Describes a complex addressing mode.
    102     struct BaseIndex {
    103         BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
    104             : base(base)
    105             , index(index)
    106             , scale(scale)
    107             , offset(offset)
    108         {
    109         }
    110 
    111         RegisterID base;
    112         RegisterID index;
    113         Scale scale;
    114         int32_t offset;
    115     };
    116 
    117     // AbsoluteAddress:
    118     //
    119     // Describes a memory operand given by a pointer.  For regular load & store
    120     // operations an unwrapped void* will be used, rather than using this.
    121     struct AbsoluteAddress {
    122         explicit AbsoluteAddress(void* ptr)
    123             : m_ptr(ptr)
    124         {
    125         }
    126 
    127         void* m_ptr;
    128     };
    129 
    130 
    131     class Jump;
    132     class PatchBuffer;
    133 
    134     // DataLabelPtr:
    135     //
    136     // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    137     // patched after the code has been generated.
    138     class DataLabelPtr {
    139         template<class AssemblerType_T>
    140         friend class AbstractMacroAssembler;
    141         friend class PatchBuffer;
    142 
    143     public:
    144         DataLabelPtr()
    145         {
    146         }
    147 
    148         DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
    149             : m_label(masm->m_assembler.label())
    150         {
    151         }
    152 
    153         static void patch(void* address, void* value)
    154         {
    155             AssemblerType::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
    156         }
    157        
    158     private:
    159         JmpDst m_label;
    160     };
    161 
    162     // DataLabel32:
    163     //
    164     // A DataLabel32 is used to refer to a location in the code containing a 32-bit value to be
    165     // patched after the code has been generated.
    166     class DataLabel32 {
    167         template<class AssemblerType_T>
    168         friend class AbstractMacroAssembler;
    169         friend class PatchBuffer;
    170 
    171     public:
    172         DataLabel32()
    173         {
    174         }
    175 
    176         DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
    177             : m_label(masm->m_assembler.label())
    178         {
    179         }
    180 
    181         static void patch(void* address, int32_t value)
    182         {
    183             AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(address), value);
    184         }
    185 
    186     private:
    187         JmpDst m_label;
    188     };
    189 
    190     // Label:
    191     //
    192     // A Label records a point in the generated instruction stream, typically such that
    193     // it may be used as a destination for a jump.
    194     class Label {
    195         friend class Jump;
    196         template<class AssemblerType_T>
    197         friend class AbstractMacroAssembler;
    198         friend class PatchBuffer;
    199 
    200     public:
    201         Label()
    202         {
    203         }
    204 
    205         Label(AbstractMacroAssembler<AssemblerType>* masm)
    206             : m_label(masm->m_assembler.label())
    207         {
    208         }
    209        
    210     private:
    211         JmpDst m_label;
    212     };
    213 
    214 
    215     // Jump:
    216     //
    217     // A jump object is a reference to a jump instruction that has been planted
    218     // into the code buffer - it is typically used to link the jump, setting the
    219     // relative offset such that when executed it will jump to the desired
    220     // destination.
    221     //
    222     // Jump objects retain a pointer to the assembler for syntactic purposes -
    223     // to allow the jump object to be able to link itself, e.g.:
    224     //
    225     //     Jump forwardsBranch = jne32(Imm32(0), reg1);
    226     //     // ...
    227     //     forwardsBranch.link();
    228     //
    229     // Jumps may also be linked to a Label.
    230     class Jump {
    231         friend class PatchBuffer;
    232         template<class AssemblerType_T>
    233         friend class AbstractMacroAssembler;
    234 
    235     public:
    236         Jump()
    237         {
    238         }
    239        
    240         Jump(JmpSrc jmp)
    241             : m_jmp(jmp)
    242         {
    243         }
    244        
    245         void link(AbstractMacroAssembler<AssemblerType>* masm)
    246         {
    247             masm->m_assembler.link(m_jmp, masm->m_assembler.label());
    248         }
    249        
    250         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    251         {
    252             masm->m_assembler.link(m_jmp, label.m_label);
    253         }
    254        
    255         static void patch(void* address, void* destination)
    256         {
    257             AssemblerType::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
    258         }
    259 
    260     private:
    261         JmpSrc m_jmp;
    262     };
    263 
    264     // JumpList:
    265     //
    266     // A JumpList is a set of Jump objects.
    267     // All jumps in the set will be linked to the same destination.
    268     class JumpList {
    269         friend class PatchBuffer;
    270 
    271     public:
    272         void link(AbstractMacroAssembler<AssemblerType>* masm)
    273         {
    274             size_t size = m_jumps.size();
    275             for (size_t i = 0; i < size; ++i)
    276                 m_jumps[i].link(masm);
    277             m_jumps.clear();
    278         }
    279        
    280         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
    281         {
    282             size_t size = m_jumps.size();
    283             for (size_t i = 0; i < size; ++i)
    284                 m_jumps[i].linkTo(label, masm);
    285             m_jumps.clear();
    286         }
    287        
    288         void append(Jump jump)
    289         {
    290             m_jumps.append(jump);
    291         }
    292        
    293         void append(JumpList& other)
    294         {
    295             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
    296         }
    297 
    298         bool empty()
    299         {
    300             return !m_jumps.size();
    301         }
    302 
    303     private:
    304         Vector<Jump, 16> m_jumps;
    305     };
    306 
    307 
    308     // PatchBuffer:
    309     //
    310     // This class assists in linking code generated by the macro assembler, once code generation
    311     // has been completed, and the code has been copied to its final location in memory.  At this
    312     // time pointers to labels within the code may be resolved, and relative offsets to external
    313     // addresses may be fixed.
    314     //
    315     // Specifically:
    316     //   * Jump objects may be linked to external targets,
    317     //   * The address of Jump objects may be taken, such that they can later be relinked.
    318     //   * The return address of a Jump object representing a call may be acquired.
    319     //   * The address of a Label pointing into the code may be resolved.
    320     //   * The value referenced by a DataLabel may be fixed.
    321     //
    322     // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
    323     // address of calls, as opposed to a point that can be used to later relink a Jump -
    324     // possibly wrap the latter up in an object that can do just that).
    325     class PatchBuffer {
    326     public:
    327         PatchBuffer(void* code)
    328             : m_code(code)
    329         {
    330         }
    331 
    332         void link(Jump jump, void* target)
    333         {
    334             AssemblerType::link(m_code, jump.m_jmp, target);
    335         }
    336 
    337         void link(JumpList list, void* target)
    338         {
    339             for (unsigned i = 0; i < list.m_jumps.size(); ++i)
    340                 AssemblerType::link(m_code, list.m_jumps[i].m_jmp, target);
    341         }
    342 
    343         void* addressOf(Jump jump)
    344         {
    345             return AssemblerType::getRelocatedAddress(m_code, jump.m_jmp);
    346         }
    347 
    348         void* addressOf(Label label)
    349         {
    350             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    351         }
    352 
    353         void* addressOf(DataLabelPtr label)
    354         {
    355             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    356         }
    357 
    358         void* addressOf(DataLabel32 label)
    359         {
    360             return AssemblerType::getRelocatedAddress(m_code, label.m_label);
    361         }
    362 
    363         void setPtr(DataLabelPtr label, void* value)
    364         {
    365             AssemblerType::patchAddress(m_code, label.m_label, value);
    366         }
    367 
    368     private:
    369         void* m_code;
    370     };
    371  
    372 
    373     // ImmPtr:
    374     //
    375     // A pointer-sized immediate operand to an instruction - this is wrapped
    376     // in a class requiring explicit construction in order to differentiate
    377     // from pointers used as absolute addresses to memory operations.
    378     struct ImmPtr {
    379         explicit ImmPtr(void* value)
    380             : m_value(value)
    381         {
    382         }
    383 
    384         intptr_t asIntptr()
    385         {
    386             return reinterpret_cast<intptr_t>(m_value);
    387         }
    388 
    389         void* m_value;
    390     };
    391 
    392     // Imm32:
    393     //
    394     // A 32-bit immediate operand to an instruction - this is wrapped in a
    395     // class requiring explicit construction in order to prevent RegisterIDs
    396     // (which are implemented as an enum) from accidentally being passed as
    397     // immediate values.
    398     struct Imm32 {
    399         explicit Imm32(int32_t value)
    400             : m_value(value)
    401         {
    402         }
    403 
    404 #if !PLATFORM(X86_64)
    405         explicit Imm32(ImmPtr ptr)
    406             : m_value(ptr.asIntptr())
    407         {
    408         }
    409 #endif
    410 
    411         int32_t m_value;
    412     };
    413 
    414     size_t size()
    415     {
    416         return m_assembler.size();
    417     }
    418 
    419     void* copyCode(ExecutablePool* allocator)
    420     {
    421         return m_assembler.executableCopy(allocator);
    422     }
    423 
    424     Label label()
    425     {
    426         return Label(this);
    427     }
    428    
    429     Label align()
    430     {
    431         m_assembler.align(16);
    432         return Label(this);
    433     }
    434 
    435     ptrdiff_t differenceBetween(Label from, Jump to)
    436     {
    437         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    438     }
    439 
    440     ptrdiff_t differenceBetween(Label from, Label to)
    441     {
    442         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    443     }
    444 
    445     ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
    446     {
    447         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    448     }
    449 
    450     ptrdiff_t differenceBetween(Label from, DataLabel32 to)
    451     {
    452         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    453     }
    454 
    455     ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
    456     {
    457         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    458     }
    459 
    460 };
    461 
    462 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    463 public:
    464 
    465     typedef X86Assembler::Condition Condition;
    466     static const Condition Equal = X86Assembler::ConditionE;
    467     static const Condition NotEqual = X86Assembler::ConditionNE;
    468     static const Condition Above = X86Assembler::ConditionA;
    469     static const Condition AboveOrEqual = X86Assembler::ConditionAE;
    470     static const Condition Below = X86Assembler::ConditionB;
    471     static const Condition BelowOrEqual = X86Assembler::ConditionBE;
    472     static const Condition GreaterThan = X86Assembler::ConditionG;
    473     static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE;
    474     static const Condition LessThan = X86Assembler::ConditionL;
    475     static const Condition LessThanOrEqual = X86Assembler::ConditionLE;
    476     static const Condition Overflow = X86Assembler::ConditionO;
    477     static const Condition Zero = X86Assembler::ConditionE;
    478     static const Condition NonZero = X86Assembler::ConditionNE;
    479 
    480     static const RegisterID stackPointerRegister = X86::esp;
    481 
    482     // Integer arithmetic operations:
    483     //
    484     // Operations are typically two-operand - operation(source, srcDst).
    485     // For many operations the source may be an Imm32, the srcDst operand
    486     // may often be a memory location (explicitly described using an Address
    487     // object).
    488 
    489     void add32(RegisterID src, RegisterID dest)
    490     {
    491         m_assembler.addl_rr(src, dest);
    492     }
    493 
    494     void add32(Imm32 imm, Address address)
    495     {
    496         m_assembler.addl_im(imm.m_value, address.offset, address.base);
    497     }
    498 
    499     void add32(Imm32 imm, RegisterID dest)
    500     {
    501         m_assembler.addl_ir(imm.m_value, dest);
    502     }
    503    
    504     void add32(Address src, RegisterID dest)
    505     {
    506         m_assembler.addl_mr(src.offset, src.base, dest);
    507     }
    508    
    509     void and32(RegisterID src, RegisterID dest)
    510     {
    511         m_assembler.andl_rr(src, dest);
    512     }
    513 
    514     void and32(Imm32 imm, RegisterID dest)
    515     {
    516         m_assembler.andl_ir(imm.m_value, dest);
    517     }
    518 
    519     void lshift32(Imm32 imm, RegisterID dest)
    520     {
    521         m_assembler.shll_i8r(imm.m_value, dest);
    522     }
    523    
    524     void lshift32(RegisterID shift_amount, RegisterID dest)
    525     {
    526         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    527         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    528         if (shift_amount != X86::ecx) {
    529             swap(shift_amount, X86::ecx);
    530 
    531             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
    532             if (dest == shift_amount)
    533                 m_assembler.shll_CLr(X86::ecx);
    534             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
    535             else if (dest == X86::ecx)
    536                 m_assembler.shll_CLr(shift_amount);
    537             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
    538             else
    539                 m_assembler.shll_CLr(dest);
    540        
    541             swap(shift_amount, X86::ecx);
    542         } else
    543             m_assembler.shll_CLr(dest);
    544     }
    545    
    546     // Take the value from dividend, divide it by divisor, and put the remainder in remainder.
    547     // For now, this operation has specific register requirements, and the three registers must
    548     // be unique.  It is unfortunate to expose this in the MacroAssembler interface, however
    549     // given the complexity to fix, the fact that it is not uncommon for processors to have
    550     // specific register requirements on this operation (e.g. MIPS places the result in 'hi'), or to not
    551     // support a hardware divide at all, it may not be worth fixing.
    552     void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder)
    553     {
    554 #ifdef NDEBUG
    555 #pragma unused(dividend,remainder)
    556 #else
    557         ASSERT((dividend == X86::eax) && (remainder == X86::edx));
    558         ASSERT((dividend != divisor) && (remainder != divisor));
    559 #endif
    560 
    561         m_assembler.cdq();
    562         m_assembler.idivl_r(divisor);
    563     }
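A sketch of the register discipline mod32 imposes (idiv works on edx:eax, so the dividend must already be in eax and the remainder comes back in edx; valueReg is a placeholder):

    masm.move(valueReg, X86::eax);               // dividend must live in eax
    masm.mod32(X86::ecx, X86::eax, X86::edx);    // divisor, dividend, remainder
    // remainder is now in edx; eax holds the quotient as a side effect of idivl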
    564 
    565     void mul32(RegisterID src, RegisterID dest)
    566     {
    567         m_assembler.imull_rr(src, dest);
    568     }
    569    
    570     void mul32(Imm32 imm, RegisterID src, RegisterID dest)
    571     {
    572         m_assembler.imull_i32r(src, imm.m_value, dest);
    573     }
    574    
    575     void not32(RegisterID srcDest)
    576     {
    577         m_assembler.notl_r(srcDest);
    578     }
    579    
    580     void or32(RegisterID src, RegisterID dest)
    581     {
    582         m_assembler.orl_rr(src, dest);
    583     }
    584 
    585     void or32(Imm32 imm, RegisterID dest)
    586     {
    587         m_assembler.orl_ir(imm.m_value, dest);
    588     }
    589 
    590     void rshift32(RegisterID shift_amount, RegisterID dest)
    591     {
    592         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    593         // need to rejig the shift amount into ecx first, and restore the registers afterwards.
    594         if (shift_amount != X86::ecx) {
    595             swap(shift_amount, X86::ecx);
    596 
    597             // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx"
    598             if (dest == shift_amount)
    599                 m_assembler.sarl_CLr(X86::ecx);
    600             // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx"
    601             else if (dest == X86::ecx)
    602                 m_assembler.sarl_CLr(shift_amount);
    603             // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx"
    604             else
    605                 m_assembler.sarl_CLr(dest);
    606        
    607             swap(shift_amount, X86::ecx);
    608         } else
    609             m_assembler.sarl_CLr(dest);
    610     }
    611 
    612     void rshift32(Imm32 imm, RegisterID dest)
    613     {
    614         m_assembler.sarl_i8r(imm.m_value, dest);
    615     }
    616 
    617     void sub32(RegisterID src, RegisterID dest)
    618     {
    619         m_assembler.subl_rr(src, dest);
    620     }
    621    
    622     void sub32(Imm32 imm, RegisterID dest)
    623     {
    624         m_assembler.subl_ir(imm.m_value, dest);
    625     }
    626    
    627     void sub32(Imm32 imm, Address address)
    628     {
    629         m_assembler.subl_im(imm.m_value, address.offset, address.base);
    630     }
    631 
    632     void sub32(Address src, RegisterID dest)
    633     {
    634         m_assembler.subl_mr(src.offset, src.base, dest);
    635     }
    636 
    637     void xor32(RegisterID src, RegisterID dest)
    638     {
    639         m_assembler.xorl_rr(src, dest);
    640     }
    641 
    642     void xor32(Imm32 imm, RegisterID srcDest)
    643     {
    644         m_assembler.xorl_ir(imm.m_value, srcDest);
    645     }
    646    
    647 
    648     // Memory access operations:
    649     //
    650     // Loads are of the form load(address, destination) and stores of the form
    651     // store(source, address).  The source for a store may be an Imm32.  Address
    652     // operand objects to loads and stores will be implicitly constructed if a
    653     // register is passed.
    654 
    655     void load32(ImplicitAddress address, RegisterID dest)
    656     {
    657         m_assembler.movl_mr(address.offset, address.base, dest);
    658     }
    659 
    660     void load32(BaseIndex address, RegisterID dest)
    661     {
    662         m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    663     }
    664 
    665     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    666     {
    667         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
    668         return DataLabel32(this);
    669     }
    670 
    671     void load16(BaseIndex address, RegisterID dest)
    672     {
    673         m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    674     }
    675 
    676     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    677     {
    678         m_assembler.movl_rm_disp32(src, address.offset, address.base);
    679         return DataLabel32(this);
    680     }
    681 
    682     void store32(RegisterID src, ImplicitAddress address)
    683     {
    684         m_assembler.movl_rm(src, address.offset, address.base);
    685     }
    686 
    687     void store32(RegisterID src, BaseIndex address)
    688     {
    689         m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    690     }
    691 
    692     void store32(Imm32 imm, ImplicitAddress address)
    693     {
    694         m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    695     }
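A sketch of the three addressing forms the loads and stores above accept (a bare register, base plus offset, and base plus scaled index; registers are illustrative):

    masm.load32(X86::esi, X86::eax);                                  // ImplicitAddress built from a register
    masm.load32(MacroAssembler::Address(X86::esi, 4), X86::edx);      // base + constant offset
    masm.store32(X86::eax, MacroAssembler::BaseIndex(X86::esi, X86::ecx, MacroAssembler::TimesFour));  // base + index*4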
    696    
    697 
    698     // Stack manipulation operations:
    699     //
    700     // The ABI is assumed to provide a stack abstraction to memory,
    701     // containing machine-word-sized units of data.  Push and pop
    702     // operations add and remove a single register-sized unit of data
    703     // to or from the stack.  Peek and poke operations read or write
    704     // values on the stack, without moving the current stack position.
    705    
    706     void pop(RegisterID dest)
    707     {
    708         m_assembler.pop_r(dest);
    709     }
    710 
    711     void push(RegisterID src)
    712     {
    713         m_assembler.push_r(src);
    714     }
    715 
    716     void push(Address address)
    717     {
    718         m_assembler.push_m(address.offset, address.base);
    719     }
    720 
    721     void push(Imm32 imm)
    722     {
    723         m_assembler.push_i32(imm.m_value);
    724     }
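A sketch of the push/pop pairing just defined; each call moves exactly one machine word (registers are illustrative):

    masm.push(X86::eax);                      // save eax
    masm.push(MacroAssembler::Imm32(0));      // push an immediate argument
    masm.pop(X86::edx);                       // the immediate comes back off first
    masm.pop(X86::eax);                       // restore eax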
    725 
    726     // Register move operations:
    727     //
    728     // Move values in registers.
    729 
    730     void move(Imm32 imm, RegisterID dest)
    731     {
    732         // Note: on 64-bit the Imm32 value is zero-extended into the register; it
    733         // may be useful to have a separate version that sign-extends the value?
    734         if (!imm.m_value)
    735             m_assembler.xorl_rr(dest, dest);
    736         else
    737             m_assembler.movl_i32r(imm.m_value, dest);
    738     }
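For illustration, the two code paths the zero special-case above produces (masm is a placeholder assembler):

    masm.move(MacroAssembler::Imm32(0), X86::eax);    // emits xorl %eax, %eax (shorter encoding, no immediate)
    masm.move(MacroAssembler::Imm32(10), X86::edx);   // emits movl $10, %edx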
    739 
    740 #if PLATFORM(X86_64)
    741     void move(RegisterID src, RegisterID dest)
    742     {
    743         // Note: on 64-bit this is a full register move; perhaps it would be
    744         // useful to have separate move32 & movePtr, with move32 zero extending?
    745         m_assembler.movq_rr(src, dest);
    746     }
    747 
    748     void move(ImmPtr imm, RegisterID dest)
    749     {
    750         if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
    751             m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
    752         else
    753             m_assembler.movq_i64r(imm.asIntptr(), dest);
    754     }
    755 
    756     void swap(RegisterID reg1, RegisterID reg2)
    757     {
    758         m_assembler.xchgq_rr(reg1, reg2);
    759     }
    760 
    761     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    762     {
    763         m_assembler.movsxd_rr(src, dest);
    764     }
    765 
    766     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    767     {
    768         m_assembler.movl_rr(src, dest);
    769     }
    770 #else
    771     void move(RegisterID src, RegisterID dest)
    772     {
    773         m_assembler.movl_rr(src, dest);
    774     }
    775 
    776     void move(ImmPtr imm, RegisterID dest)
    777     {
    778         m_assembler.movl_i32r(imm.asIntptr(), dest);
    779     }
    780 
    781     void swap(RegisterID reg1, RegisterID reg2)
    782     {
    783         m_assembler.xchgl_rr(reg1, reg2);
    784     }
    785 
    786     void signExtend32ToPtr(RegisterID src, RegisterID dest)
    787     {
    788         if (src != dest)
    789             move(src, dest);
    790     }
    791 
    792     void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    793     {
    794         if (src != dest)
    795             move(src, dest);
    796     }
    797 #endif
    798 
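The distinction the notes above draw is between implicit and explicit widening: on x86-64, move() of an Imm32 zero-extends into the full register, while signExtend32ToPtr()/zeroExtend32ToPtr() state the intended extension, and on x86 they collapse to plain moves. A sketch, assuming a MacroAssembler instance named masm and the X86 register names used in this file:

    masm.move(MacroAssembler::Imm32(-1), X86::eax);  // on x86-64 the upper 32 bits of the register become zero
    masm.zeroExtend32ToPtr(X86::eax, X86::ecx);      // explicit zero-extension to pointer width
    masm.signExtend32ToPtr(X86::eax, X86::edx);      // sign-extension instead (movsxd on x86-64)
    masm.swap(X86::ecx, X86::edx);                   // exchange two registers without a scratch register
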
    799 
    800     // Forwards / external control flow operations:
    801     //
    802     // This set of jump and conditional branch operations returns a Jump
    803     // object which may be linked at a later point, allowing forwards jumps,
    804     // or jumps that will require external linkage (after the code has been
    805     // relocated).
    806     //
    807     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    808     // respectively; for unsigned comparisons the names b, a, be, and ae are
    809     // used (representing the names 'below' and 'above').
    810     //
    811     // Operands to the comparison are provided in the expected order, e.g.
    812     // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value held
    813     // in reg1, when treated as a signed 32-bit value, is less than or equal to 5.
    814     //
    815     // The Zero and NonZero conditions test whether the first operand is equal to
    816     // zero, and take an optional second operand of a mask under which to perform the test.
    817 
    818 public:
    819     Jump branch32(Condition cond, RegisterID left, RegisterID right)
    820     {
    821         m_assembler.cmpl_rr(right, left);
    822         return Jump(m_assembler.jCC(cond));
    823     }
    824 
    825     Jump branch32(Condition cond, RegisterID left, Imm32 right)
    826     {
    827         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    828             m_assembler.testl_rr(left, left);
    829         else
    830             m_assembler.cmpl_ir(right.m_value, left);
    831         return Jump(m_assembler.jCC(cond));
    832     }
    833    
    834     Jump branch32(Condition cond, RegisterID left, Address right)
    835     {
    836         m_assembler.cmpl_mr(right.offset, right.base, left);
    837         return Jump(m_assembler.jCC(cond));
    838     }
    839    
    840     Jump branch32(Condition cond, Address left, RegisterID right)
    841     {
    842         m_assembler.cmpl_rm(right, left.offset, left.base);
    843         return Jump(m_assembler.jCC(cond));
    844     }
    845 
    846     Jump branch32(Condition cond, Address left, Imm32 right)
    847     {
    848         m_assembler.cmpl_im(right.m_value, left.offset, left.base);
    849         return Jump(m_assembler.jCC(cond));
    850     }
    851 
    852     Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    853     {
    854         m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
    855         return Jump(m_assembler.jCC(cond));
    856     }
    857 
    858     Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    859     {
    860         ASSERT((cond == Zero) || (cond == NonZero));
    861         m_assembler.testl_rr(reg, mask);
    862         return Jump(m_assembler.jCC(cond));
    863     }
    864 
    865     Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    866     {
    867         ASSERT((cond == Zero) || (cond == NonZero));
    868         // If the mask only covers the low seven bits, the test can use the shorter testb encoding.
    869         if (mask.m_value == -1)
    870             m_assembler.testl_rr(reg, reg);
    871         else if ((mask.m_value & ~0x7f) == 0)
    872             m_assembler.testb_i8r(mask.m_value, reg);
    873         else
    874             m_assembler.testl_i32r(mask.m_value, reg);
    875         return Jump(m_assembler.jCC(cond));
    876     }
    877 
    878     Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    879     {
    880         ASSERT((cond == Zero) || (cond == NonZero));
    881         if (mask.m_value == -1)
    882             m_assembler.cmpl_im(0, address.offset, address.base);
    883         else
    884             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    885         return Jump(m_assembler.jCC(cond));
    886     }
    887 
    888     Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    889     {
    890         ASSERT((cond == Zero) || (cond == NonZero));
    891         if (mask.m_value == -1)
    892             m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
    893         else
    894             m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    895         return Jump(m_assembler.jCC(cond));
    896     }
    897 
    898     Jump jump()
    899     {
    900         return Jump(m_assembler.jmp());
    901     }
    902 
    903     void jump(RegisterID target)
    904     {
    905         m_assembler.jmp_r(target);
    906     }
    907 
    908     // Address is a memory location containing the address to jump to
    909     void jump(Address address)
    910     {
    911         m_assembler.jmp_m(address.offset, address.base);
    912     }
    913 
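A sketch of the forwards-branch pattern these methods support: the Jump is emitted before its target exists and is bound once code generation reaches that point. This assumes a MacroAssembler instance named masm, and that Jump::link() binds a pending jump to the current code position (the linkTo() calls later in this file are the label-targeted equivalent):

    // Skip the increment when eax is zero; the target is not yet known.
    MacroAssembler::Jump isZero = masm.branchTest32(MacroAssembler::Zero, X86::eax);
    masm.add32(MacroAssembler::Imm32(1), X86::eax);  // runs only when eax was non-zero
    isZero.link(&masm);                              // bind the branch to this point
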
    914 
    915     // Arithmetic control flow operations:
    916     //
    917     // These conditional branch operations branch based
    918     // on the result of an arithmetic operation.  The operation
    919     // is performed as normal, storing the result.
    920     //
    921     // * Zero and NonZero branches test whether the result is zero.
    922     // * Overflow branches are taken if the (signed) arithmetic
    923     //   operation caused an overflow to occur.
    924    
    925     Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    926     {
    927         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    928         add32(src, dest);
    929         return Jump(m_assembler.jCC(cond));
    930     }
    931    
    932     Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    933     {
    934         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    935         add32(imm, dest);
    936         return Jump(m_assembler.jCC(cond));
    937     }
    938    
    939     Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    940     {
    941         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    942         mul32(src, dest);
    943         return Jump(m_assembler.jCC(cond));
    944     }
    945    
    946     Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    947     {
    948         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    949         mul32(imm, src, dest);
    950         return Jump(m_assembler.jCC(cond));
    951     }
    952    
    953     Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    954     {
    955         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    956         sub32(src, dest);
    957         return Jump(m_assembler.jCC(cond));
    958     }
    959    
    960     Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    961     {
    962         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
    963         sub32(imm, dest);
    964         return Jump(m_assembler.jCC(cond));
    965     }
    966    
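These are the branches a JIT uses for overflow-checked integer arithmetic: perform the operation, then divert to a slow path if it overflowed. A sketch, assuming a MacroAssembler instance named masm and a slow path linked elsewhere:

    // eax += edx, branching out if the signed add overflowed.
    MacroAssembler::Jump overflowed =
        masm.branchAdd32(MacroAssembler::Overflow, X86::edx, X86::eax);
    // ... fast path continues with the 32-bit result in eax ...
    // 'overflowed' is later linked to code that redoes the operation with a
    // wider number representation.
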
    967 
    968     // Miscellaneous operations:
    969 
    970     void breakpoint()
    971     {
    972         m_assembler.int3();
    973     }
    974 
    975     Jump call()
    976     {
    977         return Jump(m_assembler.call());
    978     }
    979 
    980     // FIXME: why does this return a Jump object? - it can't be linked.
    981     // This may be to get a reference to the return address of the call.
    982     //
    983     // This should probably be handled by a separate label type from a regular
    984     // jump.  TODO: add a CallLabel type for the regular call - it can be linked
    985     // like a jump (possibly a subclass of Jump, or something that casts to a Jump).
    986     // Also add a CallReturnLabel type for this to return (just a more JmpDst-like
    987     // form of label; you can get the void* after the code has been linked, but can't
    988     // try to link it like a Jump object), and let the CallLabel be cast into a
    989     // CallReturnLabel.
    990     Jump call(RegisterID target)
    991     {
    992         return Jump(m_assembler.call(target));
    993     }
    994 
    995     void ret()
    996     {
    997         m_assembler.ret();
    998     }
    999 
    1000     void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    1001     {
    1002         m_assembler.cmpl_rr(right, left);
    1003         m_assembler.setCC_r(cond, dest);
    1004         m_assembler.movzbl_rr(dest, dest);
    1005     }
    1006 
    1007     void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    1008     {
    1009         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    1010             m_assembler.testl_rr(left, left);
    1011         else
    1012             m_assembler.cmpl_ir(right.m_value, left);
    1013         m_assembler.setCC_r(cond, dest);
    1014         m_assembler.movzbl_rr(dest, dest);
    1015     }
    1016 
    1017     // FIXME:
    1018     // The mask should be optional... perhaps the argument order should be
    1019     // dest-src, since operations always have a dest? ... possibly not true,
    1020     // considering asm ops like test, or pseudo ops like pop().
    1021     void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    1022     {
    1023         if (mask.m_value == -1)
    1024             m_assembler.cmpl_im(0, address.offset, address.base);
    1025         else
    1026             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    1027         m_assembler.setCC_r(cond, dest);
    1028         m_assembler.movzbl_rr(dest, dest);
    1029     }
    1030 };
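
The set32()/setTest32() operations just above materialize a comparison result as 0 or 1 in a register instead of branching on it. A sketch, assuming a MacroAssembler instance named masm:

    // ecx = (eax == edx) ? 1 : 0; the movzbl in the implementation clears the upper bits.
    masm.set32(MacroAssembler::Equal, X86::eax, X86::edx, X86::ecx);
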
    1031 
    1032 
    1033 #if PLATFORM(X86_64)
    103436
    103537class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
     
    1348350};
    1349351
    1350 typedef MacroAssemblerX86_64 MacroAssemblerBase;
    1351 
    1352 #else
    1353 
    1354 class MacroAssemblerX86 : public MacroAssemblerX86Common {
    1355 public:
    1356     static const Scale ScalePtr = TimesFour;
    1357 
    1358     using MacroAssemblerX86Common::add32;
    1359     using MacroAssemblerX86Common::sub32;
    1360     using MacroAssemblerX86Common::load32;
    1361     using MacroAssemblerX86Common::store32;
    1362     using MacroAssemblerX86Common::branch32;
    1363 
    1364     void add32(Imm32 imm, RegisterID src, RegisterID dest)
    1365     {
    1366         m_assembler.leal_mr(imm.m_value, src, dest);
    1367     }
    1368 
    1369     void add32(Imm32 imm, AbsoluteAddress address)
    1370     {
    1371         m_assembler.addl_im(imm.m_value, address.m_ptr);
    1372     }
    1373    
    1374     void sub32(Imm32 imm, AbsoluteAddress address)
    1375     {
    1376         m_assembler.subl_im(imm.m_value, address.m_ptr);
    1377     }
    1378 
    1379     void load32(void* address, RegisterID dest)
    1380     {
    1381         m_assembler.movl_mr(address, dest);
    1382     }
    1383 
    1384     void store32(Imm32 imm, void* address)
    1385     {
    1386         m_assembler.movl_i32m(imm.m_value, address);
    1387     }
    1388 
    1389     Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    1390     {
    1391         m_assembler.cmpl_rm(right, left.m_ptr);
    1392         return Jump(m_assembler.jCC(cond));
    1393     }
    1394 
    1395     Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    1396     {
    1397         m_assembler.cmpl_im(right.m_value, left.m_ptr);
    1398         return Jump(m_assembler.jCC(cond));
    1399     }
    1400 
    1401     Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1402     {
    1403         m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
    1404         dataLabel = DataLabelPtr(this);
    1405         return Jump(m_assembler.jCC(cond));
    1406     }
    1407 
    1408     Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    1409     {
    1410         m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
    1411         dataLabel = DataLabelPtr(this);
    1412         return Jump(m_assembler.jCC(cond));
    1413     }
    1414 
    1415     DataLabelPtr storePtrWithPatch(Address address)
    1416     {
    1417         m_assembler.movl_i32m(0, address.offset, address.base);
    1418         return DataLabelPtr(this);
    1419     }
    1420 };
    1421 
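branchPtrWithPatch() compares against a placeholder pointer immediate and hands back a DataLabelPtr marking it, so the real value (for example, an expected object pointer in a generated stub) can be patched in after code generation. A sketch, assuming a MacroAssembler instance named masm and that DataLabelPtr is default-constructible, as its use here as an out-parameter suggests:

    MacroAssembler::DataLabelPtr expectedPtr;
    MacroAssembler::Jump mismatch = masm.branchPtrWithPatch(
        MacroAssembler::NotEqual, X86::eax, expectedPtr);
    // 'expectedPtr' marks the emitted pointer immediate; once the real value is
    // known it is patched over the ImmPtr(0) placeholder, and 'mismatch' is
    // linked to the fallback path.
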
    1422 typedef MacroAssemblerX86 MacroAssemblerBase;
    1423 
    1424 #endif
    1425 
    1426 
    1427 class MacroAssembler : public MacroAssemblerBase {
    1428 public:
    1429 
    1430     using MacroAssemblerBase::pop;
    1431     using MacroAssemblerBase::jump;
    1432     using MacroAssemblerBase::branch32;
    1433     using MacroAssemblerBase::branch16;
    1434 #if PLATFORM(X86_64)
    1435     using MacroAssemblerBase::branchPtr;
    1436     using MacroAssemblerBase::branchTestPtr;
    1437 #endif
    1438 
    1439 
    1440     // Platform-agnostic convenience functions,
    1441     // described in terms of other macro assembly methods.
    1442     void pop()
    1443     {
    1444         addPtr(Imm32(sizeof(void*)), stackPointerRegister);
    1445     }
    1446    
    1447     void peek(RegisterID dest, int index = 0)
    1448     {
    1449         loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    1450     }
    1451 
    1452     void poke(RegisterID src, int index = 0)
    1453     {
    1454         storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
    1455     }
    1456 
    1457     void poke(Imm32 value, int index = 0)
    1458     {
    1459         store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
    1460     }
    1461 
    1462     void poke(ImmPtr imm, int index = 0)
    1463     {
    1464         storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
    1465     }
    1466 
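peek() and poke() read and write stack slots by index without moving the stack pointer, which is how generated code can place values at known stack offsets (for example, arguments for a helper call). A sketch, assuming a MacroAssembler instance named masm:

    masm.poke(X86::eax, 0);                       // write stack slot 0
    masm.poke(MacroAssembler::Imm32(16), 1);      // write stack slot 1
    // ... e.g. call a helper that reads its arguments from these slots ...
    masm.peek(X86::eax, 0);                       // read slot 0 back
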
    1467 
    1468     // Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
    1469     void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
    1470     {
    1471         branchPtr(cond, op1, imm).linkTo(target, this);
    1472     }
    1473 
    1474     void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
    1475     {
    1476         branch32(cond, op1, op2).linkTo(target, this);
    1477     }
    1478 
    1479     void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
    1480     {
    1481         branch32(cond, op1, imm).linkTo(target, this);
    1482     }
    1483 
    1484     void branch32(Condition cond, RegisterID left, Address right, Label target)
    1485     {
    1486         branch32(cond, left, right).linkTo(target, this);
    1487     }
    1488 
    1489     void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
    1490     {
    1491         branch16(cond, left, right).linkTo(target, this);
    1492     }
    1493    
    1494     void branchTestPtr(Condition cond, RegisterID reg, Label target)
    1495     {
    1496         branchTestPtr(cond, reg).linkTo(target, this);
    1497     }
    1498 
    1499     void jump(Label target)
    1500     {
    1501         jump().linkTo(target, this);
    1502     }
    1503 
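Backwards branches take an already-bound Label as the target, which is the natural shape for loops: mark the loop head, then branch back to it. A sketch, assuming a MacroAssembler instance named masm and that Label can be constructed from the assembler in the same way as the DataLabel classes above:

    masm.move(MacroAssembler::Imm32(10), X86::ecx);   // loop counter
    MacroAssembler::Label loopHead(&masm);            // mark the loop head
    // ... loop body ...
    masm.sub32(MacroAssembler::Imm32(1), X86::ecx);
    masm.branchTestPtr(MacroAssembler::NonZero, X86::ecx, loopHead);  // loop while non-zero
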
    1504 
    1505     // Ptr methods
    1506     // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    1507 #if !PLATFORM(X86_64)
    1508     void addPtr(RegisterID src, RegisterID dest)
    1509     {
    1510         add32(src, dest);
    1511     }
    1512 
    1513     void addPtr(Imm32 imm, RegisterID srcDest)
    1514     {
    1515         add32(imm, srcDest);
    1516     }
    1517 
    1518     void addPtr(ImmPtr imm, RegisterID dest)
    1519     {
    1520         add32(Imm32(imm), dest);
    1521     }
    1522 
    1523     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    1524     {
    1525         add32(imm, src, dest);
    1526     }
    1527 
    1528     void andPtr(RegisterID src, RegisterID dest)
    1529     {
    1530         and32(src, dest);
    1531     }
    1532 
    1533     void andPtr(Imm32 imm, RegisterID srcDest)
    1534     {
    1535         and32(imm, srcDest);
    1536     }
    1537 
    1538     void orPtr(RegisterID src, RegisterID dest)
    1539     {
    1540         or32(src, dest);
    1541     }
    1542 
    1543     void orPtr(ImmPtr imm, RegisterID dest)
    1544     {
    1545         or32(Imm32(imm), dest);
    1546     }
    1547 
    1548     void orPtr(Imm32 imm, RegisterID dest)
    1549     {
    1550         or32(imm, dest);
    1551     }
    1552 
    1553     void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    1554     {
    1555         rshift32(shift_amount, dest);
    1556     }
    1557 
    1558     void rshiftPtr(Imm32 imm, RegisterID dest)
    1559     {
    1560         rshift32(imm, dest);
    1561     }
    1562 
    1563     void subPtr(RegisterID src, RegisterID dest)
    1564     {
    1565         sub32(src, dest);
    1566     }
    1567    
    1568     void subPtr(Imm32 imm, RegisterID dest)
    1569     {
    1570         sub32(imm, dest);
    1571     }
    1572    
    1573     void subPtr(ImmPtr imm, RegisterID dest)
    1574     {
    1575         sub32(Imm32(imm), dest);
    1576     }
    1577 
    1578     void xorPtr(RegisterID src, RegisterID dest)
    1579     {
    1580         xor32(src, dest);
    1581     }
    1582 
    1583     void xorPtr(Imm32 imm, RegisterID srcDest)
    1584     {
    1585         xor32(imm, srcDest);
    1586     }
    1587 
    1588 
    1589     void loadPtr(ImplicitAddress address, RegisterID dest)
    1590     {
    1591         load32(address, dest);
    1592     }
    1593 
    1594     void loadPtr(BaseIndex address, RegisterID dest)
    1595     {
    1596         load32(address, dest);
    1597     }
    1598 
    1599     void loadPtr(void* address, RegisterID dest)
    1600     {
    1601         load32(address, dest);
    1602     }
    1603 
    1604     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    1605     {
    1606         return load32WithAddressOffsetPatch(address, dest);
    1607     }
    1608 
    1609     void storePtr(RegisterID src, ImplicitAddress address)
    1610     {
    1611         store32(src, address);
    1612     }
    1613 
    1614     void storePtr(RegisterID src, BaseIndex address)
    1615     {
    1616         store32(src, address);
    1617     }
    1618 
    1619     void storePtr(ImmPtr imm, ImplicitAddress address)
    1620     {
    1621         store32(Imm32(imm), address);
    1622     }
    1623 
    1624     void storePtr(ImmPtr imm, void* address)
    1625     {
    1626         store32(Imm32(imm), address);
    1627     }
    1628 
    1629     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    1630     {
    1631         return store32WithAddressOffsetPatch(src, address);
    1632     }
    1633 
    1634 
    1635     Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    1636     {
    1637         return branch32(cond, left, right);
    1638     }
    1639 
    1640     Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    1641     {
    1642         return branch32(cond, left, Imm32(right));
    1643     }
    1644 
    1645     Jump branchPtr(Condition cond, RegisterID left, Address right)
    1646     {
    1647         return branch32(cond, left, right);
    1648     }
    1649 
    1650     Jump branchPtr(Condition cond, Address left, RegisterID right)
    1651     {
    1652         return branch32(cond, left, right);
    1653     }
    1654 
    1655     Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    1656     {
    1657         return branch32(cond, left, right);
    1658     }
    1659 
    1660     Jump branchPtr(Condition cond, Address left, ImmPtr right)
    1661     {
    1662         return branch32(cond, left, Imm32(right));
    1663     }
    1664 
    1665     Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
    1666     {
    1667         return branch32(cond, left, Imm32(right));
    1668     }
    1669 
    1670     Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    1671     {
    1672         return branchTest32(cond, reg, mask);
    1673     }
    1674 
    1675     Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    1676     {
    1677         return branchTest32(cond, reg, mask);
    1678     }
    1679 
    1680     Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    1681     {
    1682         return branchTest32(cond, address, mask);
    1683     }
    1684 
    1685     Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    1686     {
    1687         return branchTest32(cond, address, mask);
    1688     }
    1689 
    1690 
    1691     Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    1692     {
    1693         return branchAdd32(cond, src, dest);
    1694     }
    1695 
    1696     Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    1697     {
    1698         return branchSub32(cond, imm, dest);
    1699     }
    1700 #endif
    1701 
    1702 };
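
The Ptr forms let JIT code be written once in terms of pointer-sized values: on x86 they forward to the 32-bit operations above, while the x86-64 class supplies genuine 64-bit implementations. A sketch of pointer-width code written against this interface, assuming a MacroAssembler instance named masm and the X86 register names used earlier:

    masm.loadPtr(MacroAssembler::Address(X86::edx, 0), X86::eax);   // load a pointer-sized field
    MacroAssembler::Jump isNull =
        masm.branchPtr(MacroAssembler::Equal, X86::eax, MacroAssembler::ImmPtr(0));
    masm.storePtr(X86::eax, MacroAssembler::Address(X86::ecx, 0));  // store it elsewhere
    isNull.link(&masm);                                             // null pointers skip the store
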
    1703 
    1704352} // namespace JSC
    1705353
    1706354#endif // ENABLE(ASSEMBLER)
    1707355
    1708 #endif // MacroAssembler_h
     356#endif // MacroAssemblerX86_64_h