Ignore:
Timestamp:
Feb 4, 2009, 6:01:25 PM (16 years ago)
Author:
[email protected]
Message:

2009-02-04 Gavin Barraclough <[email protected]>

Reviewed by Sam Weinig.

This patch tidies up the MacroAssembler, cleaning up the code and refactoring out the
platform-specific parts. The MacroAssembler gets split up like a beef burger, with the
platform-agnostic data types being the lower bun (in the form of the class AbstractMacroAssembler),
the platform-specific code generation forming a big meaty patty of methods like 'add32',
'branch32', etc (MacroAssemblerX86), and finally topped off with the bun-lid of the
MacroAssembler class itself, providing convenience methods such as the stack peek & poke,
and backwards branch methods, all of which can be described in a platform independent
way using methods from the base class. The AbstractMacroAssembler is templated on the
type of the assembler class that will be used for code generation, and the three layers
are held together with the cocktail stick of inheritance.

The above description is a slight simplification since the MacroAssemblerX86 is actually
formed from two layers (in effect giving us a kind of bacon double cheeseburger) - with the
bulk of methods that are common between x86 & x86-64 implemented in MacroAssemblerX86Common,
which forms a base class for MacroAssemblerX86 and MacroAssemblerX86_64 (which add the methods
specific to the given platform).

I'm landing these changes first without splitting the classes across multiple files,
I will follow up with a second patch to split up the file MacroAssembler.h.

  • assembler/MacroAssembler.h: (JSC::AbstractMacroAssembler::): (JSC::AbstractMacroAssembler::DataLabelPtr::DataLabelPtr): (JSC::AbstractMacroAssembler::DataLabelPtr::patch): (JSC::AbstractMacroAssembler::DataLabel32::DataLabel32): (JSC::AbstractMacroAssembler::DataLabel32::patch): (JSC::AbstractMacroAssembler::Label::Label): (JSC::AbstractMacroAssembler::Jump::Jump): (JSC::AbstractMacroAssembler::Jump::link): (JSC::AbstractMacroAssembler::Jump::linkTo): (JSC::AbstractMacroAssembler::Jump::patch): (JSC::AbstractMacroAssembler::JumpList::link): (JSC::AbstractMacroAssembler::JumpList::linkTo): (JSC::AbstractMacroAssembler::PatchBuffer::link): (JSC::AbstractMacroAssembler::PatchBuffer::addressOf): (JSC::AbstractMacroAssembler::PatchBuffer::setPtr): (JSC::AbstractMacroAssembler::size): (JSC::AbstractMacroAssembler::copyCode): (JSC::AbstractMacroAssembler::label): (JSC::AbstractMacroAssembler::align): (JSC::AbstractMacroAssembler::differenceBetween): (JSC::MacroAssemblerX86Common::xor32): (JSC::MacroAssemblerX86Common::load32WithAddressOffsetPatch): (JSC::MacroAssemblerX86Common::store32WithAddressOffsetPatch): (JSC::MacroAssemblerX86Common::move): (JSC::MacroAssemblerX86Common::swap): (JSC::MacroAssemblerX86Common::signExtend32ToPtr): (JSC::MacroAssemblerX86Common::zeroExtend32ToPtr): (JSC::MacroAssemblerX86Common::branch32): (JSC::MacroAssemblerX86Common::jump): (JSC::MacroAssemblerX86_64::add32): (JSC::MacroAssemblerX86_64::sub32): (JSC::MacroAssemblerX86_64::load32): (JSC::MacroAssemblerX86_64::store32): (JSC::MacroAssemblerX86_64::addPtr): (JSC::MacroAssemblerX86_64::andPtr): (JSC::MacroAssemblerX86_64::orPtr): (JSC::MacroAssemblerX86_64::rshiftPtr): (JSC::MacroAssemblerX86_64::subPtr): (JSC::MacroAssemblerX86_64::xorPtr): (JSC::MacroAssemblerX86_64::loadPtr): (JSC::MacroAssemblerX86_64::loadPtrWithAddressOffsetPatch): (JSC::MacroAssemblerX86_64::storePtr): (JSC::MacroAssemblerX86_64::storePtrWithAddressOffsetPatch): (JSC::MacroAssemblerX86_64::branchPtr): 
(JSC::MacroAssemblerX86_64::branchTestPtr): (JSC::MacroAssemblerX86_64::branchAddPtr): (JSC::MacroAssemblerX86_64::branchSubPtr): (JSC::MacroAssemblerX86_64::branchPtrWithPatch): (JSC::MacroAssemblerX86_64::storePtrWithPatch): (JSC::MacroAssemblerX86::add32): (JSC::MacroAssemblerX86::sub32): (JSC::MacroAssemblerX86::load32): (JSC::MacroAssemblerX86::store32): (JSC::MacroAssemblerX86::branch32): (JSC::MacroAssemblerX86::branchPtrWithPatch): (JSC::MacroAssemblerX86::storePtrWithPatch): (JSC::MacroAssembler::pop): (JSC::MacroAssembler::peek): (JSC::MacroAssembler::poke): (JSC::MacroAssembler::branchPtr): (JSC::MacroAssembler::branch32): (JSC::MacroAssembler::branch16): (JSC::MacroAssembler::branchTestPtr): (JSC::MacroAssembler::addPtr): (JSC::MacroAssembler::andPtr): (JSC::MacroAssembler::orPtr): (JSC::MacroAssembler::rshiftPtr): (JSC::MacroAssembler::subPtr): (JSC::MacroAssembler::xorPtr): (JSC::MacroAssembler::loadPtr): (JSC::MacroAssembler::loadPtrWithAddressOffsetPatch): (JSC::MacroAssembler::storePtr): (JSC::MacroAssembler::storePtrWithAddressOffsetPatch): (JSC::MacroAssembler::branchAddPtr): (JSC::MacroAssembler::branchSubPtr):
  • jit/JITArithmetic.cpp: (JSC::JIT::compileBinaryArithOp):
File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/JavaScriptCore/assembler/MacroAssembler.h

    r40562 r40656  
    3535namespace JSC {
    3636
    37 class MacroAssembler {
     37template <class AssemblerType>
     38class AbstractMacroAssembler {
    3839protected:
    39     X86Assembler m_assembler;
    40 
    41 #if PLATFORM(X86_64)
    42     static const X86::RegisterID scratchRegister = X86::r11;
    43 #endif
     40    AssemblerType m_assembler;
    4441
    4542public:
    46     typedef X86::RegisterID RegisterID;
    47 
    48     // Note: do not rely on values in this enum, these will change (to 0..3).
     43    typedef typename AssemblerType::RegisterID RegisterID;
     44    typedef typename AssemblerType::JmpSrc JmpSrc;
     45    typedef typename AssemblerType::JmpDst JmpDst;
     46
    4947    enum Scale {
    5048        TimesOne,
     
    5250        TimesFour,
    5351        TimesEight,
    54 #if PLATFORM(X86)
    55         ScalePtr = TimesFour
     52    };
     53
     54    // Address:
     55    //
     56    // Describes a simple base-offset address.
     57    struct Address {
     58        explicit Address(RegisterID base, int32_t offset = 0)
     59            : base(base)
     60            , offset(offset)
     61        {
     62        }
     63
     64        RegisterID base;
     65        int32_t offset;
     66    };
     67
     68    // ImplicitAddress:
     69    //
     70    // This class is used for explicit 'load' and 'store' operations
     71    // (as opposed to situations in which a memory operand is provided
     72    // to a generic operation, such as an integer arithmetic instruction).
     73    //
     74    // In the case of a load (or store) operation we want to permit
     75    // addresses to be implicitly constructed, e.g. the two calls:
     76    //
     77    //     load32(Address(addrReg), destReg);
     78    //     load32(addrReg, destReg);
     79    //
     80    // Are equivalent, and the explicit wrapping of the Address in the former
     81    // is unnecessary.
     82    struct ImplicitAddress {
     83        ImplicitAddress(RegisterID base)
     84            : base(base)
     85            , offset(0)
     86        {
     87        }
     88
     89        ImplicitAddress(Address address)
     90            : base(address.base)
     91            , offset(address.offset)
     92        {
     93        }
     94
     95        RegisterID base;
     96        int32_t offset;
     97    };
     98
     99    // BaseIndex:
     100    //
     101    // Describes a complex addressing mode.
     102    struct BaseIndex {
     103        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
     104            : base(base)
     105            , index(index)
     106            , scale(scale)
     107            , offset(offset)
     108        {
     109        }
     110
     111        RegisterID base;
     112        RegisterID index;
     113        Scale scale;
     114        int32_t offset;
     115    };
     116
     117    // AbsoluteAddress:
     118    //
     119    // Describes an memory operand given by a pointer.  For regular load & store
     120    // operations an unwrapped void* will be used, rather than using this.
     121    struct AbsoluteAddress {
     122        explicit AbsoluteAddress(void* ptr)
     123            : m_ptr(ptr)
     124        {
     125        }
     126
     127        void* m_ptr;
     128    };
     129
     130
     131    class Jump;
     132    class PatchBuffer;
     133
     134    // DataLabelPtr:
     135    //
     136    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
     137    // patched after the code has been generated.
     138    class DataLabelPtr {
     139        template<class AssemblerType_T>
     140        friend class AbstractMacroAssembler;
     141        friend class PatchBuffer;
     142
     143    public:
     144        DataLabelPtr()
     145        {
     146        }
     147
     148        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
     149            : m_label(masm->m_assembler.label())
     150        {
     151        }
     152
     153        static void patch(void* address, void* value)
     154        {
     155            AssemblerType::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
     156        }
     157       
     158    private:
     159        JmpDst m_label;
     160    };
     161
     162    // DataLabel32:
     163    //
     164    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
     165    // patched after the code has been generated.
     166    class DataLabel32 {
     167        template<class AssemblerType_T>
     168        friend class AbstractMacroAssembler;
     169        friend class PatchBuffer;
     170
     171    public:
     172        DataLabel32()
     173        {
     174        }
     175
     176        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
     177            : m_label(masm->m_assembler.label())
     178        {
     179        }
     180
     181        static void patch(void* address, int32_t value)
     182        {
     183            AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(address), value);
     184        }
     185
     186    private:
     187        JmpDst m_label;
     188    };
     189
     190    // Label:
     191    //
     192    // A Label records a point in the generated instruction stream, typically such that
     193    // it may be used as a destination for a jump.
     194    class Label {
     195        friend class Jump;
     196        template<class AssemblerType_T>
     197        friend class AbstractMacroAssembler;
     198        friend class PatchBuffer;
     199
     200    public:
     201        Label()
     202        {
     203        }
     204
     205        Label(AbstractMacroAssembler<AssemblerType>* masm)
     206            : m_label(masm->m_assembler.label())
     207        {
     208        }
     209       
     210    private:
     211        JmpDst m_label;
     212    };
     213
     214
     215    // Jump:
     216    //
     217    // A jump object is a reference to a jump instruction that has been planted
     218    // into the code buffer - it is typically used to link the jump, setting the
     219    // relative offset such that when executed it will jump to the desired
     220    // destination.
     221    //
     222    // Jump objects retain a pointer to the assembler for syntactic purposes -
     223    // to allow the jump object to be able to link itself, e.g.:
     224    //
     225    //     Jump forwardsBranch = jne32(Imm32(0), reg1);
     226    //     // ...
     227    //     forwardsBranch.link();
     228    //
     229    // Jumps may also be linked to a Label.
     230    class Jump {
     231        friend class PatchBuffer;
     232        template<class AssemblerType_T>
     233        friend class AbstractMacroAssembler;
     234
     235    public:
     236        Jump()
     237        {
     238        }
     239       
     240        Jump(JmpSrc jmp)
     241            : m_jmp(jmp)
     242        {
     243        }
     244       
     245        void link(AbstractMacroAssembler<AssemblerType>* masm)
     246        {
     247            masm->m_assembler.link(m_jmp, masm->m_assembler.label());
     248        }
     249       
     250        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
     251        {
     252            masm->m_assembler.link(m_jmp, label.m_label);
     253        }
     254       
     255        static void patch(void* address, void* destination)
     256        {
     257            AssemblerType::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
     258        }
     259
     260    private:
     261        JmpSrc m_jmp;
     262    };
     263
     264    // JumpList:
     265    //
     266    // A JumpList is a set of Jump objects.
     267    // All jumps in the set will be linked to the same destination.
     268    class JumpList {
     269        friend class PatchBuffer;
     270
     271    public:
     272        void link(AbstractMacroAssembler<AssemblerType>* masm)
     273        {
     274            size_t size = m_jumps.size();
     275            for (size_t i = 0; i < size; ++i)
     276                m_jumps[i].link(masm);
     277            m_jumps.clear();
     278        }
     279       
     280        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
     281        {
     282            size_t size = m_jumps.size();
     283            for (size_t i = 0; i < size; ++i)
     284                m_jumps[i].linkTo(label, masm);
     285            m_jumps.clear();
     286        }
     287       
     288        void append(Jump jump)
     289        {
     290            m_jumps.append(jump);
     291        }
     292       
     293        void append(JumpList& other)
     294        {
     295            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
     296        }
     297
     298        bool empty()
     299        {
     300            return !m_jumps.size();
     301        }
     302
     303    private:
     304        Vector<Jump, 16> m_jumps;
     305    };
     306
     307
     308    // PatchBuffer:
     309    //
     310    // This class assists in linking code generated by the macro assembler, once code generation
     311    // has been completed, and the code has been copied to is final location in memory.  At this
     312    // time pointers to labels within the code may be resolved, and relative offsets to external
     313    // addresses may be fixed.
     314    //
     315    // Specifically:
     316    //   * Jump objects may be linked to external targets,
     317    //   * The address of Jump objects may taken, such that it can later be relinked.
     318    //   * The return address of a Jump object representing a call may be acquired.
     319    //   * The address of a Label pointing into the code may be resolved.
     320    //   * The value referenced by a DataLabel may be fixed.
     321    //
     322    // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
     323    // address of calls, as opposed to a point that can be used to later relink a Jump -
     324    // possibly wrap the later up in an object that can do just that).
     325    class PatchBuffer {
     326    public:
     327        PatchBuffer(void* code)
     328            : m_code(code)
     329        {
     330        }
     331
     332        void link(Jump jump, void* target)
     333        {
     334            AssemblerType::link(m_code, jump.m_jmp, target);
     335        }
     336
     337        void link(JumpList list, void* target)
     338        {
     339            for (unsigned i = 0; i < list.m_jumps.size(); ++i)
     340                AssemblerType::link(m_code, list.m_jumps[i].m_jmp, target);
     341        }
     342
     343        void* addressOf(Jump jump)
     344        {
     345            return AssemblerType::getRelocatedAddress(m_code, jump.m_jmp);
     346        }
     347
     348        void* addressOf(Label label)
     349        {
     350            return AssemblerType::getRelocatedAddress(m_code, label.m_label);
     351        }
     352
     353        void* addressOf(DataLabelPtr label)
     354        {
     355            return AssemblerType::getRelocatedAddress(m_code, label.m_label);
     356        }
     357
     358        void* addressOf(DataLabel32 label)
     359        {
     360            return AssemblerType::getRelocatedAddress(m_code, label.m_label);
     361        }
     362
     363        void setPtr(DataLabelPtr label, void* value)
     364        {
     365            AssemblerType::patchAddress(m_code, label.m_label, value);
     366        }
     367
     368    private:
     369        void* m_code;
     370    };
     371 
     372
     373    // ImmPtr:
     374    //
     375    // A pointer sized immediate operand to an instruction - this is wrapped
     376    // in a class requiring explicit construction in order to differentiate
     377    // from pointers used as absolute addresses to memory operations
     378    struct ImmPtr {
     379        explicit ImmPtr(void* value)
     380            : m_value(value)
     381        {
     382        }
     383
     384        intptr_t asIntptr()
     385        {
     386            return reinterpret_cast<intptr_t>(m_value);
     387        }
     388
     389        void* m_value;
     390    };
     391
     392    // Imm32:
     393    //
     394    // A 32bit immediate operand to an instruction - this is wrapped in a
     395    // class requiring explicit construction in order to prevent RegisterIDs
     396    // (which are implemented as an enum) from accidentally being passed as
     397    // immediate values.
     398    struct Imm32 {
     399        explicit Imm32(int32_t value)
     400            : m_value(value)
     401        {
     402        }
     403
     404#if !PLATFORM(X86_64)
     405        explicit Imm32(ImmPtr ptr)
     406            : m_value(ptr.asIntptr())
     407        {
     408        }
    56409#endif
    57 #if PLATFORM(X86_64)
    58         ScalePtr = TimesEight
    59 #endif
     410
     411        int32_t m_value;
    60412    };
     413
     414    size_t size()
     415    {
     416        return m_assembler.size();
     417    }
     418
     419    void* copyCode(ExecutablePool* allocator)
     420    {
     421        return m_assembler.executableCopy(allocator);
     422    }
     423
     424    Label label()
     425    {
     426        return Label(this);
     427    }
     428   
     429    Label align()
     430    {
     431        m_assembler.align(16);
     432        return Label(this);
     433    }
     434
     435    ptrdiff_t differenceBetween(Label from, Jump to)
     436    {
     437        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
     438    }
     439
     440    ptrdiff_t differenceBetween(Label from, Label to)
     441    {
     442        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
     443    }
     444
     445    ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
     446    {
     447        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
     448    }
     449
     450    ptrdiff_t differenceBetween(Label from, DataLabel32 to)
     451    {
     452        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
     453    }
     454
     455    ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
     456    {
     457        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
     458    }
     459
     460};
     461
     462class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
     463public:
    61464
    62465    typedef X86Assembler::Condition Condition;
     
    75478    static const Condition NonZero = X86Assembler::ConditionNE;
    76479
    77     MacroAssembler()
    78     {
    79     }
    80    
    81     size_t size() { return m_assembler.size(); }
    82     void* copyCode(ExecutablePool* allocator)
    83     {
    84         return m_assembler.executableCopy(allocator);
    85     }
    86 
    87 
    88     // Address:
    89     //
    90     // Describes a simple base-offset address.
    91     struct Address {
    92         explicit Address(RegisterID base, int32_t offset = 0)
    93             : base(base)
    94             , offset(offset)
    95         {
    96         }
    97 
    98         RegisterID base;
    99         int32_t offset;
    100     };
    101 
    102     // ImplicitAddress:
    103     //
    104     // This class is used for explicit 'load' and 'store' operations
    105     // (as opposed to situations in which a memory operand is provided
    106     // to a generic operation, such as an integer arithmetic instruction).
    107     //
    108     // In the case of a load (or store) operation we want to permit
    109     // addresses to be implicitly constructed, e.g. the two calls:
    110     //
    111     //     load32(Address(addrReg), destReg);
    112     //     load32(addrReg, destReg);
    113     //
    114     // Are equivalent, and the explicit wrapping of the Address in the former
    115     // is unnecessary.
    116     struct ImplicitAddress {
    117         ImplicitAddress(RegisterID base)
    118             : base(base)
    119             , offset(0)
    120         {
    121         }
    122 
    123         ImplicitAddress(Address address)
    124             : base(address.base)
    125             , offset(address.offset)
    126         {
    127         }
    128 
    129         RegisterID base;
    130         int32_t offset;
    131     };
    132 
    133     // BaseIndex:
    134     //
    135     // Describes a complex addressing mode.
    136     struct BaseIndex {
    137         BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
    138             : base(base)
    139             , index(index)
    140             , scale(scale)
    141             , offset(offset)
    142         {
    143         }
    144 
    145         RegisterID base;
    146         RegisterID index;
    147         Scale scale;
    148         int32_t offset;
    149     };
    150 
    151     // AbsoluteAddress:
    152     //
    153     // Describes an memory operand given by a pointer.  For regular load & store
    154     // operations an unwrapped void* will be used, rather than using this.
    155     struct AbsoluteAddress {
    156         explicit AbsoluteAddress(void* ptr)
    157             : m_ptr(ptr)
    158         {
    159         }
    160 
    161         void* m_ptr;
    162     };
    163 
    164 
    165     class Jump;
    166     class PatchBuffer;
    167 
    168     // DataLabelPtr:
    169     //
    170     // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    171     // patched after the code has been generated.
    172     class DataLabelPtr {
    173         friend class MacroAssembler;
    174         friend class PatchBuffer;
    175 
    176     public:
    177         DataLabelPtr()
    178         {
    179         }
    180 
    181         DataLabelPtr(MacroAssembler* masm)
    182             : m_label(masm->m_assembler.label())
    183         {
    184         }
    185 
    186         static void patch(void* address, void* value)
    187         {
    188             X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value));
    189         }
    190        
    191     private:
    192         X86Assembler::JmpDst m_label;
    193     };
    194 
    195     // DataLabel32:
    196     //
    197     // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    198     // patched after the code has been generated.
    199     class DataLabel32 {
    200         friend class MacroAssembler;
    201         friend class PatchBuffer;
    202 
    203     public:
    204         DataLabel32()
    205         {
    206         }
    207 
    208         DataLabel32(MacroAssembler* masm)
    209             : m_label(masm->m_assembler.label())
    210         {
    211         }
    212 
    213         static void patch(void* address, int32_t value)
    214         {
    215             X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address), value);
    216         }
    217 
    218     private:
    219         X86Assembler::JmpDst m_label;
    220     };
    221 
    222     // Label:
    223     //
    224     // A Label records a point in the generated instruction stream, typically such that
    225     // it may be used as a destination for a jump.
    226     class Label {
    227         friend class Jump;
    228         friend class MacroAssembler;
    229         friend class PatchBuffer;
    230 
    231     public:
    232         Label()
    233         {
    234         }
    235 
    236         Label(MacroAssembler* masm)
    237             : m_label(masm->m_assembler.label())
    238         {
    239         }
    240        
    241         // FIXME: transitionary method, while we replace JmpSrces with Jumps.
    242         operator X86Assembler::JmpDst()
    243         {
    244             return m_label;
    245         }
    246 
    247     private:
    248         X86Assembler::JmpDst m_label;
    249     };
    250 
    251 
    252     // Jump:
    253     //
    254     // A jump object is a reference to a jump instruction that has been planted
    255     // into the code buffer - it is typically used to link the jump, setting the
    256     // relative offset such that when executed it will jump to the desired
    257     // destination.
    258     //
    259     // Jump objects retain a pointer to the assembler for syntactic purposes -
    260     // to allow the jump object to be able to link itself, e.g.:
    261     //
    262     //     Jump forwardsBranch = jne32(Imm32(0), reg1);
    263     //     // ...
    264     //     forwardsBranch.link();
    265     //
    266     // Jumps may also be linked to a Label.
    267     class Jump {
    268         friend class PatchBuffer;
    269         friend class MacroAssembler;
    270 
    271     public:
    272         Jump()
    273         {
    274         }
    275        
    276         // FIXME: transitionary method, while we replace JmpSrces with Jumps.
    277         Jump(X86Assembler::JmpSrc jmp)
    278             : m_jmp(jmp)
    279         {
    280         }
    281        
    282         void link(MacroAssembler* masm)
    283         {
    284             masm->m_assembler.link(m_jmp, masm->m_assembler.label());
    285         }
    286        
    287         void linkTo(Label label, MacroAssembler* masm)
    288         {
    289             masm->m_assembler.link(m_jmp, label.m_label);
    290         }
    291        
    292         // FIXME: transitionary method, while we replace JmpSrces with Jumps.
    293         operator X86Assembler::JmpSrc()
    294         {
    295             return m_jmp;
    296         }
    297 
    298         static void patch(void* address, void* destination)
    299         {
    300             X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination);
    301         }
    302 
    303     private:
    304         X86Assembler::JmpSrc m_jmp;
    305     };
    306 
    307     // JumpList:
    308     //
    309     // A JumpList is a set of Jump objects.
    310     // All jumps in the set will be linked to the same destination.
    311     class JumpList {
    312         friend class PatchBuffer;
    313 
    314     public:
    315         void link(MacroAssembler* masm)
    316         {
    317             size_t size = m_jumps.size();
    318             for (size_t i = 0; i < size; ++i)
    319                 m_jumps[i].link(masm);
    320             m_jumps.clear();
    321         }
    322        
    323         void linkTo(Label label, MacroAssembler* masm)
    324         {
    325             size_t size = m_jumps.size();
    326             for (size_t i = 0; i < size; ++i)
    327                 m_jumps[i].linkTo(label, masm);
    328             m_jumps.clear();
    329         }
    330        
    331         void append(Jump jump)
    332         {
    333             m_jumps.append(jump);
    334         }
    335        
    336         void append(JumpList& other)
    337         {
    338             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
    339         }
    340 
    341         bool empty()
    342         {
    343             return !m_jumps.size();
    344         }
    345 
    346     private:
    347         Vector<Jump, 16> m_jumps;
    348     };
    349 
    350 
    351     // PatchBuffer:
    352     //
    353     // This class assists in linking code generated by the macro assembler, once code generation
    354     // has been completed, and the code has been copied to is final location in memory.  At this
    355     // time pointers to labels within the code may be resolved, and relative offsets to external
    356     // addresses may be fixed.
    357     //
    358     // Specifically:
    359     //   * Jump objects may be linked to external targets,
    360     //   * The address of Jump objects may taken, such that it can later be relinked.
    361     //   * The return address of a Jump object representing a call may be acquired.
    362     //   * The address of a Label pointing into the code may be resolved.
    363     //   * The value referenced by a DataLabel may be fixed.
    364     //
    365     // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return
    366     // address of calls, as opposed to a point that can be used to later relink a Jump -
    367     // possibly wrap the later up in an object that can do just that).
    368     class PatchBuffer {
    369     public:
    370         PatchBuffer(void* code)
    371             : m_code(code)
    372         {
    373         }
    374 
    375         void link(Jump jump, void* target)
    376         {
    377             X86Assembler::link(m_code, jump.m_jmp, target);
    378         }
    379 
    380         void link(JumpList list, void* target)
    381         {
    382             for (unsigned i = 0; i < list.m_jumps.size(); ++i)
    383                 X86Assembler::link(m_code, list.m_jumps[i], target);
    384         }
    385 
    386         void* addressOf(Jump jump)
    387         {
    388             return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp);
    389         }
    390 
    391         void* addressOf(Label label)
    392         {
    393             return X86Assembler::getRelocatedAddress(m_code, label.m_label);
    394         }
    395 
    396         void* addressOf(DataLabelPtr label)
    397         {
    398             return X86Assembler::getRelocatedAddress(m_code, label.m_label);
    399         }
    400 
    401         void* addressOf(DataLabel32 label)
    402         {
    403             return X86Assembler::getRelocatedAddress(m_code, label.m_label);
    404         }
    405 
    406         void setPtr(DataLabelPtr label, void* value)
    407         {
    408             X86Assembler::patchAddress(m_code, label.m_label, value);
    409         }
    410 
    411     private:
    412         void* m_code;
    413     };
    414  
    415 
    416     // ImmPtr:
    417     //
    418     // A pointer sized immediate operand to an instruction - this is wrapped
    419     // in a class requiring explicit construction in order to differentiate
    420     // from pointers used as absolute addresses to memory operations
    421     struct ImmPtr {
    422         explicit ImmPtr(void* value)
    423             : m_value(value)
    424         {
    425         }
    426 
    427         intptr_t asIntptr()
    428         {
    429             return reinterpret_cast<intptr_t>(m_value);
    430         }
    431 
    432         void* m_value;
    433     };
    434 
    435 
    436     // Imm32:
    437     //
    438     // A 32bit immediate operand to an instruction - this is wrapped in a
    439     // class requiring explicit construction in order to prevent RegisterIDs
    440     // (which are implemented as an enum) from accidentally being passed as
    441     // immediate values.
    442     struct Imm32 {
    443         explicit Imm32(int32_t value)
    444             : m_value(value)
    445         {
    446         }
    447 
    448 #if PLATFORM(X86)
    449         explicit Imm32(ImmPtr ptr)
    450             : m_value(ptr.asIntptr())
    451         {
    452         }
    453 #endif
    454 
    455         int32_t m_value;
    456     };
     480    static const RegisterID stackPointerRegister = X86::esp;
    457481
    458482    // Integer arithmetic operations:
     
    463487    // object).
    464488
    465     void addPtr(RegisterID src, RegisterID dest)
    466     {
    467 #if PLATFORM(X86_64)
    468         m_assembler.addq_rr(src, dest);
    469 #else
    470         add32(src, dest);
    471 #endif
    472     }
    473 
    474     void addPtr(Imm32 imm, RegisterID srcDest)
    475     {
    476 #if PLATFORM(X86_64)
    477         m_assembler.addq_ir(imm.m_value, srcDest);
    478 #else
    479         add32(imm, srcDest);
    480 #endif
    481     }
    482 
    483     void addPtr(ImmPtr imm, RegisterID dest)
    484     {
    485 #if PLATFORM(X86_64)
    486         move(imm, scratchRegister);
    487         m_assembler.addq_rr(scratchRegister, dest);
    488 #else
    489         add32(Imm32(imm), dest);
    490 #endif
    491     }
    492 
    493     void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    494     {
    495         m_assembler.leal_mr(imm.m_value, src, dest);
    496     }
    497 
    498489    void add32(RegisterID src, RegisterID dest)
    499490    {
     
    511502    }
    512503   
    513     void add32(Imm32 imm, AbsoluteAddress address)
    514     {
    515 #if PLATFORM(X86_64)
    516         move(ImmPtr(address.m_ptr), scratchRegister);
    517         add32(imm, Address(scratchRegister));
    518 #else
    519         m_assembler.addl_im(imm.m_value, address.m_ptr);
    520 #endif
    521     }
    522    
    523504    void add32(Address src, RegisterID dest)
    524505    {
     
    526507    }
    527508   
    528     void andPtr(RegisterID src, RegisterID dest)
    529     {
    530 #if PLATFORM(X86_64)
    531         m_assembler.andq_rr(src, dest);
    532 #else
    533         and32(src, dest);
    534 #endif
    535     }
    536 
    537     void andPtr(Imm32 imm, RegisterID srcDest)
    538     {
    539 #if PLATFORM(X86_64)
    540         m_assembler.andq_ir(imm.m_value, srcDest);
    541 #else
    542         and32(imm, srcDest);
    543 #endif
    544     }
    545 
    546509    void and32(RegisterID src, RegisterID dest)
    547510    {
     
    615578    }
    616579   
     580    void or32(RegisterID src, RegisterID dest)
     581    {
     582        m_assembler.orl_rr(src, dest);
     583    }
     584
     585    void or32(Imm32 imm, RegisterID dest)
     586    {
     587        m_assembler.orl_ir(imm.m_value, dest);
     588    }
     589
     590    void rshift32(RegisterID shift_amount, RegisterID dest)
     591    {
     592        // On x86 we can only shift by ecx; if asked to shift by another register we'll
      593        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
     594        if (shift_amount != X86::ecx) {
     595            swap(shift_amount, X86::ecx);
     596
     597            // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
     598            if (dest == shift_amount)
     599                m_assembler.sarl_CLr(X86::ecx);
     600            // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
     601            else if (dest == X86::ecx)
     602                m_assembler.sarl_CLr(shift_amount);
     603            // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
     604            else
     605                m_assembler.sarl_CLr(dest);
     606       
     607            swap(shift_amount, X86::ecx);
     608        } else
     609            m_assembler.sarl_CLr(dest);
     610    }
     611
     612    void rshift32(Imm32 imm, RegisterID dest)
     613    {
     614        m_assembler.sarl_i8r(imm.m_value, dest);
     615    }
     616
     617    void sub32(RegisterID src, RegisterID dest)
     618    {
     619        m_assembler.subl_rr(src, dest);
     620    }
     621   
     622    void sub32(Imm32 imm, RegisterID dest)
     623    {
     624        m_assembler.subl_ir(imm.m_value, dest);
     625    }
     626   
     627    void sub32(Imm32 imm, Address address)
     628    {
     629        m_assembler.subl_im(imm.m_value, address.offset, address.base);
     630    }
     631
     632    void sub32(Address src, RegisterID dest)
     633    {
     634        m_assembler.subl_mr(src.offset, src.base, dest);
     635    }
     636
     637    void xor32(RegisterID src, RegisterID dest)
     638    {
     639        m_assembler.xorl_rr(src, dest);
     640    }
     641
     642    void xor32(Imm32 imm, RegisterID srcDest)
     643    {
     644        m_assembler.xorl_ir(imm.m_value, srcDest);
     645    }
     646   
     647
     648    // Memory access operations:
     649    //
     650    // Loads are of the form load(address, destination) and stores of the form
     651    // store(source, address).  The source for a store may be an Imm32.  Address
      652    // operand objects to loads and stores will be implicitly constructed if a
     653    // register is passed.
     654
     655    void load32(ImplicitAddress address, RegisterID dest)
     656    {
     657        m_assembler.movl_mr(address.offset, address.base, dest);
     658    }
     659
     660    void load32(BaseIndex address, RegisterID dest)
     661    {
     662        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
     663    }
     664
     665    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
     666    {
     667        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
     668        return DataLabel32(this);
     669    }
     670
     671    void load16(BaseIndex address, RegisterID dest)
     672    {
     673        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
     674    }
     675
     676    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
     677    {
     678        m_assembler.movl_rm_disp32(src, address.offset, address.base);
     679        return DataLabel32(this);
     680    }
     681
     682    void store32(RegisterID src, ImplicitAddress address)
     683    {
     684        m_assembler.movl_rm(src, address.offset, address.base);
     685    }
     686
     687    void store32(RegisterID src, BaseIndex address)
     688    {
     689        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
     690    }
     691
     692    void store32(Imm32 imm, ImplicitAddress address)
     693    {
     694        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
     695    }
     696   
     697
     698    // Stack manipulation operations:
     699    //
     700    // The ABI is assumed to provide a stack abstraction to memory,
     701    // containing machine word sized units of data.  Push and pop
     702    // operations add and remove a single register sized unit of data
     703    // to or from the stack.  Peek and poke operations read or write
     704    // values on the stack, without moving the current stack position.
     705   
     706    void pop(RegisterID dest)
     707    {
     708        m_assembler.pop_r(dest);
     709    }
     710
     711    void push(RegisterID src)
     712    {
     713        m_assembler.push_r(src);
     714    }
     715
     716    void push(Address address)
     717    {
     718        m_assembler.push_m(address.offset, address.base);
     719    }
     720
     721    void push(Imm32 imm)
     722    {
     723        m_assembler.push_i32(imm.m_value);
     724    }
     725
     726    // Register move operations:
     727    //
     728    // Move values in registers.
     729
     730    void move(Imm32 imm, RegisterID dest)
     731    {
     732        // Note: on 64-bit the Imm32 value is zero extended into the register, it
     733        // may be useful to have a separate version that sign extends the value?
     734        if (!imm.m_value)
     735            m_assembler.xorl_rr(dest, dest);
     736        else
     737            m_assembler.movl_i32r(imm.m_value, dest);
     738    }
     739
     740#if PLATFORM(X86_64)
     741    void move(RegisterID src, RegisterID dest)
     742    {
      743        // Note: on 64-bit this is a full register move; perhaps it would be
     744        // useful to have separate move32 & movePtr, with move32 zero extending?
     745        m_assembler.movq_rr(src, dest);
     746    }
     747
     748    void move(ImmPtr imm, RegisterID dest)
     749    {
     750        if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
     751            m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
     752        else
     753            m_assembler.movq_i64r(imm.asIntptr(), dest);
     754    }
     755
     756    void swap(RegisterID reg1, RegisterID reg2)
     757    {
     758        m_assembler.xchgq_rr(reg1, reg2);
     759    }
     760
     761    void signExtend32ToPtr(RegisterID src, RegisterID dest)
     762    {
     763        m_assembler.movsxd_rr(src, dest);
     764    }
     765
     766    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
     767    {
     768        m_assembler.movl_rr(src, dest);
     769    }
     770#else
     771    void move(RegisterID src, RegisterID dest)
     772    {
     773        m_assembler.movl_rr(src, dest);
     774    }
     775
     776    void move(ImmPtr imm, RegisterID dest)
     777    {
     778        m_assembler.movl_i32r(imm.asIntptr(), dest);
     779    }
     780
     781    void swap(RegisterID reg1, RegisterID reg2)
     782    {
     783        m_assembler.xchgl_rr(reg1, reg2);
     784    }
     785
     786    void signExtend32ToPtr(RegisterID src, RegisterID dest)
     787    {
     788        if (src != dest)
     789            move(src, dest);
     790    }
     791
     792    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
     793    {
     794        if (src != dest)
     795            move(src, dest);
     796    }
     797#endif
     798
     799
     800    // Forwards / external control flow operations:
     801    //
     802    // This set of jump and conditional branch operations return a Jump
      803    // object which may be linked at a later point, allowing forwards jumps,
     804    // or jumps that will require external linkage (after the code has been
     805    // relocated).
     806    //
     807    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
      809    // respectively, for unsigned comparisons the names b, a, be, and ae are
     809    // used (representing the names 'below' and 'above').
     810    //
      811    // Operands to the comparison are provided in the expected order, e.g.
     812    // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
     813    // treated as a signed 32bit value, is less than or equal to 5.
     814    //
     815    // jz and jnz test whether the first operand is equal to zero, and take
     816    // an optional second operand of a mask under which to perform the test.
     817
     818public:
     819    Jump branch32(Condition cond, RegisterID left, RegisterID right)
     820    {
     821        m_assembler.cmpl_rr(right, left);
     822        return Jump(m_assembler.jCC(cond));
     823    }
     824
     825    Jump branch32(Condition cond, RegisterID left, Imm32 right)
     826    {
     827        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
     828            m_assembler.testl_rr(left, left);
     829        else
     830            m_assembler.cmpl_ir(right.m_value, left);
     831        return Jump(m_assembler.jCC(cond));
     832    }
     833   
     834    Jump branch32(Condition cond, RegisterID left, Address right)
     835    {
     836        m_assembler.cmpl_mr(right.offset, right.base, left);
     837        return Jump(m_assembler.jCC(cond));
     838    }
     839   
     840    Jump branch32(Condition cond, Address left, RegisterID right)
     841    {
     842        m_assembler.cmpl_rm(right, left.offset, left.base);
     843        return Jump(m_assembler.jCC(cond));
     844    }
     845
     846    Jump branch32(Condition cond, Address left, Imm32 right)
     847    {
     848        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
     849        return Jump(m_assembler.jCC(cond));
     850    }
     851
     852    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
     853    {
     854        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
     855        return Jump(m_assembler.jCC(cond));
     856    }
     857
     858    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
     859    {
     860        ASSERT((cond == Zero) || (cond == NonZero));
     861        m_assembler.testl_rr(reg, mask);
     862        return Jump(m_assembler.jCC(cond));
     863    }
     864
     865    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
     866    {
     867        ASSERT((cond == Zero) || (cond == NonZero));
     868        // if we are only interested in the low seven bits, this can be tested with a testb
     869        if (mask.m_value == -1)
     870            m_assembler.testl_rr(reg, reg);
     871        else if ((mask.m_value & ~0x7f) == 0)
     872            m_assembler.testb_i8r(mask.m_value, reg);
     873        else
     874            m_assembler.testl_i32r(mask.m_value, reg);
     875        return Jump(m_assembler.jCC(cond));
     876    }
     877
     878    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
     879    {
     880        ASSERT((cond == Zero) || (cond == NonZero));
     881        if (mask.m_value == -1)
     882            m_assembler.cmpl_im(0, address.offset, address.base);
     883        else
     884            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
     885        return Jump(m_assembler.jCC(cond));
     886    }
     887
     888    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
     889    {
     890        ASSERT((cond == Zero) || (cond == NonZero));
     891        if (mask.m_value == -1)
     892            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
     893        else
     894            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
     895        return Jump(m_assembler.jCC(cond));
     896    }
     897
     898    Jump jump()
     899    {
     900        return Jump(m_assembler.jmp());
     901    }
     902
     903    void jump(RegisterID target)
     904    {
     905        m_assembler.jmp_r(target);
     906    }
     907
     908    // Address is a memory location containing the address to jump to
     909    void jump(Address address)
     910    {
     911        m_assembler.jmp_m(address.offset, address.base);
     912    }
     913
     914
     915    // Arithmetic control flow operations:
     916    //
     917    // This set of conditional branch operations branch based
     918    // on the result of an arithmetic operation.  The operation
     919    // is performed as normal, storing the result.
     920    //
     921    // * jz operations branch if the result is zero.
     922    // * jo operations branch if the (signed) arithmetic
     923    //   operation caused an overflow to occur.
     924   
     925    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
     926    {
     927        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     928        add32(src, dest);
     929        return Jump(m_assembler.jCC(cond));
     930    }
     931   
     932    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
     933    {
     934        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     935        add32(imm, dest);
     936        return Jump(m_assembler.jCC(cond));
     937    }
     938   
     939    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
     940    {
     941        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     942        mul32(src, dest);
     943        return Jump(m_assembler.jCC(cond));
     944    }
     945   
     946    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
     947    {
     948        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     949        mul32(imm, src, dest);
     950        return Jump(m_assembler.jCC(cond));
     951    }
     952   
     953    Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
     954    {
     955        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     956        sub32(src, dest);
     957        return Jump(m_assembler.jCC(cond));
     958    }
     959   
     960    Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
     961    {
     962        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     963        sub32(imm, dest);
     964        return Jump(m_assembler.jCC(cond));
     965    }
     966   
     967
     968    // Miscellaneous operations:
     969
     970    void breakpoint()
     971    {
     972        m_assembler.int3();
     973    }
     974
     975    Jump call()
     976    {
     977        return Jump(m_assembler.call());
     978    }
     979
     980    // FIXME: why does this return a Jump object? - it can't be linked.
     981    // This may be to get a reference to the return address of the call.
     982    //
     983    // This should probably be handled by a separate label type to a regular
     984    // jump.  Todo: add a CallLabel type, for the regular call - can be linked
     985    // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
     986    // Also add a CallReturnLabel type for this to return (just a more JmpDsty
     987    // form of label, can get the void* after the code has been linked, but can't
     988    // try to link it like a Jump object), and let the CallLabel be cast into a
     989    // CallReturnLabel.
     990    Jump call(RegisterID target)
     991    {
     992        return Jump(m_assembler.call(target));
     993    }
     994
     995    void ret()
     996    {
     997        m_assembler.ret();
     998    }
     999
     1000    void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
     1001    {
     1002        m_assembler.cmpl_rr(right, left);
     1003        m_assembler.setCC_r(cond, dest);
     1004        m_assembler.movzbl_rr(dest, dest);
     1005    }
     1006
     1007    void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
     1008    {
     1009        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
     1010            m_assembler.testl_rr(left, left);
     1011        else
     1012            m_assembler.cmpl_ir(right.m_value, left);
     1013        m_assembler.setCC_r(cond, dest);
     1014        m_assembler.movzbl_rr(dest, dest);
     1015    }
     1016
     1017    // FIXME:
      1018    // The mask should be optional... perhaps the argument order should be
     1019    // dest-src, operations always have a dest? ... possibly not true, considering
     1020    // asm ops like test, or pseudo ops like pop().
     1021    void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
     1022    {
     1023        if (mask.m_value == -1)
     1024            m_assembler.cmpl_im(0, address.offset, address.base);
     1025        else
     1026            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
     1027        m_assembler.setCC_r(cond, dest);
     1028        m_assembler.movzbl_rr(dest, dest);
     1029    }
     1030};
     1031
     1032
     1033#if PLATFORM(X86_64)
     1034
     1035class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
     1036protected:
     1037    static const X86::RegisterID scratchRegister = X86::r11;
     1038
     1039public:
     1040    static const Scale ScalePtr = TimesEight;
     1041
     1042    using MacroAssemblerX86Common::add32;
     1043    using MacroAssemblerX86Common::sub32;
     1044    using MacroAssemblerX86Common::load32;
     1045    using MacroAssemblerX86Common::store32;
     1046
     1047    void add32(Imm32 imm, AbsoluteAddress address)
     1048    {
     1049        move(ImmPtr(address.m_ptr), scratchRegister);
     1050        add32(imm, Address(scratchRegister));
     1051    }
     1052   
     1053    void sub32(Imm32 imm, AbsoluteAddress address)
     1054    {
     1055        move(ImmPtr(address.m_ptr), scratchRegister);
     1056        sub32(imm, Address(scratchRegister));
     1057    }
     1058
     1059    void load32(void* address, RegisterID dest)
     1060    {
     1061        if (dest == X86::eax)
     1062            m_assembler.movl_mEAX(address);
     1063        else {
     1064            move(X86::eax, dest);
     1065            m_assembler.movl_mEAX(address);
     1066            swap(X86::eax, dest);
     1067        }
     1068    }
     1069
     1070    void store32(Imm32 imm, void* address)
     1071    {
     1072        move(X86::eax, scratchRegister);
     1073        move(imm, X86::eax);
     1074        m_assembler.movl_EAXm(address);
     1075        move(scratchRegister, X86::eax);
     1076    }
     1077
     1078
     1079
     1080    void addPtr(RegisterID src, RegisterID dest)
     1081    {
     1082        m_assembler.addq_rr(src, dest);
     1083    }
     1084
     1085    void addPtr(Imm32 imm, RegisterID srcDest)
     1086    {
     1087        m_assembler.addq_ir(imm.m_value, srcDest);
     1088    }
     1089
     1090    void addPtr(ImmPtr imm, RegisterID dest)
     1091    {
     1092        move(imm, scratchRegister);
     1093        m_assembler.addq_rr(scratchRegister, dest);
     1094    }
     1095
     1096    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
     1097    {
     1098        m_assembler.leal_mr(imm.m_value, src, dest);
     1099    }
     1100
     1101    void andPtr(RegisterID src, RegisterID dest)
     1102    {
     1103        m_assembler.andq_rr(src, dest);
     1104    }
     1105
     1106    void andPtr(Imm32 imm, RegisterID srcDest)
     1107    {
     1108        m_assembler.andq_ir(imm.m_value, srcDest);
     1109    }
     1110
    6171111    void orPtr(RegisterID src, RegisterID dest)
    6181112    {
    619 #if PLATFORM(X86_64)
    6201113        m_assembler.orq_rr(src, dest);
    621 #else
    622         or32(src, dest);
    623 #endif
    6241114    }
    6251115
    6261116    void orPtr(ImmPtr imm, RegisterID dest)
    6271117    {
    628 #if PLATFORM(X86_64)
    6291118        move(imm, scratchRegister);
    6301119        m_assembler.orq_rr(scratchRegister, dest);
    631 #else
    632         or32(Imm32(imm), dest);
    633 #endif
    6341120    }
    6351121
    6361122    void orPtr(Imm32 imm, RegisterID dest)
    6371123    {
    638 #if PLATFORM(X86_64)
    6391124        m_assembler.orq_ir(imm.m_value, dest);
    640 #else
    641         or32(imm, dest);
    642 #endif
    643     }
    644 
    645     void or32(RegisterID src, RegisterID dest)
    646     {
    647         m_assembler.orl_rr(src, dest);
    648     }
    649 
    650     void or32(Imm32 imm, RegisterID dest)
    651     {
    652         m_assembler.orl_ir(imm.m_value, dest);
    6531125    }
    6541126
    6551127    void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    6561128    {
    657 #if PLATFORM(X86_64)
    6581129        // On x86 we can only shift by ecx; if asked to shift by another register we'll
     6591130        // need to rejig the shift amount into ecx first, and restore the registers afterwards.
     
    6741145        } else
    6751146            m_assembler.sarq_CLr(dest);
    676 #else
    677         rshift32(shift_amount, dest);
    678 #endif
    6791147    }
    6801148
    6811149    void rshiftPtr(Imm32 imm, RegisterID dest)
    6821150    {
    683 #if PLATFORM(X86_64)
    6841151        m_assembler.sarq_i8r(imm.m_value, dest);
    685 #else
    686         rshift32(imm, dest);
    687 #endif
    688     }
    689 
    690     void rshift32(RegisterID shift_amount, RegisterID dest)
    691     {
    692         // On x86 we can only shift by ecx; if asked to shift by another register we'll
    693         // need rejig the shift amount into ecx first, and restore the registers afterwards.
    694         if (shift_amount != X86::ecx) {
    695             swap(shift_amount, X86::ecx);
    696 
    697             // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
    698             if (dest == shift_amount)
    699                 m_assembler.sarl_CLr(X86::ecx);
    700             // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
    701             else if (dest == X86::ecx)
    702                 m_assembler.sarl_CLr(shift_amount);
    703             // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
    704             else
    705                 m_assembler.sarl_CLr(dest);
    706        
    707             swap(shift_amount, X86::ecx);
    708         } else
    709             m_assembler.sarl_CLr(dest);
    710     }
    711 
    712     void rshift32(Imm32 imm, RegisterID dest)
    713     {
    714         m_assembler.sarl_i8r(imm.m_value, dest);
    7151152    }
    7161153
    7171154    void subPtr(RegisterID src, RegisterID dest)
    7181155    {
    719 #if PLATFORM(X86_64)
    7201156        m_assembler.subq_rr(src, dest);
    721 #else
    722         sub32(src, dest);
    723 #endif
    7241157    }
    7251158   
    7261159    void subPtr(Imm32 imm, RegisterID dest)
    7271160    {
    728 #if PLATFORM(X86_64)
    7291161        m_assembler.subq_ir(imm.m_value, dest);
    730 #else
    731         sub32(imm, dest);
    732 #endif
    7331162    }
    7341163   
    7351164    void subPtr(ImmPtr imm, RegisterID dest)
    7361165    {
    737 #if PLATFORM(X86_64)
    7381166        move(imm, scratchRegister);
    7391167        m_assembler.subq_rr(scratchRegister, dest);
    740 #else
    741         sub32(Imm32(imm), dest);
    742 #endif
    743     }
    744 
    745     void sub32(RegisterID src, RegisterID dest)
    746     {
    747         m_assembler.subl_rr(src, dest);
    748     }
    749    
    750     void sub32(Imm32 imm, RegisterID dest)
    751     {
    752         m_assembler.subl_ir(imm.m_value, dest);
    753     }
    754    
    755     void sub32(Imm32 imm, Address address)
    756     {
    757         m_assembler.subl_im(imm.m_value, address.offset, address.base);
    758     }
    759 
    760     void sub32(Imm32 imm, AbsoluteAddress address)
    761     {
    762 #if PLATFORM(X86_64)
    763         move(ImmPtr(address.m_ptr), scratchRegister);
    764         sub32(imm, Address(scratchRegister));
    765 #else
    766         m_assembler.subl_im(imm.m_value, address.m_ptr);
    767 #endif
    768     }
    769 
    770     void sub32(Address src, RegisterID dest)
    771     {
    772         m_assembler.subl_mr(src.offset, src.base, dest);
    7731168    }
    7741169
    7751170    void xorPtr(RegisterID src, RegisterID dest)
    7761171    {
    777 #if PLATFORM(X86_64)
    7781172        m_assembler.xorq_rr(src, dest);
    779 #else
    780         xor32(src, dest);
    781 #endif
    7821173    }
    7831174
    7841175    void xorPtr(Imm32 imm, RegisterID srcDest)
    7851176    {
    786 #if PLATFORM(X86_64)
    7871177        m_assembler.xorq_ir(imm.m_value, srcDest);
    788 #else
    789         xor32(imm, srcDest);
    790 #endif
    791     }
    792 
    793     void xor32(RegisterID src, RegisterID dest)
    794     {
    795         m_assembler.xorl_rr(src, dest);
    796     }
    797 
    798     void xor32(Imm32 imm, RegisterID srcDest)
    799     {
    800         m_assembler.xorl_ir(imm.m_value, srcDest);
    801     }
    802    
    803 
    804     // Memory access operations:
    805     //
    806     // Loads are of the form load(address, destination) and stores of the form
    807     // store(source, address).  The source for a store may be an Imm32.  Address
    808     // operand objects to loads and store will be implicitly constructed if a
    809     // register is passed.
     1178    }
     1179
    8101180
    8111181    void loadPtr(ImplicitAddress address, RegisterID dest)
    8121182    {
    813 #if PLATFORM(X86_64)
    8141183        m_assembler.movq_mr(address.offset, address.base, dest);
    815 #else
    816         load32(address, dest);
    817 #endif
    818     }
    819 
    820     DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    821     {
    822 #if PLATFORM(X86_64)
    823         m_assembler.movq_mr_disp32(address.offset, address.base, dest);
    824         return DataLabel32(this);
    825 #else
    826         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
    827         return DataLabel32(this);
    828 #endif
    8291184    }
    8301185
    8311186    void loadPtr(BaseIndex address, RegisterID dest)
    8321187    {
    833 #if PLATFORM(X86_64)
    8341188        m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
    835 #else
    836         load32(address, dest);
    837 #endif
    8381189    }
    8391190
    8401191    void loadPtr(void* address, RegisterID dest)
    8411192    {
    842 #if PLATFORM(X86_64)
    8431193        if (dest == X86::eax)
    8441194            m_assembler.movq_mEAX(address);
     
    8481198            swap(X86::eax, dest);
    8491199        }
    850 #else
    851         load32(address, dest);
    852 #endif
    853     }
    854 
    // Load a 32-bit value from [base + offset] into dest.
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    // Load a 32-bit value from [base + index * scale + offset] into dest.
    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    // Load a 32-bit value from an absolute address into dest.
    void load32(void* address, RegisterID dest)
    {
#if PLATFORM(X86_64)
        // The full-width absolute-address load (movl_mEAX) only targets eax.
        // If dest is not eax: park eax's value in dest, load into eax, then
        // exchange — dest ends up with the loaded value and eax is restored.
        if (dest == X86::eax)
            m_assembler.movl_mEAX(address);
        else {
            move(X86::eax, dest);
            m_assembler.movl_mEAX(address);
            swap(X86::eax, dest);
        }
#else
        m_assembler.movl_mr(address, dest);
#endif
    }

    // Load a 16-bit value from [base + index * scale + offset] into dest,
    // zero-extended to 32 bits (movzwl).
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    // 64-bit pointer load with a repatchable 32-bit displacement; the
    // returned DataLabel32 marks the displacement.
    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movq_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }
    8841207
    8851208    void storePtr(RegisterID src, ImplicitAddress address)
    8861209    {
    887 #if PLATFORM(X86_64)
    8881210        m_assembler.movq_rm(src, address.offset, address.base);
    889 #else
    890         store32(src, address);
    891 #endif
     1211    }
     1212
    // Store a pointer-width register to [base + index * scale + offset].
    void storePtr(RegisterID src, BaseIndex address)
    {
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // Store an immediate pointer: there is no 64-bit immediate-to-memory
    // store, so materialize the value in the scratch register first.
    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
        move(imm, scratchRegister);
        storePtr(scratchRegister, address);
    }
    8931223
    // Store src to [base + offset], forcing a full 32-bit displacement so it
    // can be repatched; the returned DataLabel32 marks the displacement.
    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
#else
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
#endif
    }

    // Store a pointer-width register to [base + index * scale + offset].
    void storePtr(RegisterID src, BaseIndex address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
#else
        store32(src, address);
#endif
    }

    // Store an immediate pointer to [base + offset].
    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
#if PLATFORM(X86_64)
        // No 64-bit immediate-to-memory store; go via the scratch register.
        move(imm, scratchRegister);
        storePtr(scratchRegister, address);
#else
        m_assembler.movl_i32m(imm.asIntptr(), address.offset, address.base);
#endif
    }

#if !PLATFORM(X86_64)
    // 32-bit only: store an immediate pointer to an absolute address.
    void storePtr(ImmPtr imm, void* address)
    {
        store32(Imm32(imm), address);
    }
#endif

    // Store a yet-unknown pointer to [base + offset]; emits a placeholder of
    // 0 and returns a DataLabelPtr through which the real value is patched in.
    DataLabelPtr storePtrWithPatch(Address address)
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(0, scratchRegister);
        DataLabelPtr label(this);
        storePtr(scratchRegister, address);
        return label;
#else
        m_assembler.movl_i32m(0, address.offset, address.base);
        return DataLabelPtr(this);
#endif
    }
    943 
    // Store a 32-bit register to [base + offset].
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    // Store a 32-bit register to [base + index * scale + offset].
    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // Store a 32-bit immediate to [base + offset].
    void store32(Imm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    // Store a 32-bit immediate to an absolute address.
    void store32(Imm32 imm, void* address)
    {
#if PLATFORM(X86_64)
        // The absolute-address store (movl_EAXm) goes through eax; preserve
        // eax's value in the scratch register around the store.
        move(X86::eax, scratchRegister);
        move(imm, X86::eax);
        m_assembler.movl_EAXm(address);
        move(scratchRegister, X86::eax);
#else
        m_assembler.movl_i32m(imm.m_value, address);
#endif
    }
    970 
    971 
    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data.  Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack.  Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

    // Pop the top of stack into dest.
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    // Push src onto the stack.
    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    // Push the value at [base + offset] onto the stack.
    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    // Push a 32-bit immediate onto the stack.
    void push(Imm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    // Discard the top of stack by bumping the stack pointer one word.
    void pop()
    {
        addPtr(Imm32(sizeof(void*)), X86::esp);
    }

    // Read the word at stack slot 'index' (0 = top) into dest, without
    // moving the stack pointer.
    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(X86::esp, (index * sizeof(void *))), dest);
    }

    // Write src into stack slot 'index', without moving the stack pointer.
    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, Address(X86::esp, (index * sizeof(void *))));
    }

    // Write a 32-bit immediate into stack slot 'index'.
    void poke(Imm32 value, int index = 0)
    {
        store32(value, Address(X86::esp, (index * sizeof(void *))));
    }

    // Write an immediate pointer into stack slot 'index'.
    void poke(ImmPtr imm, int index = 0)
    {
        storePtr(imm, Address(X86::esp, (index * sizeof(void *))));
    }
    1024 
    // Register move operations:
    //
    // Move values in registers.

    // Load a 32-bit immediate into dest; zero is materialized with xor,
    // which has a shorter encoding than mov-immediate.
    void move(Imm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the Imm32 value is zero extended into the register, it
        // may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

    // Copy src to dest.
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
#if PLATFORM(X86_64)
        m_assembler.movq_rr(src, dest);
#else
        m_assembler.movl_rr(src, dest);
#endif
    }

    // Load an immediate pointer into dest; on x86-64 the shorter 32-bit
    // encoding is used when the value fits in an unsigned 32-bit immediate.
    void move(ImmPtr imm, RegisterID dest)
    {
#if PLATFORM(X86_64)
        if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr()))
            m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest);
        else
            m_assembler.movq_i64r(imm.asIntptr(), dest);
#else
        m_assembler.movl_i32r(imm.asIntptr(), dest);
#endif
    }

    // Exchange the contents of reg1 and reg2.
    void swap(RegisterID reg1, RegisterID reg2)
    {
#if PLATFORM(X86_64)
        m_assembler.xchgq_rr(reg1, reg2);
#else
        m_assembler.xchgl_rr(reg1, reg2);
#endif
    }

    // Sign-extend a 32-bit value to pointer width (plain move on 32-bit).
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movsxd_rr(src, dest);
#else
        if (src != dest)
            move(src, dest);
#endif
    }

    // Zero-extend a 32-bit value to pointer width; a 32-bit mov on x86-64
    // clears the upper 32 bits (plain move on 32-bit).
    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
#if PLATFORM(X86_64)
        m_assembler.movl_rr(src, dest);
#else
        if (src != dest)
            move(src, dest);
#endif
    }
    1090 
    1091 
    1092     // Forwards / external control flow operations:
    1093     //
    1094     // This set of jump and conditional branch operations return a Jump
    1095     // object which may be linked at a later point, allowing forward jumps,
    1096     // or jumps that will require external linkage (after the code has been
    1097     // relocated).
    1098     //
    1099     // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    1100     // respectively, for unsigned comparisons the names b, a, be, and ae are
    1101     // used (representing the names 'below' and 'above').
    1102     //
    1103     // Operands to the comparison are provided in the expected order, e.g.
    1104     // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
    1105     // treated as a signed 32bit value, is less than or equal to 5.
    1106     //
    1107     // jz and jnz test whether the first operand is equal to zero, and take
    1108     // an optional second operand of a mask under which to perform the test.
    1109 
    1110 public:
     1228    }
     1229
     1230
    // Compare two pointer-width registers; branch on the given condition.
    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rr(right, left);
        return Jump(m_assembler.jCC(cond));
#else
        return branch32(cond, left, right);
#endif
    }
    11201236
    11211237    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    11221238    {
    1123 #if PLATFORM(X86_64)
    11241239        intptr_t imm = right.asIntptr();
    11251240        if (CAN_SIGN_EXTEND_32_64(imm)) {
     
    11331248            return branchPtr(cond, left, scratchRegister);
    11341249        }
    1135 #else
    1136         return branch32(cond, left, Imm32(right));
    1137 #endif
    11381250    }
    11391251
    // Compare left against the pointer at [base + offset]; branch on cond.
    Jump branchPtr(Condition cond, RegisterID left, Address right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(cond));
#else
        return branch32(cond, left, right);
#endif
    }

    // Compare the pointer at an absolute address against right; branch on cond.
    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    {
#if PLATFORM(X86_64)
        // Materialize the absolute address in the scratch register and
        // compare through it.
        move(ImmPtr(left.m_ptr), scratchRegister);
        return branchPtr(cond, Address(scratchRegister), right);
#else
        m_assembler.cmpl_rm(right, left.m_ptr);
        return Jump(m_assembler.jCC(cond));
#endif
    }

    // Compare the pointer at [base + offset] against right; branch on cond.
    Jump branchPtr(Condition cond, Address left, RegisterID right)
    {
#if PLATFORM(X86_64)
        m_assembler.cmpq_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(cond));
#else
        return branch32(cond, left, right);
#endif
    }

    // Compare the pointer at [base + offset] against an immediate; branch on cond.
    Jump branchPtr(Condition cond, Address left, ImmPtr right)
    {
#if PLATFORM(X86_64)
        // No 64-bit immediate compare; go via the scratch register.
        move(right, scratchRegister);
        return branchPtr(cond, left, scratchRegister);
#else
        return branch32(cond, left, Imm32(right));
#endif
    }

#if !PLATFORM(X86_64)
    // 32-bit only: compare the pointer at an absolute address against an
    // immediate; branch on cond.
    Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
    {
        m_assembler.cmpl_im(right.asIntptr(), left.m_ptr);
        return Jump(m_assembler.jCC(cond));
    }
#endif

    // Compare left against a patchable immediate (initially initialRightValue);
    // dataLabel is set so the immediate can be repatched later.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return branchPtr(cond, left, scratchRegister);
#else
        // Force a full 32-bit immediate encoding so there is room to patch.
        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(cond));
#endif
    }

    // As above, but comparing against the pointer at [base + offset].
    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
#if PLATFORM(X86_64)
        m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return branchPtr(cond, left, scratchRegister);
#else
        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(cond));
#endif
    }
    1214 
    // 32-bit compare-and-branch: compare left against right, branch on cond.
    Jump branch32(Condition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(cond));
    }

    // Compare a register against an immediate; (in)equality against zero
    // uses the shorter test-register-with-itself encoding.
    Jump branch32(Condition cond, RegisterID left, Imm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(cond));
    }

    // Compare a register against the 32-bit value at [base + offset].
    Jump branch32(Condition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(cond));
    }

    // Compare the 32-bit value at [base + offset] against a register.
    Jump branch32(Condition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(cond));
    }

    // Compare the 32-bit value at [base + offset] against an immediate.
    Jump branch32(Condition cond, Address left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(cond));
    }

    // Compare the 16-bit value at [base + index * scale + offset] against a register.
    Jump branch16(Condition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(cond));
    }
    12531275
    // Branch on (reg & mask) at pointer width; cond selects zero/non-zero.
    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    {
#if PLATFORM(X86_64)
        m_assembler.testq_rr(reg, mask);
        return Jump(m_assembler.jCC(cond));
#else
        return branchTest32(cond, reg, mask);
#endif
    }
    12631281
    12641282    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    12651283    {
    1266 #if PLATFORM(X86_64)
    12671284        // if we are only interested in the low seven bits, this can be tested with a testb
    12681285        if (mask.m_value == -1)
     
    12731290            m_assembler.testq_i32r(mask.m_value, reg);
    12741291        return Jump(m_assembler.jCC(cond));
    1275 #else
    1276         return branchTest32(cond, reg, mask);
    1277 #endif
    12781292    }
    12791293
    12801294    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    12811295    {
    1282 #if PLATFORM(X86_64)
    12831296        if (mask.m_value == -1)
    12841297            m_assembler.cmpq_im(0, address.offset, address.base);
     
    12861299            m_assembler.testq_i32m(mask.m_value, address.offset, address.base);
    12871300        return Jump(m_assembler.jCC(cond));
    1288 #else
    1289         return branchTest32(cond, address, mask);
    1290 #endif
    12911301    }
    12921302
    12931303    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    12941304    {
    1295 #if PLATFORM(X86_64)
    12961305        if (mask.m_value == -1)
    12971306            m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
     
    12991308            m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
    13001309        return Jump(m_assembler.jCC(cond));
    1301 #else
    1302         return branchTest32(cond, address, mask);
    1303 #endif
    1304     }
    1305 
    // Branch on (reg & mask); cond must be Zero or NonZero.
    Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(cond));
    }

    // Branch on (reg & mask), choosing the shortest usable test encoding.
    Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(cond));
    }

    // Branch on (mem & mask) for the 32-bit value at [base + offset]; an
    // all-ones mask degenerates to a compare against zero.
    Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(cond));
    }

    // Branch on (mem & mask) for the 32-bit value at [base + index * scale + offset].
    Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(cond));
    }

    // Unconditional jump, to be linked later.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }
    1350 
    1351 
    // Backwards, local control flow operations:
    //
    // These operations provide a shorter notation for local
    // backwards branches, which may be both more convenient
    // for the user, and for the programmer, and for the
    // assembler (allowing shorter values to be used in
    // relative offsets).
    //
    // The code sequence:
    //
    //     Label topOfLoop(this);
    //     // ...
    //     jne32(reg1, reg2, topOfLoop);
    //
    // Is equivalent to the longer, potentially less efficient form:
    //
    //     Label topOfLoop(this);
    //     // ...
    //     jne32(reg1, reg2).linkTo(topOfLoop);

    // Compare-and-branch backwards to an already-bound label.
    void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
    {
        branch16(cond, left, right).linkTo(target, this);
    }

    void branchTestPtr(Condition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

    // Unconditional jump back to an already-bound label.
    void jump(Label target)
    {
        m_assembler.link(m_assembler.jmp(), target.m_label);
    }

    // Indirect jump through a register.
    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
    1417 
    1418 
    1419     // Arithmetic control flow operations:
    1420     //
    1421     // This set of conditional branch operations branch based
    1422     // on the result of an arithmetic operation.  The operation
    1423     // is performed as normal, storing the result.
    1424     //
    1425     // * jz operations branch if the result is zero.
    1426     // * jo operations branch if the (signed) arithmetic
    1427     //   operation caused an overflow to occur.
     1310    }
     1311
    14281312
    14291313    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
     
    14331317        return Jump(m_assembler.jCC(cond));
    14341318    }
    1435    
    // dest += src, branching on the resulting flags (Overflow/Zero/NonZero).
    Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(src, dest);
        return Jump(m_assembler.jCC(cond));
    }

    // dest += imm, branching on the resulting flags.
    Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        add32(imm, dest);
        return Jump(m_assembler.jCC(cond));
    }

    // dest *= src, branching on the resulting flags.
    Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(src, dest);
        return Jump(m_assembler.jCC(cond));
    }

    // dest = src * imm, branching on the resulting flags.
    Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
        mul32(imm, src, dest);
        return Jump(m_assembler.jCC(cond));
    }
    1463    
     1319
    14641320    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    14651321    {
     
    14681324        return Jump(m_assembler.jCC(cond));
    14691325    }
    1470    
    1471     Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
    1472     {
    1473         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     1326
    // Compare left against a patchable 64-bit immediate, materialized in the
    // scratch register; dataLabel marks the immediate for later repatching.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return branchPtr(cond, left, scratchRegister);
    }

    // As above, comparing against the pointer at [base + offset].
    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        m_assembler.movq_i64r(initialRightValue.asIntptr(), scratchRegister);
        dataLabel = DataLabelPtr(this);
        return branchPtr(cond, left, scratchRegister);
    }

    // Store a yet-unknown pointer to [base + offset]; emits a placeholder 0
    // and returns a DataLabelPtr through which the real value is patched in.
    DataLabelPtr storePtrWithPatch(Address address)
    {
        m_assembler.movq_i64r(0, scratchRegister);
        DataLabelPtr label(this);
        storePtr(scratchRegister, address);
        return label;
    }
     1348};
     1349
     1350typedef MacroAssemblerX86_64 MacroAssemblerBase;
     1351
     1352#else
     1353
// 32-bit x86 specialization: adds the few operations that differ from the
// common x86 code generator (absolute addressing, 32-bit patchable forms).
class MacroAssemblerX86 : public MacroAssemblerX86Common {
public:
    // Pointers are 4 bytes wide on 32-bit x86.
    static const Scale ScalePtr = TimesFour;

    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::branch32;

    // dest = src + imm, computed with lea.
    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    // Add an immediate to the 32-bit value at an absolute address.
    void add32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
    }

    // Subtract an immediate from the 32-bit value at an absolute address.
    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.subl_im(imm.m_value, address.m_ptr);
    }

    // Load a 32-bit value from an absolute address into dest.
    void load32(void* address, RegisterID dest)
    {
        m_assembler.movl_mr(address, dest);
    }

    // Store a 32-bit immediate to an absolute address.
    void store32(Imm32 imm, void* address)
    {
        m_assembler.movl_i32m(imm.m_value, address);
    }

    // Compare the 32-bit value at an absolute address against a register.
    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.m_ptr);
        return Jump(m_assembler.jCC(cond));
    }

    // Compare the 32-bit value at an absolute address against an immediate.
    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.m_ptr);
        return Jump(m_assembler.jCC(cond));
    }

    // Compare left against a patchable immediate; the compare is forced to a
    // full 32-bit immediate so there is room to patch, marked by dataLabel.
    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(cond));
    }

    // As above, comparing against the value at [base + offset].
    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(cond));
    }

    // Store a yet-unknown pointer to [base + offset]; emits a placeholder 0
    // and returns a DataLabelPtr through which the real value is patched in.
    DataLabelPtr storePtrWithPatch(Address address)
    {
        m_assembler.movl_i32m(0, address.offset, address.base);
        return DataLabelPtr(this);
    }
};
     1421
     1422typedef MacroAssemblerX86 MacroAssemblerBase;
     1423
     1424#endif
     1425
     1426
     1427class MacroAssembler : public MacroAssemblerBase {
     1428public:
     1429
     1430    using MacroAssemblerBase::pop;
     1431    using MacroAssemblerBase::jump;
     1432    using MacroAssemblerBase::branch32;
     1433    using MacroAssemblerBase::branch16;
     1434#if PLATFORM(X86_64)
     1435    using MacroAssemblerBase::branchPtr;
     1436    using MacroAssemblerBase::branchTestPtr;
     1437#endif
     1438
     1439
    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.

    // Discard the top of stack by bumping the stack pointer one word.
    void pop()
    {
        addPtr(Imm32(sizeof(void*)), stackPointerRegister);
    }

    // Read the word at stack slot 'index' (0 = top) into dest.
    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    // Write src into stack slot 'index', without moving the stack pointer.
    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, Address(stackPointerRegister, (index * sizeof(void*))));
    }

    // Write a 32-bit immediate into stack slot 'index'.
    void poke(Imm32 value, int index = 0)
    {
        store32(value, Address(stackPointerRegister, (index * sizeof(void*))));
    }

    // Write an immediate pointer into stack slot 'index'.
    void poke(ImmPtr imm, int index = 0)
    {
        storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*))));
    }
     1466
     1467
    // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.

    // Compare-and-branch backwards to an already-bound label.
    void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(Condition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    void branch16(Condition cond, BaseIndex left, RegisterID right, Label target)
    {
        branch16(cond, left, right).linkTo(target, this);
    }

    void branchTestPtr(Condition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

    // Unconditional backwards jump to an already-bound label.
    void jump(Label target)
    {
        jump().linkTo(target, this);
    }
     1503
     1504
    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // Each *Ptr operation below simply forwards to its 32-bit counterpart.
#if !PLATFORM(X86_64)
    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(Imm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(ImmPtr imm, RegisterID dest)
    {
        add32(Imm32(imm), dest);
    }

    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(Imm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(ImmPtr imm, RegisterID dest)
    {
        or32(Imm32(imm), dest);
    }

    void orPtr(Imm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void rshiftPtr(RegisterID shift_amount, RegisterID dest)
    {
        rshift32(shift_amount, dest);
    }

    void rshiftPtr(Imm32 imm, RegisterID dest)
    {
        rshift32(imm, dest);
    }
     1562
     1563    void subPtr(RegisterID src, RegisterID dest)
     1564    {
    14741565        sub32(src, dest);
    1475         return Jump(m_assembler.jCC(cond));
    1476     }
    1477    
    1478     Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
    1479     {
    1480         ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
     1566    }
     1567   
     1568    void subPtr(Imm32 imm, RegisterID dest)
     1569    {
    14811570        sub32(imm, dest);
    1482         return Jump(m_assembler.jCC(cond));
    1483     }
    1484    
    1485 
    1486     // Miscellaneous operations:
    1487 
    1488     void breakpoint()
    1489     {
    1490         m_assembler.int3();
    1491     }
    1492 
    1493     Jump call()
    1494     {
    1495         return Jump(m_assembler.call());
    1496     }
    1497 
    1498     // FIXME: why does this return a Jump object? - it can't be linked.
    1499     // This may be to get a reference to the return address of the call.
    1500     //
    1501     // This should probably be handled by a separate label type to a regular
    1502     // jump.  Todo: add a CallLabel type, for the regular call - can be linked
    1503     // like a jump (possibly a subclass of jump?, or possibly casts to a Jump).
    1504     // Also add a CallReturnLabel type for this to return (just a more JmpDsty
    1505     // form of label, can get the void* after the code has been linked, but can't
    1506     // try to link it like a Jump object), and let the CallLabel be cast into a
    1507     // CallReturnLabel.
    1508     Jump call(RegisterID target)
    1509     {
    1510         return Jump(m_assembler.call(target));
    1511     }
    1512 
    1513     Label label()
    1514     {
    1515         return Label(this);
    1516     }
    1517    
    1518     Label align()
    1519     {
    1520         m_assembler.align(16);
    1521         return Label(this);
    1522     }
    1523 
    1524     ptrdiff_t differenceBetween(Label from, Jump to)
    1525     {
    1526         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    1527     }
    1528 
    1529     ptrdiff_t differenceBetween(Label from, Label to)
    1530     {
    1531         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
    1532     }
    1533 
    1534     ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
    1535     {
    1536         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
    1537     }
    1538 
    1539     ptrdiff_t differenceBetween(Label from, DataLabel32 to)
    1540     {
    1541         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label);
    1542     }
    1543 
    1544     ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
    1545     {
    1546         return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
    1547     }
    1548 
    1549     void ret()
    1550     {
    1551         m_assembler.ret();
    1552     }
    1553 
    1554     void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
    1555     {
    1556         m_assembler.cmpl_rr(right, left);
    1557         m_assembler.setCC_r(cond, dest);
    1558         m_assembler.movzbl_rr(dest, dest);
    1559     }
    1560 
    1561     void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
    1562     {
    1563         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
    1564             m_assembler.testl_rr(left, left);
    1565         else
    1566             m_assembler.cmpl_ir(right.m_value, left);
    1567         m_assembler.setCC_r(cond, dest);
    1568         m_assembler.movzbl_rr(dest, dest);
    1569     }
    1570 
    1571     // FIXME:
    1572     // The mask should be optional... paerhaps the argument order should be
    1573     // dest-src, operations always have a dest? ... possibly not true, considering
    1574     // asm ops like test, or pseudo ops like pop().
    1575     void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest)
    1576     {
    1577         if (mask.m_value == -1)
    1578             m_assembler.cmpl_im(0, address.offset, address.base);
    1579         else
    1580             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    1581         m_assembler.setCC_r(cond, dest);
    1582         m_assembler.movzbl_rr(dest, dest);
    1583     }
     1571    }
     1572   
     1573    void subPtr(ImmPtr imm, RegisterID dest)
     1574    {
     1575        sub32(Imm32(imm), dest);
     1576    }
     1577
    // Pointer-width bitwise XOR: dest ^= src (32-bit on this platform).
    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    // srcDest ^= imm (32-bit immediate).
    void xorPtr(Imm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }
     1587
     1588
    // Pointer-width load from base+offset; a pointer is 32 bits on this
    // platform, so this forwards to load32.
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    // Pointer-width load from base+index*scale+offset.
    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    // Pointer-width load from an absolute address.
    void loadPtr(void* address, RegisterID dest)
    {
        load32(address, dest);
    }
     1603
    // Pointer-width load whose address displacement can be repatched later;
    // returns a DataLabel32 identifying the patchable offset field.
    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }
     1608
    // Pointer-width store to base+offset; a pointer is 32 bits on this
    // platform, so this forwards to store32.
    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    // Pointer-width store to base+index*scale+offset.
    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    // Store a pointer-valued immediate; narrowed to a 32-bit immediate
    // since pointers are 32 bits here.
    void storePtr(ImmPtr imm, ImplicitAddress address)
    {
        store32(Imm32(imm), address);
    }

    // Store a pointer-valued immediate to an absolute address.
    void storePtr(ImmPtr imm, void* address)
    {
        store32(Imm32(imm), address);
    }
     1628
    // Pointer-width store whose address displacement can be repatched later;
    // returns a DataLabel32 identifying the patchable offset field.
    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }
     1633
     1634
    // Pointer-width compare-and-branch. Pointers are 32 bits on this
    // platform, so every overload forwards to the matching branch32,
    // narrowing ImmPtr operands to Imm32. Returns an unlinked Jump.
    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right));
    }

    Jump branchPtr(Condition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(Condition cond, Address left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right));
    }

    Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right));
    }
     1669
    // Pointer-width test-and-branch; forwards to branchTest32 on this
    // platform. Returns an unlinked Jump.
    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    // Default mask of -1 tests all bits of the register.
    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    // Test a value in memory (base+offset).
    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    // Test a value in memory (base+index*scale+offset).
    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
    {
        return branchTest32(cond, address, mask);
    }
     1689
     1690
    // Pointer-width add that branches on the given condition; forwards to
    // branchAdd32 on this platform. Returns an unlinked Jump.
    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }
     1695
    // Pointer-width subtract that branches on the given condition; forwards
    // to branchSub32 on this platform. Returns an unlinked Jump.
    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
     1700#endif
     1701
    15841702};
    15851703
Note: See TracChangeset for help on using the changeset viewer.