Changeset 52729 in webkit for trunk/JavaScriptCore/assembler
- Timestamp: Jan 4, 2010, 3:38:56 AM (15 years ago)
- Location:
- trunk/JavaScriptCore/assembler
- Files: 12 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/JavaScriptCore/assembler/ARMAssembler.cpp
r50553 r52729 27 27 #include "config.h" 28 28 29 #if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)29 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 30 30 31 31 #include "ARMAssembler.h" … … 403 403 } // namespace JSC 404 404 405 #endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)405 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) -
trunk/JavaScriptCore/assembler/ARMAssembler.h
r51067 r52729 30 30 #include <wtf/Platform.h> 31 31 32 #if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)32 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 33 33 34 34 #include "AssemblerBufferWithConstantPool.h" … … 813 813 } // namespace JSC 814 814 815 #endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)815 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 816 816 817 817 #endif // ARMAssembler_h -
trunk/JavaScriptCore/assembler/ARMv7Assembler.h
r50539 r52729 29 29 #include <wtf/Platform.h> 30 30 31 #if ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)31 #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) 32 32 33 33 #include "AssemblerBuffer.h" … … 1833 1833 } // namespace JSC 1834 1834 1835 #endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_THUMB2)1835 #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) 1836 1836 1837 1837 #endif // ARMAssembler_h -
trunk/JavaScriptCore/assembler/AbstractMacroAssembler.h
r47186 r52729 174 174 explicit Imm32(int32_t value) 175 175 : m_value(value) 176 #if PLATFORM(ARM)176 #if CPU(ARM) 177 177 , m_isPointer(false) 178 178 #endif … … 180 180 } 181 181 182 #if ! PLATFORM(X86_64)182 #if !CPU(X86_64) 183 183 explicit Imm32(ImmPtr ptr) 184 184 : m_value(ptr.asIntptr()) 185 #if PLATFORM(ARM)185 #if CPU(ARM) 186 186 , m_isPointer(true) 187 187 #endif … … 191 191 192 192 int32_t m_value; 193 #if PLATFORM(ARM)193 #if CPU(ARM) 194 194 // We rely on being able to regenerate code to recover exception handling 195 195 // information. Since ARMv7 supports 16-bit immediates there is a danger -
trunk/JavaScriptCore/assembler/MacroAssembler.h
r50595 r52729 31 31 #if ENABLE(ASSEMBLER) 32 32 33 #if PLATFORM(ARM_THUMB2)33 #if CPU(ARM_THUMB2) 34 34 #include "MacroAssemblerARMv7.h" 35 35 namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; 36 36 37 #elif PLATFORM(ARM_TRADITIONAL)37 #elif CPU(ARM_TRADITIONAL) 38 38 #include "MacroAssemblerARM.h" 39 39 namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; }; 40 40 41 #elif PLATFORM(X86)41 #elif CPU(X86) 42 42 #include "MacroAssemblerX86.h" 43 43 namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }; 44 44 45 #elif PLATFORM(X86_64)45 #elif CPU(X86_64) 46 46 #include "MacroAssemblerX86_64.h" 47 47 namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }; … … 61 61 using MacroAssemblerBase::branch32; 62 62 using MacroAssemblerBase::branch16; 63 #if PLATFORM(X86_64)63 #if CPU(X86_64) 64 64 using MacroAssemblerBase::branchPtr; 65 65 using MacroAssemblerBase::branchTestPtr; … … 134 134 // Ptr methods 135 135 // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents. 136 #if !PLATFORM(X86_64) 136 // FIXME: should this use a test for 32-bitness instead of this specific exception? 137 #if !CPU(X86_64) 137 138 void addPtr(RegisterID src, RegisterID dest) 138 139 { -
trunk/JavaScriptCore/assembler/MacroAssemblerARM.cpp
r48782 r52729 27 27 #include "config.h" 28 28 29 #if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)29 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 30 30 31 31 #include "MacroAssemblerARM.h" … … 63 63 const bool MacroAssemblerARM::s_isVFPPresent = isVFPPresent(); 64 64 65 #if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT 65 #if CPU(ARMV5_OR_LOWER) 66 /* On ARMv5 and below, natural alignment is required. */ 66 67 void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) 67 68 { … … 92 93 } 93 94 94 #endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)95 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) -
trunk/JavaScriptCore/assembler/MacroAssemblerARM.h
r51067 r52729 31 31 #include <wtf/Platform.h> 32 32 33 #if ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)33 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 34 34 35 35 #include "ARMAssembler.h" … … 225 225 } 226 226 227 #if defined(ARM_REQUIRE_NATURAL_ALIGNMENT) && ARM_REQUIRE_NATURAL_ALIGNMENT227 #if CPU(ARMV5_OR_LOWER) 228 228 void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest); 229 229 #else … … 929 929 } 930 930 931 #endif // ENABLE(ASSEMBLER) && PLATFORM(ARM_TRADITIONAL)931 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) 932 932 933 933 #endif // MacroAssemblerARM_h -
trunk/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
r49509 r52729 38 38 // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid 39 39 // instruction address on the platform (for example, check any alignment requirements). 40 #if PLATFORM(ARM_THUMB2)40 #if CPU(ARM_THUMB2) 41 41 // ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded 42 42 // into the processor are decorated with the bottom bit set, indicating that this is … … 131 131 132 132 explicit MacroAssemblerCodePtr(void* value) 133 #if PLATFORM(ARM_THUMB2)133 #if CPU(ARM_THUMB2) 134 134 // Decorate the pointer as a thumb code pointer. 135 135 : m_value(reinterpret_cast<char*>(value) + 1) … … 148 148 149 149 void* executableAddress() const { return m_value; } 150 #if PLATFORM(ARM_THUMB2)150 #if CPU(ARM_THUMB2) 151 151 // To use this pointer as a data address remove the decoration. 152 152 void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; } -
trunk/JavaScriptCore/assembler/MacroAssemblerX86.h
r46598 r52729 29 29 #include <wtf/Platform.h> 30 30 31 #if ENABLE(ASSEMBLER) && PLATFORM(X86)31 #if ENABLE(ASSEMBLER) && CPU(X86) 32 32 33 33 #include "MacroAssemblerX86Common.h" -
trunk/JavaScriptCore/assembler/MacroAssemblerX86Common.h
r50595 r52729 543 543 } 544 544 545 #if PLATFORM(X86_64)545 #if CPU(X86_64) 546 546 void move(RegisterID src, RegisterID dest) 547 547 { … … 945 945 friend class MacroAssemblerX86; 946 946 947 #if PLATFORM(X86)947 #if CPU(X86) 948 948 #if PLATFORM(MAC) 949 949 … … 998 998 999 999 #endif // PLATFORM(MAC) 1000 #elif !defined(NDEBUG) // PLATFORM(X86)1000 #elif !defined(NDEBUG) // CPU(X86) 1001 1001 1002 1002 // On x86-64 we should never be checking for SSE2 in a non-debug build, -
trunk/JavaScriptCore/assembler/MacroAssemblerX86_64.h
r50595 r52729 29 29 #include <wtf/Platform.h> 30 30 31 #if ENABLE(ASSEMBLER) && PLATFORM(X86_64)31 #if ENABLE(ASSEMBLER) && CPU(X86_64) 32 32 33 33 #include "MacroAssemblerX86Common.h" -
trunk/JavaScriptCore/assembler/X86Assembler.h
r47834 r52729 29 29 #include <wtf/Platform.h> 30 30 31 #if ENABLE(ASSEMBLER) && ( PLATFORM(X86) || PLATFORM(X86_64))31 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) 32 32 33 33 #include "AssemblerBuffer.h" … … 51 51 edi, 52 52 53 #if PLATFORM(X86_64)53 #if CPU(X86_64) 54 54 r8, 55 55 r9, … … 119 119 OP_CMP_EvGv = 0x39, 120 120 OP_CMP_GvEv = 0x3B, 121 #if PLATFORM(X86_64)121 #if CPU(X86_64) 122 122 PRE_REX = 0x40, 123 123 #endif 124 124 OP_PUSH_EAX = 0x50, 125 125 OP_POP_EAX = 0x58, 126 #if PLATFORM(X86_64)126 #if CPU(X86_64) 127 127 OP_MOVSXD_GvEv = 0x63, 128 128 #endif … … 297 297 // Arithmetic operations: 298 298 299 #if ! PLATFORM(X86_64)299 #if !CPU(X86_64) 300 300 void adcl_im(int imm, void* addr) 301 301 { … … 347 347 } 348 348 349 #if PLATFORM(X86_64)349 #if CPU(X86_64) 350 350 void addq_rr(RegisterID src, RegisterID dst) 351 351 { … … 424 424 } 425 425 426 #if PLATFORM(X86_64)426 #if CPU(X86_64) 427 427 void andq_rr(RegisterID src, RegisterID dst) 428 428 { … … 510 510 } 511 511 512 #if PLATFORM(X86_64)512 #if CPU(X86_64) 513 513 void orq_rr(RegisterID src, RegisterID dst) 514 514 { … … 576 576 } 577 577 578 #if PLATFORM(X86_64)578 #if CPU(X86_64) 579 579 void subq_rr(RegisterID src, RegisterID dst) 580 580 { … … 642 642 } 643 643 644 #if PLATFORM(X86_64)644 #if CPU(X86_64) 645 645 void xorq_rr(RegisterID src, RegisterID dst) 646 646 { … … 690 690 } 691 691 692 #if PLATFORM(X86_64)692 #if CPU(X86_64) 693 693 void sarq_CLr(RegisterID dst) 694 694 { … … 790 790 } 791 791 792 #if PLATFORM(X86_64)792 #if CPU(X86_64) 793 793 void cmpq_rr(RegisterID src, RegisterID dst) 794 794 { … … 898 898 } 899 899 900 #if PLATFORM(X86_64)900 #if CPU(X86_64) 901 901 void testq_rr(RegisterID src, RegisterID dst) 902 902 { … … 972 972 } 973 973 974 #if PLATFORM(X86_64)974 #if CPU(X86_64) 975 975 void xchgq_rr(RegisterID src, RegisterID dst) 976 976 { … … 1002 1002 { 1003 1003 m_formatter.oneByteOp(OP_MOV_EAXOv); 1004 #if PLATFORM(X86_64)1004 #if CPU(X86_64) 1005 1005 
m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); 1006 1006 #else … … 1039 1039 { 1040 1040 m_formatter.oneByteOp(OP_MOV_OvEAX); 1041 #if PLATFORM(X86_64)1041 #if CPU(X86_64) 1042 1042 m_formatter.immediate64(reinterpret_cast<int64_t>(addr)); 1043 1043 #else … … 1046 1046 } 1047 1047 1048 #if PLATFORM(X86_64)1048 #if CPU(X86_64) 1049 1049 void movq_rr(RegisterID src, RegisterID dst) 1050 1050 { … … 1158 1158 m_formatter.oneByteOp(OP_LEA, dst, base, offset); 1159 1159 } 1160 #if PLATFORM(X86_64)1160 #if CPU(X86_64) 1161 1161 void leaq_mr(int offset, RegisterID base, RegisterID dst) 1162 1162 { … … 1324 1324 } 1325 1325 1326 #if ! PLATFORM(X86_64)1326 #if !CPU(X86_64) 1327 1327 void cvtsi2sd_mr(void* address, XMMRegisterID dst) 1328 1328 { … … 1344 1344 } 1345 1345 1346 #if PLATFORM(X86_64)1346 #if CPU(X86_64) 1347 1347 void movq_rr(XMMRegisterID src, RegisterID dst) 1348 1348 { … … 1370 1370 } 1371 1371 1372 #if ! PLATFORM(X86_64)1372 #if !CPU(X86_64) 1373 1373 void movsd_mr(void* address, XMMRegisterID dst) 1374 1374 { … … 1536 1536 static void repatchLoadPtrToLEA(void* where) 1537 1537 { 1538 #if PLATFORM(X86_64)1538 #if CPU(X86_64) 1539 1539 // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix. 1540 1540 // Skip over the prefix byte. … … 1680 1680 } 1681 1681 1682 #if ! PLATFORM(X86_64)1682 #if !CPU(X86_64) 1683 1683 void oneByteOp(OneByteOpcodeID opcode, int reg, void* address) 1684 1684 { … … 1723 1723 } 1724 1724 1725 #if ! 
PLATFORM(X86_64)1725 #if !CPU(X86_64) 1726 1726 void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address) 1727 1727 { … … 1733 1733 #endif 1734 1734 1735 #if PLATFORM(X86_64)1735 #if CPU(X86_64) 1736 1736 // Quad-word-sized operands: 1737 1737 // … … 1892 1892 static const RegisterID hasSib = X86Registers::esp; 1893 1893 static const RegisterID noIndex = X86Registers::esp; 1894 #if PLATFORM(X86_64)1894 #if CPU(X86_64) 1895 1895 static const RegisterID noBase2 = X86Registers::r13; 1896 1896 static const RegisterID hasSib2 = X86Registers::r12; … … 1968 1968 { 1969 1969 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. 1970 #if PLATFORM(X86_64)1970 #if CPU(X86_64) 1971 1971 if ((base == hasSib) || (base == hasSib2)) { 1972 1972 #else … … 1983 1983 } 1984 1984 } else { 1985 #if PLATFORM(X86_64)1985 #if CPU(X86_64) 1986 1986 if (!offset && (base != noBase) && (base != noBase2)) 1987 1987 #else … … 2002 2002 { 2003 2003 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. 2004 #if PLATFORM(X86_64)2004 #if CPU(X86_64) 2005 2005 if ((base == hasSib) || (base == hasSib2)) { 2006 2006 #else … … 2019 2019 ASSERT(index != noIndex); 2020 2020 2021 #if PLATFORM(X86_64)2021 #if CPU(X86_64) 2022 2022 if (!offset && (base != noBase) && (base != noBase2)) 2023 2023 #else … … 2034 2034 } 2035 2035 2036 #if ! PLATFORM(X86_64)2036 #if !CPU(X86_64) 2037 2037 void memoryModRM(int reg, void* address) 2038 2038 { … … 2049 2049 } // namespace JSC 2050 2050 2051 #endif // ENABLE(ASSEMBLER) && PLATFORM(X86)2051 #endif // ENABLE(ASSEMBLER) && CPU(X86) 2052 2052 2053 2053 #endif // X86Assembler_h
Note: See TracChangeset for help on using the changeset viewer.