/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86_h
#define MacroAssemblerX86_h

#if ENABLE(ASSEMBLER) && CPU(X86)

#include "MacroAssemblerX86Common.h"

namespace JSC {

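// MacroAssemblerX86 layers the 32-bit (IA-32) specific operations on top of
// MacroAssemblerX86Common: overloads that operate directly on absolute
// addresses (which IA-32 can encode as displacement-only memory operands),
// plus the patchable move/compare/store helpers used when linking and
// repatching generated code.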
class MacroAssemblerX86 : public MacroAssemblerX86Common {
public:
    MacroAssemblerX86()
        : m_isSSE2Present(isSSE2Present())
    {
    }

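    // Pointers are four bytes on X86, so pointer-scaled indexing uses a scale of four.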
    static const Scale ScalePtr = TimesFour;

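    // Re-expose the base class overloads; without these using-declarations the
    // overloads declared below would hide them.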
    using MacroAssemblerX86Common::add32;
    using MacroAssemblerX86Common::and32;
    using MacroAssemblerX86Common::sub32;
    using MacroAssemblerX86Common::or32;
    using MacroAssemblerX86Common::load32;
    using MacroAssemblerX86Common::store32;
    using MacroAssemblerX86Common::branch32;
    using MacroAssemblerX86Common::call;
    using MacroAssemblerX86Common::loadDouble;
    using MacroAssemblerX86Common::convertInt32ToDouble;

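    // Three-operand add: dest = src + imm, computed with lea so src is left
    // intact. Note that, unlike addl, lea does not update the flags.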
    void add32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

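    // Arithmetic on 32-bit values in memory, addressed absolutely.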
    void add32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.addl_im(imm.m_value, address.m_ptr);
    }

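    // Add-with-carry (adc); combined with add32 above, a 64-bit value in
    // memory can be incremented with two 32-bit operations.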
    void addWithCarry32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.adcl_im(imm.m_value, address.m_ptr);
    }

    void and32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.andl_im(imm.m_value, address.m_ptr);
    }

    void or32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.orl_im(imm.m_value, address.m_ptr);
    }

    void sub32(Imm32 imm, AbsoluteAddress address)
    {
        m_assembler.subl_im(imm.m_value, address.m_ptr);
    }

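    // Loads, stores and conversions that take a raw pointer rather than a
    // base register; IA-32 can address these operands directly.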
    void load32(void* address, RegisterID dest)
    {
        m_assembler.movl_mr(address, dest);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        m_assembler.cvtsi2sd_mr(src.m_ptr, dest);
    }

    void store32(Imm32 imm, void* address)
    {
        m_assembler.movl_i32m(imm.m_value, address);
    }

    void store32(RegisterID src, void* address)
    {
        m_assembler.movl_rm(src, address);
    }

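    // Compare a 32-bit value in memory (addressed absolutely) and branch on
    // the result.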
    Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.m_ptr);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

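    // Emits a near call with a yet-to-be-determined target; the returned Call
    // is marked Linkable so LinkBuffer can later bind it to a real function.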
    Call call()
    {
        return Call(m_assembler.call(), Call::Linkable);
    }

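    // Tail calls are emitted as plain jumps, then wrapped in a Call record so
    // they can be linked through the same machinery as ordinary calls.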
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }

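    // Patchable operations. Each emits an instruction carrying a full 32-bit
    // immediate (the _force32 assembler forms guarantee a 4-byte encoding even
    // for small initial values) and records its location in a DataLabelPtr so
    // the immediate can be rewritten after the code has been linked.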
    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
    {
        m_assembler.movl_i32r(initialValue.asIntptr(), dest);
        return DataLabelPtr(this);
    }

    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
    {
        m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base);
        dataLabel = DataLabelPtr(this);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
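
    // Illustrative sketch only (not part of this class): a caller could emit a
    // patchable pointer compare as
    //     DataLabelPtr label;
    //     Jump miss = branchPtrWithPatch(NotEqual, someRegister, label);
    // and later rewrite the pointer embedded at 'label' once the real value is
    // known ('someRegister' is just a placeholder here).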

    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
    {
        m_assembler.movl_i32m(initialValue.asIntptr(), address.offset, address.base);
        return DataLabelPtr(this);
    }

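    // Labels a pointer-sized load so the linked code can later be repatched
    // (for example, rewritten as an lea); on X86 this is simply a labelled
    // load32.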
    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
    {
        Label label(this);
        load32(address, dest);
        return label;
    }

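    // All double-precision support is gated on the SSE2 check performed once
    // in the constructor.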
    bool supportsFloatingPoint() const { return m_isSSE2Present; }
    // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
    bool supportsFloatingPointTruncate() const { return m_isSSE2Present; }
    bool supportsFloatingPointSqrt() const { return m_isSSE2Present; }

private:
    const bool m_isSSE2Present;

    friend class LinkBuffer;
    friend class RepatchBuffer;

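    // Hooks used by LinkBuffer and RepatchBuffer (declared as friends above)
    // to bind and rewrite call targets after code generation.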
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        X86Assembler::linkCall(code, call.m_jmp, function.value());
    }

    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(X86)

#endif // MacroAssemblerX86_h