source: webkit/trunk/JavaScriptCore/jit/JITArithmetic.cpp@ 39540

Last change on this file since 39540 was 39540, checked in by [email protected], 16 years ago

2009-01-01 Gavin Barraclough <[email protected]>

Reviewed by Darin Adler.

Allow 32-bit integers to be stored in JSImmediates, on x86-64.
Presently the top 32-bits of a 64-bit JSImmediate serve as a sign extension of a 31-bit
Presently the top 32-bits of a 64-bit JSImmediate serve as a sign extension of a 31-bit
int stored in the low word (shifted left by one, to make room for a tag). In the new
format, the top 31-bits serve as a sign extension of a 32-bit int, still shifted left by
one.

The new behavior is enabled using a flag in Platform.h, 'WTF_USE_ALTERNATE_JSIMMEDIATE'.
When this is set the constants defining the range of ints allowed to be stored as
JSImmediate values are extended. The code in JSImmediate.h can safely operate on either
format. This patch updates the JIT so that it can also operate with the new format.

~2% progression on x86-64, with & without the JIT, on sunspider & v8 tests.

  • assembler/MacroAssembler.h: (JSC::MacroAssembler::addPtr): (JSC::MacroAssembler::orPtr): (JSC::MacroAssembler::or32): (JSC::MacroAssembler::rshiftPtr): (JSC::MacroAssembler::rshift32): (JSC::MacroAssembler::subPtr): (JSC::MacroAssembler::xorPtr): (JSC::MacroAssembler::xor32): (JSC::MacroAssembler::move): (JSC::MacroAssembler::compareImm64ForBranch): (JSC::MacroAssembler::compareImm64ForBranchEquality): (JSC::MacroAssembler::jePtr): (JSC::MacroAssembler::jgePtr): (JSC::MacroAssembler::jlPtr): (JSC::MacroAssembler::jlePtr): (JSC::MacroAssembler::jnePtr): (JSC::MacroAssembler::jnzSubPtr): (JSC::MacroAssembler::joAddPtr): (JSC::MacroAssembler::jzSubPtr):
  • assembler/X86Assembler.h: (JSC::X86Assembler::addq_rr): (JSC::X86Assembler::orq_ir): (JSC::X86Assembler::subq_ir): (JSC::X86Assembler::xorq_rr): (JSC::X86Assembler::sarq_CLr): (JSC::X86Assembler::sarq_i8r): (JSC::X86Assembler::cmpq_ir):
  • jit/JIT.cpp: (JSC::JIT::compileOpStrictEq): (JSC::JIT::privateCompileMainPass): (JSC::JIT::privateCompileSlowCases): (JSC::JIT::privateCompileCTIMachineTrampolines):
  • jit/JIT.h:
  • jit/JITArithmetic.cpp: (JSC::JIT::compileFastArith_op_lshift): (JSC::JIT::compileFastArithSlow_op_lshift): (JSC::JIT::compileFastArith_op_rshift): (JSC::JIT::compileFastArithSlow_op_rshift): (JSC::JIT::compileFastArith_op_bitand): (JSC::JIT::compileFastArithSlow_op_bitand): (JSC::JIT::compileFastArith_op_mod): (JSC::JIT::compileFastArithSlow_op_mod): (JSC::JIT::compileFastArith_op_add): (JSC::JIT::compileFastArithSlow_op_add): (JSC::JIT::compileFastArith_op_mul): (JSC::JIT::compileFastArithSlow_op_mul): (JSC::JIT::compileFastArith_op_post_inc): (JSC::JIT::compileFastArithSlow_op_post_inc): (JSC::JIT::compileFastArith_op_post_dec): (JSC::JIT::compileFastArithSlow_op_post_dec): (JSC::JIT::compileFastArith_op_pre_inc): (JSC::JIT::compileFastArithSlow_op_pre_inc): (JSC::JIT::compileFastArith_op_pre_dec): (JSC::JIT::compileFastArithSlow_op_pre_dec): (JSC::JIT::compileBinaryArithOp):
  • jit/JITInlineMethods.h: (JSC::JIT::getConstantOperand): (JSC::JIT::getConstantOperandImmediateInt): (JSC::JIT::isOperandConstantImmediateInt): (JSC::JIT::isOperandConstant31BitImmediateInt): (JSC::JIT::emitFastArithDeTagImmediate): (JSC::JIT::emitFastArithDeTagImmediateJumpIfZero): (JSC::JIT::emitFastArithReTagImmediate): (JSC::JIT::emitFastArithImmToInt): (JSC::JIT::emitFastArithIntToImmNoCheck):
  • runtime/JSImmediate.h: (JSC::JSImmediate::isPositiveNumber): (JSC::JSImmediate::isNegative): (JSC::JSImmediate::rightShiftImmediateNumbers): (JSC::JSImmediate::canDoFastAdditiveOperations): (JSC::JSImmediate::makeValue): (JSC::JSImmediate::makeInt): (JSC::JSImmediate::makeBool): (JSC::JSImmediate::intValue): (JSC::JSImmediate::rawValue): (JSC::JSImmediate::toBoolean): (JSC::JSImmediate::from):
  • wtf/Platform.h:
File size: 29.2 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JSArray.h"
34#include "JSFunction.h"
35#include "Interpreter.h"
36#include "ResultType.h"
37#include "SamplingTool.h"
38
39#ifndef NDEBUG
40#include <stdio.h>
41#endif
42
43#define __ m_assembler.
44
45using namespace std;
46
47namespace JSC {
48
// Fast path for op_lshift: both operands must be immediate numbers, otherwise
// we bail to the slow case. The shifted int is re-tagged before being stored.
void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmNums'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmNum(X86::eax);
    emitJumpSlowCaseIfNotImmNum(X86::ecx);
    // Strip the immediate tags so we can shift the raw machine ints.
    emitFastArithImmToInt(X86::eax);
    emitFastArithImmToInt(X86::ecx);
#if !PLATFORM(X86)
    // Mask with 0x1f as per ecma-262 11.7.2 step 7.
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
    and32(Imm32(0x1f), X86::ecx);
#endif
    lshift32(X86::ecx, X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
    // In the alternate format any 32-bit int is representable, so no overflow check is needed.
    emitFastArithIntToImmNoCheck(X86::eax);
#else
    // Re-tag by adding the value to itself (shift left by one); this overflows - and
    // takes the slow case - when the result does not fit in the 31-bit payload.
    addSlowCase(joAdd32(X86::eax, X86::eax));
    signExtend32ToPtr(X86::eax, X86::eax);
    emitFastArithReTagImmediate(X86::eax);
#endif
    emitPutVirtualRegister(result);
}
// Slow path for op_lshift: links the slow cases planted above and calls the
// cti_op_lshift stub with the (tagged) operands in args 1 and 2.
void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    // Two slow cases: one per operand failing the immediate-number check.
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    // The re-tag overflow case: eax/ecx were de-tagged and clobbered on the fast
    // path, so the operands must be reloaded before calling the stub.
    linkSlowCase(iter);
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    // The not-immediate cases jump in here; their registers still hold the raw operands.
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::ecx, 2);
    emitCTICall(Interpreter::cti_op_lshift);
    emitPutVirtualRegister(result);
}
93
// Fast path for op_rshift (signed right shift). An arithmetic shift of the
// tagged value shifts the payload correctly; the low (tag) bit may be garbage
// afterwards, so it is unconditionally restored with the or below.
void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
{
    if (JSValue* value = getConstantImmediateNumericArg(op2)) {
        // Constant shift amount: only op1 needs a type check.
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmNum(X86::eax);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshiftPtr(Imm32(JSImmediate::getTruncatedUInt32(value) & 0x1f), X86::eax);
    } else {
        emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
        emitJumpSlowCaseIfNotImmNum(X86::eax);
        emitJumpSlowCaseIfNotImmNum(X86::ecx);
        // Only the shift amount is converted to a raw int; op1 stays tagged.
        emitFastArithImmToInt(X86::ecx);
#if !PLATFORM(X86)
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), X86::ecx);
#endif
        rshiftPtr(X86::ecx, X86::eax);
    }
    // Restore the integer tag bit clobbered by the shift.
    orPtr(Imm32(JSImmediate::TagBitTypeInteger), X86::eax);
    emitPutVirtualRegister(result);
}
// Slow path for op_rshift. The constant-shift fast path plants one slow case,
// the variable-shift path plants two; link accordingly.
void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (getConstantImmediateNumericArg(op2))
        // ecx was never loaded on the fast path - fetch op2 from its virtual register.
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
    else {
        linkSlowCase(iter);
        emitPutJITStubArg(X86::ecx, 2);
    }

    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_rshift);
    emitPutVirtualRegister(result);
}
130
// Fast path for op_bitand. AND-ing two tagged ints yields a correctly tagged
// result (the payloads are AND-ed, and the tag bit stays set); if either input
// is untagged the AND clears the tag bit, so in the variable/variable case we
// can AND first and type-check the result with a single test.
void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
{
    if (isOperandConstant31BitImmediateInt(op1)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmNum(X86::eax);
        // AND with the raw (tagged) constant, so the result is already tagged.
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), X86::eax);
    } else if (isOperandConstant31BitImmediateInt(op2)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmNum(X86::eax);
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), X86::eax);
    } else {
        emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
        andPtr(X86::edx, X86::eax);
        // Checks the AND result - catches either operand being untagged.
        emitJumpSlowCaseIfNotImmNum(X86::eax);
    }
    emitPutVirtualRegister(result);
}
// Slow path for op_bitand. In the constant cases the slow jump happens before
// the AND, so eax still holds the untouched variable operand; in the
// variable/variable case eax was clobbered by the AND, so op1 is reloaded from
// its virtual register and op2 is taken from edx (which is still intact).
void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstant31BitImmediateInt(op1)) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::eax, 2);
    } else if (isOperandConstant31BitImmediateInt(op2)) {
        emitPutJITStubArg(X86::eax, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
    } else {
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::edx, 2);
    }
    emitCTICall(Interpreter::cti_op_bitand);
    emitPutVirtualRegister(result);
}
164
// Fast path for op_mod. Bails to the slow case if either operand is not an
// immediate number, or if the divisor is zero. The remainder is produced in
// edx, re-tagged, and moved to eax before being stored.
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    emitJumpSlowCaseIfNotImmNum(X86::eax);
    emitJumpSlowCaseIfNotImmNum(X86::ecx);
#if USE(ALTERNATE_JSIMMEDIATE)
    // Explicit zero-divisor check against the immediate-encoded zero.
    addSlowCase(jePtr(X86::ecx, ImmPtr(JSImmediate::zeroImmediate())));
    emitFastArithImmToInt(X86::eax);
    emitFastArithImmToInt(X86::ecx);
    mod32(X86::ecx, X86::eax, X86::edx);
    emitFastArithIntToImmNoCheck(X86::edx);
#else
    emitFastArithDeTagImmediate(X86::eax);
    // De-tagging the divisor doubles as the zero check: a de-tagged zero is zero.
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    mod32(X86::ecx, X86::eax, X86::edx);
    signExtend32ToPtr(X86::edx, X86::edx);
    emitFastArithReTagImmediate(X86::edx);
#endif
    move(X86::edx, X86::eax);
    emitPutVirtualRegister(result);
}
// Slow path for op_mod: links the three slow cases (op1 not immediate, op2 not
// immediate, zero divisor) and calls the cti_op_mod stub.
void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    // Zero-divisor case: both operands were de-tagged on the fast path, so
    // re-tag them before handing them to the stub. The not-immediate cases
    // skip this because their operands were never de-tagged.
    linkSlowCase(iter);
    emitFastArithReTagImmediate(X86::eax);
    emitFastArithReTagImmediate(X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::ecx, 2);
    emitCTICall(Interpreter::cti_op_mod);
    emitPutVirtualRegister(result);
}
206
207void JIT::compileFastArith_op_add(Instruction* currentInstruction)
208{
209 unsigned result = currentInstruction[1].u.operand;
210 unsigned op1 = currentInstruction[2].u.operand;
211 unsigned op2 = currentInstruction[3].u.operand;
212
213 if (isOperandConstantImmediateInt(op1)) {
214 emitGetVirtualRegister(op2, X86::eax);
215 emitJumpSlowCaseIfNotImmNum(X86::eax);
216#if USE(ALTERNATE_JSIMMEDIATE)
217 // FIXME: investigate performing a 31-bit add here (can we preserve upper bit & detect overflow from low word to high?)
218 // (or, detect carry? - if const is positive, will only carry when overflowing from negative to positive?)
219 emitFastArithImmToInt(X86::eax);
220 addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
221 emitFastArithIntToImmNoCheck(X86::eax);
222#else
223 addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
224 signExtend32ToPtr(X86::eax, X86::eax);
225#endif
226 emitPutVirtualRegister(result);
227 } else if (isOperandConstantImmediateInt(op2)) {
228 emitGetVirtualRegister(op1, X86::eax);
229 emitJumpSlowCaseIfNotImmNum(X86::eax);
230#if USE(ALTERNATE_JSIMMEDIATE)
231 emitFastArithImmToInt(X86::eax);
232 addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
233 emitFastArithIntToImmNoCheck(X86::eax);
234#else
235 addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
236 signExtend32ToPtr(X86::eax, X86::eax);
237#endif
238 emitPutVirtualRegister(result);
239 } else {
240 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
241 if (types.first().mightBeNumber() && types.second().mightBeNumber())
242 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
243 else {
244 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
245 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
246 emitCTICall(Interpreter::cti_op_add);
247 emitPutVirtualRegister(result);
248 }
249 }
250}
// Slow path for op_add. In the non-alternate format the failed (overflowed)
// add left a bogus value in eax, so the constant is subtracted back out to
// recover the original operand before calling the stub.
void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
#if USE(ALTERNATE_JSIMMEDIATE)
        // eax was de-tagged on the fast path, so reload both operands from their virtual registers.
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
#else
        Jump notImm = getSlowCase(iter);
        // Overflow case: undo the failed add to restore the original tagged op2 in eax.
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::eax, 2);
#endif
        emitCTICall(Interpreter::cti_op_add);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
#if USE(ALTERNATE_JSIMMEDIATE)
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
#else
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        emitPutJITStubArg(X86::eax, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
#endif
        emitCTICall(Interpreter::cti_op_add);
        emitPutVirtualRegister(result);
    } else {
        // The fast path only planted binary-arith slow cases when both operands
        // might be numbers; otherwise it called the stub directly (no slow case).
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
295
// Fast path for op_mul. An inline multiply is only planted when one operand is
// a constant int greater than zero (a non-positive constant could require a -0
// result, which is not representable as an immediate).
void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmNum(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
        emitFastArithImmToInt(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax);
#else
        // De-tagging leaves the payload shifted left by one; multiplying by the
        // raw constant keeps it in that form, so only a re-tag is needed after.
        emitFastArithDeTagImmediate(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax);
#endif
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmNum(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
        emitFastArithImmToInt(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax);
#else
        emitFastArithDeTagImmediate(X86::eax);
        addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax);
#endif
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
// Slow path for op_mul. The positive-constant fast path plants exactly two slow
// cases (type check + overflow); otherwise defer to the generic binary-arith
// slow case, which also handles the extra -0 slow case.
void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_mul);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
353
// Fast path for op_post_inc: 'result' receives the original value (left in
// eax), while 'srcDst' is updated with the incremented value computed in edx.
void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, X86::eax);
    // Keep the original in eax; increment the copy in edx.
    move(X86::eax, X86::edx);
    emitJumpSlowCaseIfNotImmNum(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithImmToInt(X86::edx);
    // Overflow of the 32-bit int takes the slow case.
    addSlowCase(joAdd32(Imm32(1), X86::edx));
    emitFastArithIntToImmNoCheck(X86::edx);
#else
    // Adding (1 << payload shift) increments the payload of the tagged value directly.
    addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx));
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    emitPutVirtualRegister(srcDst, X86::edx);
    // Stores the default result register (eax) - the unmodified original value.
    emitPutVirtualRegister(result);
}
// Slow path for op_post_inc: calls the stub with the original value; after the
// call edx holds the incremented value to be stored back to srcDst.
void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_post_inc);
    emitPutVirtualRegister(srcDst, X86::edx);
    emitPutVirtualRegister(result);
}
379
// Fast path for op_post_dec: mirror of op_post_inc, subtracting instead of adding.
void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, X86::eax);
    // Keep the original in eax; decrement the copy in edx.
    move(X86::eax, X86::edx);
    emitJumpSlowCaseIfNotImmNum(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithImmToInt(X86::edx);
    addSlowCase(joSub32(Imm32(1), X86::edx));
    emitFastArithIntToImmNoCheck(X86::edx);
#else
    addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx));
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    emitPutVirtualRegister(srcDst, X86::edx);
    // Stores eax - the unmodified original value - as the result.
    emitPutVirtualRegister(result);
}
// Slow path for op_post_dec: mirror of the op_post_inc slow path.
void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_post_dec);
    emitPutVirtualRegister(srcDst, X86::edx);
    emitPutVirtualRegister(result);
}
405
// Fast path for op_pre_inc: increments srcDst in place.
void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, X86::eax);
    emitJumpSlowCaseIfNotImmNum(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithImmToInt(X86::eax);
    // FIXME: Could add ptr & specify int64; no need to re-sign-extend?
    addSlowCase(joAdd32(Imm32(1), X86::eax));
    emitFastArithIntToImmNoCheck(X86::eax);
#else
    // Adding (1 << payload shift) increments the payload of the tagged value directly.
    addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax));
    signExtend32ToPtr(X86::eax, X86::eax);
#endif
    emitPutVirtualRegister(srcDst);
}
// Slow path for op_pre_inc. On overflow the fast path has clobbered eax, so
// the operand is reloaded; the not-immediate case still has the original in eax.
void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, X86::eax);
    notImm.link(this);
    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_pre_inc);
    emitPutVirtualRegister(srcDst);
}
431
// Fast path for op_pre_dec: mirror of op_pre_inc, subtracting instead of adding.
void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, X86::eax);
    emitJumpSlowCaseIfNotImmNum(X86::eax);
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithImmToInt(X86::eax);
    addSlowCase(joSub32(Imm32(1), X86::eax));
    emitFastArithIntToImmNoCheck(X86::eax);
#else
    addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax));
    signExtend32ToPtr(X86::eax, X86::eax);
#endif
    emitPutVirtualRegister(srcDst);
}
// Slow path for op_pre_dec: mirror of the op_pre_inc slow path.
void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    // Overflow case: reload the original operand clobbered by the failed subtract.
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, X86::eax);
    notImm.link(this);
    emitPutJITStubArg(X86::eax, 1);
    emitCTICall(Interpreter::cti_op_pre_dec);
    emitPutVirtualRegister(srcDst);
}
456
457
458#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
459
460void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes)
461{
462 emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
463 emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
464 if (opcodeID == op_add)
465 emitCTICall(Interpreter::cti_op_add);
466 else if (opcodeID == op_sub)
467 emitCTICall(Interpreter::cti_op_sub);
468 else {
469 ASSERT(opcodeID == op_mul);
470 emitCTICall(Interpreter::cti_op_mul);
471 }
472 emitPutVirtualRegister(dst);
473}
474
// With arithmetic optimization disabled no slow cases are ever planted, so
// this must never be reached.
void JIT::compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned, unsigned, unsigned, OperandTypes)
{
    ASSERT_NOT_REACHED();
}
479
480#else
481
482typedef X86Assembler::JmpSrc JmpSrc;
483typedef X86Assembler::JmpDst JmpDst;
484typedef X86Assembler::XMMRegisterID XMMRegisterID;
485
486#if PLATFORM(MAC)
487
// On Mac the SSE2 capability check is a compile-time constant: every x86 Mac
// ships with hardware supporting at least SSE2, so no runtime cpuid is needed.
static inline bool isSSE2Present()
{
    return true;
}
492
493#else
494
495static bool isSSE2Present()
496{
497 static const int SSE2FeatureBit = 1 << 26;
498 struct SSE2Check {
499 SSE2Check()
500 {
501 int flags;
502#if COMPILER(MSVC)
503 _asm {
504 mov eax, 1 // cpuid function 1 gives us the standard feature set
505 cpuid;
506 mov flags, edx;
507 }
508#else
509 flags = 0;
510 // FIXME: Add GCC code to do above asm
511#endif
512 present = (flags & SSE2FeatureBit) != 0;
513 }
514 bool present;
515 };
516 static SSE2Check check;
517 return check.present;
518}
519
520#endif
521
522/*
523 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
524
525 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
526 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
527
528 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
529 control will fall through from the code planted.
530*/
void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
{
    // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
    // The add/sar pair mimics the immediate encode/decode round trip (shift into
    // payload position and back), discarding bits that would not survive it.
    __ cvttsd2si_rr(xmmSource, tempReg1);
    __ addl_rr(tempReg1, tempReg1);
    __ sarl_i8r(1, tempReg1);
    __ cvtsi2sd_rr(tempReg1, tempXmm);
    // Compare & branch if immediate.
    __ ucomis_rr(tempXmm, xmmSource);
    JmpSrc resultIsImm = __ je();
    JmpDst resultLookedLikeImmButActuallyIsnt = __ label();

    // Store the result to the JSNumberCell and jump.
    __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
    if (jsNumberCell != X86::eax)
        __ movl_rr(jsNumberCell, X86::eax);
    emitPutVirtualRegister(dst);
    *wroteJSNumberCell = __ jmp();

    __ link(resultIsImm, __ label());
    // value == (double)(JSImmediate)value... or at least, it looks that way...
    // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
    // The parity flag is set by ucomis when the comparison is unordered, i.e. NaN.
    __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
    // Extract the top 16 bits of the double; 0x8000 here means only the sign bit
    // is set, which (given the value compared equal to an int) identifies -0.
    __ pextrw_irr(3, xmmSource, tempReg2);
    __ cmpl_ir(0x8000, tempReg2);
    __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
    // Yes it really really really is representable as a JSImmediate.
    emitFastArithIntToImmNoCheck(tempReg1);
    if (tempReg1 != X86::eax)
        __ movl_rr(tempReg1, X86::eax);
    emitPutVirtualRegister(dst);
}
563
// SSE2-optimized binary arithmetic (add/sub/mul). When one operand's profiled
// type says it is a reusable number cell, a double fast path is planted in
// addition to the integer fast path; otherwise only the integer path is used.
// The order in which slow cases are planted here must match the link order in
// compileBinaryArithOpSlowCase.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    Structure* numberStructure = m_globalData->numberStructure.get();
    JmpSrc wasJSNumberCell1;
    JmpSrc wasJSNumberCell1b;
    JmpSrc wasJSNumberCell2;
    JmpSrc wasJSNumberCell2b;

    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);

    if (types.second().isReusable() && isSSE2Present()) {
        ASSERT(types.second().mightBeNumber());

        // Check op2 is a number
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            // Not an immediate: must be a cell with the number structure, else slow case.
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1) In this case src2 is a reusable number cell.
        //     Slow case if src1 is not a number type.
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src1 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src1 is an immediate
        __ link(op1imm, __ label());
        emitFastArithImmToInt(X86::eax);
        __ cvtsi2sd_rr(X86::eax, X86::xmm0);
        // (1c)
        __ link(loadedDouble, __ label());
        if (opcodeID == op_add)
            __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        }

        // Reuse src2's number cell for the result (or write an immediate if possible).
        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
        wasJSNumberCell2b = __ jmp();

        // (2) This handles cases where src2 is an immediate number.
        //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
        __ link(op2imm, __ label());
        emitJumpSlowCaseIfNotImmNum(X86::eax);
    } else if (types.first().isReusable() && isSSE2Present()) {
        ASSERT(types.first().mightBeNumber());

        // Check op1 is a number
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1) In this case src1 is a reusable number cell.
        //     Slow case if src2 is not a number type.
        __ testl_i32r(JSImmediate::TagBitTypeInteger, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src2 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src2 is an immediate
        __ link(op2imm, __ label());
        emitFastArithImmToInt(X86::edx);
        __ cvtsi2sd_rr(X86::edx, X86::xmm1);
        // (1c)
        __ link(loadedDouble, __ label());
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        if (opcodeID == op_add)
            __ addsd_rr(X86::xmm1, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_rr(X86::xmm1, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_rr(X86::xmm1, X86::xmm0);
        }
        // NOTE(review): this store + emitPutVirtualRegister pair looks redundant -
        // putDoubleResultToJSNumberCellOrJSImmediate below writes both the cell and
        // dst again on every path. Appears harmless, but verify before removing.
        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
        emitPutVirtualRegister(dst);

        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
        wasJSNumberCell1b = __ jmp();

        // (2) This handles cases where src1 is an immediate number.
        //     Two slow cases - either src2 isn't an immediate, or the subtract overflows.
        __ link(op1imm, __ label());
        emitJumpSlowCaseIfNotImmNum(X86::edx);
    } else
        emitJumpSlowCaseIfNotImmNums(X86::eax, X86::edx, X86::ecx);

    // Integer fast path: both operands are immediate numbers at this point.
    if (opcodeID == op_add) {
        // De-tag one operand so the tagged add yields a correctly tagged result.
        emitFastArithDeTagImmediate(X86::eax);
        __ addl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
    } else if (opcodeID == op_sub) {
        // Subtracting two tagged values cancels the tags; re-tag the result.
        __ subl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax);
    } else {
        ASSERT(opcodeID == op_mul);
        // convert eax & edx from JSImmediates to ints, and check if either are zero
        emitFastArithImmToInt(X86::edx);
        JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
        __ testl_rr(X86::edx, X86::edx);
        JmpSrc op2NonZero = __ jne();
        __ link(op1Zero, __ label());
        // if either input is zero, add the two together, and check if the result is < 0.
        // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
        __ movl_rr(X86::eax, X86::ecx);
        __ addl_rr(X86::edx, X86::ecx);
        addSlowCase(__ js());
        // Skip the above check if neither input is zero
        __ link(op2NonZero, __ label());
        __ imull_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax);
    }
    emitPutVirtualRegister(dst);

    // Join points for the double paths, which have already written their results.
    if (types.second().isReusable() && isSSE2Present()) {
        __ link(wasJSNumberCell2, __ label());
        __ link(wasJSNumberCell2b, __ label());
    }
    else if (types.first().isReusable() && isSSE2Present()) {
        __ link(wasJSNumberCell1, __ label());
        __ link(wasJSNumberCell1b, __ label());
    }
}
714
715void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
716{
717 linkSlowCase(iter);
718 if (types.second().isReusable() && isSSE2Present()) {
719 if (!types.first().definitelyIsNumber()) {
720 linkSlowCaseIfNotJSCell(iter, src1);
721 linkSlowCase(iter);
722 }
723 if (!types.second().definitelyIsNumber()) {
724 linkSlowCaseIfNotJSCell(iter, src2);
725 linkSlowCase(iter);
726 }
727 } else if (types.first().isReusable() && isSSE2Present()) {
728 if (!types.first().definitelyIsNumber()) {
729 linkSlowCaseIfNotJSCell(iter, src1);
730 linkSlowCase(iter);
731 }
732 if (!types.second().definitelyIsNumber()) {
733 linkSlowCaseIfNotJSCell(iter, src2);
734 linkSlowCase(iter);
735 }
736 }
737 linkSlowCase(iter);
738
739 // additional entry point to handle -0 cases.
740 if (opcodeID == op_mul)
741 linkSlowCase(iter);
742
743 emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
744 emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
745 if (opcodeID == op_add)
746 emitCTICall(Interpreter::cti_op_add);
747 else if (opcodeID == op_sub)
748 emitCTICall(Interpreter::cti_op_sub);
749 else {
750 ASSERT(opcodeID == op_mul);
751 emitCTICall(Interpreter::cti_op_mul);
752 }
753 emitPutVirtualRegister(dst);
754}
755
756#endif
757
758} // namespace JSC
759
760#endif // ENABLE(JIT)
Note: See TracBrowser for help on using the repository browser.