source: webkit/trunk/JavaScriptCore/jit/JITArithmetic.cpp@ 41089

Last change on this file since 41089 was 41089, checked in by [email protected], 16 years ago

2009-02-19 Gavin Barraclough <[email protected]>

Reviewed by Oliver Hunt.

Fix for x86-64. Where the JavaScriptCore text segment lies outside
a 2gb range of the heap containing JIT generated code, callbacks
from JIT code to the stub functions in Interpreter will be incorrectly
linked.

No performance impact on Sunspider, 1% regression on v8-tests,
due to a 3% regression on richards.

  • assembler/AbstractMacroAssembler.h: (JSC::AbstractMacroAssembler::Call::Call): (JSC::AbstractMacroAssembler::Jump::link): (JSC::AbstractMacroAssembler::Jump::linkTo): (JSC::AbstractMacroAssembler::CodeLocationJump::relink): (JSC::AbstractMacroAssembler::CodeLocationCall::relink): (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkCallerToFunction): (JSC::AbstractMacroAssembler::PatchBuffer::link): (JSC::AbstractMacroAssembler::PatchBuffer::linkTailRecursive): (JSC::AbstractMacroAssembler::differenceBetween):
  • assembler/MacroAssembler.h: (JSC::MacroAssembler::tailRecursiveCall): (JSC::MacroAssembler::makeTailRecursiveCall):
  • assembler/MacroAssemblerX86.h: (JSC::MacroAssemblerX86::call):
  • assembler/MacroAssemblerX86Common.h:
  • assembler/MacroAssemblerX86_64.h: (JSC::MacroAssemblerX86_64::call): (JSC::MacroAssemblerX86_64::moveWithPatch): (JSC::MacroAssemblerX86_64::branchPtrWithPatch): (JSC::MacroAssemblerX86_64::storePtrWithPatch):
  • assembler/X86Assembler.h: (JSC::X86Assembler::jmp_r): (JSC::X86Assembler::linkJump): (JSC::X86Assembler::patchJump): (JSC::X86Assembler::patchCall): (JSC::X86Assembler::linkCall): (JSC::X86Assembler::patchAddress):
  • interpreter/Interpreter.cpp: (JSC::Interpreter::tryCTICachePutByID):
  • jit/JIT.cpp: (JSC::JIT::privateCompile): (JSC::JIT::privateCompileCTIMachineTrampolines):
  • jit/JIT.h:
  • jit/JITArithmetic.cpp: (JSC::JIT::putDoubleResultToJSNumberCellOrJSImmediate): (JSC::JIT::compileBinaryArithOp):
  • jit/JITPropertyAccess.cpp: (JSC::JIT::privateCompilePutByIdTransition): (JSC::JIT::privateCompileGetByIdSelf): (JSC::JIT::privateCompilePutByIdReplace):
File size: 38.6 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JSArray.h"
34#include "JSFunction.h"
35#include "Interpreter.h"
36#include "ResultType.h"
37#include "SamplingTool.h"
38
39#ifndef NDEBUG
40#include <stdio.h>
41#endif
42
43#define __ m_assembler.
44
45using namespace std;
46
47namespace JSC {
48
// op_lshift fast path: left-shift op1 by op2 when both are immediate ints.
// Slow cases are recorded in this order: op1 not an immediate int, op2 not an
// immediate int, and (32-bit immediate encoding only) overflow on re-tag.
// compileFastArithSlow_op_lshift must link them in the same order.
void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
    // Mask with 0x1f as per ecma-262 11.7.2 step 7.
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
    and32(Imm32(0x1f), regT2);
#endif
    lshift32(regT2, regT0);
#if !USE(ALTERNATE_JSIMMEDIATE)
    // Doubling regT0 restores the payload position for re-tagging; if that
    // overflows the result does not fit in an immediate, so take the slow case.
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
// op_lshift slow path: link the slow cases recorded by the fast path (in the
// same order), marshal the operands, and call the cti_op_lshift stub.
void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter); // op1 not an immediate int
    linkSlowCase(iter); // op2 not an immediate int
#else
    // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter); // overflow on re-tag: regT0 was clobbered by the shift, so reload both operands
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(regT0, 1);
    emitPutJITStubArg(regT2, 2);
    emitCTICall(Interpreter::cti_op_lshift);
    emitPutVirtualRegister(result);
}
91
// op_rshift fast path: signed right-shift. When op2 is a constant int the
// shift amount is masked at compile time and only one slow case (op1 not an
// immediate int) is recorded; otherwise both operands are checked and the
// amount is masked at runtime.
void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
{
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
        // The shift operates on the still-tagged value; the tag bits are re-set below.
        rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT2);
        emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), regT2);
#endif
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(regT2, regT0);
#else
        rshiftPtr(regT2, regT0);
#endif
    }
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // Restore the number tag bits that the tagged shift may have moved out of place.
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
// op_rshift slow path. The fast path records one slow case when op2 is a
// constant int (op1 not an immediate int), otherwise two (one per operand).
void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // op1 not an immediate int
    if (isOperandConstantImmediateInt(op2))
        emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    else {
        linkSlowCase(iter); // op2 not an immediate int
        emitPutJITStubArg(regT2, 2);
    }

    emitPutJITStubArg(regT0, 1);
    emitCTICall(Interpreter::cti_op_rshift);
    emitPutVirtualRegister(result);
}
140
// op_bitand fast path. A constant int operand is folded into an and-immediate
// against the other operand; in the generic case the two tagged values are
// and-ed directly (the tag survives the and) and the result is type-checked.
void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
{
    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        // A non-negative Imm32 is zero-extended, so the and cleared the tag
        // bits and the value must be re-tagged; a negative immediate is
        // sign-extended and presumably leaves the tag intact - TODO confirm.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        // See note above: re-tag only when the immediate was non-negative.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        // The immediate-int check can be performed after the and: if either
        // input was not an immediate int, the result will fail the check too.
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
// op_bitand slow path: one slow case in every configuration of the fast path.
// Constant operands are reloaded from their virtual registers; the loaded
// operand(s) are still live in the registers the fast path used.
void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
        emitPutJITStubArg(regT0, 2);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitPutJITStubArg(regT0, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    } else {
        emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
        emitPutJITStubArg(regT1, 2);
    }
    emitCTICall(Interpreter::cti_op_bitand);
    emitPutVirtualRegister(result);
}
188
189#if PLATFORM(X86) || PLATFORM(X86_64)
// op_mod fast path (x86/x86-64 only): idiv requires the dividend in eax (cdq
// sign-extends it into edx) and leaves the remainder in edx. Slow cases, in
// order: op1 not an immediate int, op2 not an immediate int, zero divisor.
// NOTE(review): no guard for INT_MIN % -1 is visible here; idivl faults on
// that input - confirm the immediate encoding excludes it or that it is
// handled elsewhere.
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
#if USE(ALTERNATE_JSIMMEDIATE)
    // Compare against the encoded zero value to catch a zero divisor.
    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValuePtr::encode(js0()))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
#else
    emitFastArithDeTagImmediate(X86::eax);
    // De-tagging the divisor doubles as the zero check.
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    emitFastArithReTagImmediate(X86::edx, X86::eax);
    emitPutVirtualRegister(result);
}
// op_mod slow path. In the 32-bit encoding the two not-immediate cases fire
// before the operands were de-tagged, but the zero-divisor case fires after,
// so both registers are re-tagged before the stub call on that path.
void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    linkSlowCase(iter); // op1 not an immediate int
    linkSlowCase(iter); // op2 not an immediate int
    linkSlowCase(iter); // zero divisor
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter); // zero divisor: eax (and ecx) were de-tagged, restore them
    emitFastArithReTagImmediate(X86::eax, X86::eax);
    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::ecx, 2);
    emitCTICall(Interpreter::cti_op_mod);
    emitPutVirtualRegister(result);
}
229#else
// op_mod on non-x86 platforms: no fast path, always call the cti_op_mod stub.
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(Interpreter::cti_op_mod);
    emitPutVirtualRegister(result);
}
// Never reached: the non-x86 compileFastArith_op_mod above records no slow cases.
void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
241#endif
242
// op_post_inc: result = srcDst; srcDst = srcDst + 1. The original value is
// preserved in regT0 (written to 'result') while the copy in regT1 is
// incremented and written back to srcDst. Slow cases: not an immediate int,
// then overflow on increment.
void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    // Adding (1 << IntegerPayloadShift) increments the payload of the still-tagged value.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
// op_post_inc slow path: links both slow cases (not an int, overflow); regT0
// still holds the original value for the stub call.
// NOTE(review): the store to srcDst assumes the stub leaves the incremented
// value in regT1 - confirm against cti_op_post_inc's return convention.
void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(Interpreter::cti_op_post_inc);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
267
// op_post_dec: result = srcDst; srcDst = srcDst - 1, mirroring op_post_inc.
// NOTE(review): the decrement's slow case uses the Zero condition, not
// Overflow - a wrap past INT_MIN would go undetected on the
// ALTERNATE_JSIMMEDIATE path; confirm whether Overflow was intended.
void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
// op_post_dec slow path: links both slow cases; regT0 still holds the
// original value for the stub call.
// NOTE(review): as with post_inc, the store to srcDst assumes the stub
// leaves the decremented value in regT1 - confirm against cti_op_post_dec.
void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(Interpreter::cti_op_post_dec);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
292
// op_pre_inc: srcDst = srcDst + 1, incremented in place in regT0.
// Slow cases, in order: not an immediate int, then overflow on increment.
void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // Adding (1 << IntegerPayloadShift) increments the payload of the still-tagged value.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
// op_pre_inc slow path. On the overflow case regT0 was clobbered by the
// wrapped add, so the original value is reloaded; on the not-an-int case
// regT0 is untouched and the reload is skipped.
void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter); // overflow: reload the unmodified value
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(Interpreter::cti_op_pre_inc);
    emitPutVirtualRegister(srcDst);
}
316
// op_pre_dec: srcDst = srcDst - 1, decremented in place in regT0.
// NOTE(review): as with op_post_dec, the decrement's slow case uses the Zero
// condition rather than Overflow - confirm this is the intended guard.
void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
// op_pre_dec slow path, mirroring op_pre_inc: the second slow case fires
// after regT0 was modified, so the original value is reloaded for the stub.
void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter); // decrement took the slow case: reload the unmodified value
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(Interpreter::cti_op_pre_dec);
    emitPutVirtualRegister(srcDst);
}
340
341
342#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
343
344void JIT::compileFastArith_op_add(Instruction* currentInstruction)
345{
346 unsigned result = currentInstruction[1].u.operand;
347 unsigned op1 = currentInstruction[2].u.operand;
348 unsigned op2 = currentInstruction[3].u.operand;
349
350 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
351 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
352 emitCTICall(Interpreter::cti_op_add);
353 emitPutVirtualRegister(result);
354}
// Never reached: the stub-only compileFastArith_op_add above records no slow cases.
void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
359
360void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
361{
362 unsigned result = currentInstruction[1].u.operand;
363 unsigned op1 = currentInstruction[2].u.operand;
364 unsigned op2 = currentInstruction[3].u.operand;
365
366 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
367 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
368 emitCTICall(Interpreter::cti_op_mul);
369 emitPutVirtualRegister(result);
370}
// Never reached: the stub-only compileFastArith_op_mul above records no slow cases.
void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
375
376void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
377{
378 unsigned result = currentInstruction[1].u.operand;
379 unsigned op1 = currentInstruction[2].u.operand;
380 unsigned op2 = currentInstruction[3].u.operand;
381
382 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
383 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
384 emitCTICall(Interpreter::cti_op_sub);
385 emitPutVirtualRegister(result);
386}
// Never reached: the stub-only compileFastArith_op_sub above records no slow cases.
void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
391
392#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
393
// Integer fast path for op_add/op_sub/op_mul in the ALTERNATE_JSIMMEDIATE
// (x86-64) encoding. Slow cases are recorded in this order: op1 not an
// immediate int, op2 not an immediate int, arithmetic overflow, and for
// op_mul only a zero result - 0 * negative must produce -0, which is not
// representable as an immediate int. compileBinaryArithOpSlowCase links the
// cases in the same order.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, X86::edx, X86::eax));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, X86::edx, X86::eax));
        addSlowCase(branchTest32(Zero, X86::eax));
    }
    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
}
410
// Slow path for the x86-64 integer fast path above. If both operands turn out
// to be numbers (immediate ints or boxed doubles) the operation is retried in
// double arithmetic in xmm1/xmm2; anything else falls into the stub call.
// The getSlowCase/linkSlowCase order here must mirror the addSlowCase order
// in compileBinaryArithOp.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned, unsigned op1, unsigned, OperandTypes types)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    // The overflow clobbered eax; reload op1 before calling the stub.
    emitGetVirtualRegister(op1, X86::eax);

    Label stubFunctionCall(this);
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::edx, 2);
    if (opcodeID == op_add)
        emitCTICall(Interpreter::cti_op_add);
    else if (opcodeID == op_sub)
        emitCTICall(Interpreter::cti_op_sub);
    else {
        ASSERT(opcodeID == op_mul);
        emitCTICall(Interpreter::cti_op_mul);
    }
    Jump end = jump();

    // if we get here, eax is not an int32, edx not yet checked.
    notImm1.link(this);
    if (!types.first().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::eax).linkTo(stubFunctionCall, this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
    // Unbox: adding tagTypeNumberRegister (== DoubleEncodeOffset, per the
    // COMPILE_ASSERT above) recovers the raw double bits of the boxed value.
    addPtr(tagTypeNumberRegister, X86::eax);
    m_assembler.movq_rr(X86::eax, X86::xmm1);
    Jump op2isDouble = emitJumpIfNotImmediateInteger(X86::edx);
    m_assembler.cvtsi2sd_rr(X86::edx, X86::xmm2);
    Jump op2wasInteger = jump();

    // if we get here, eax IS an int32, edx is not.
    notImm2.link(this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
    m_assembler.cvtsi2sd_rr(X86::eax, X86::xmm1);
    op2isDouble.link(this);
    addPtr(tagTypeNumberRegister, X86::edx); // unbox op2's double bits (see above)
    m_assembler.movq_rr(X86::edx, X86::xmm2);
    op2wasInteger.link(this);

    if (opcodeID == op_add)
        m_assembler.addsd_rr(X86::xmm2, X86::xmm1);
    else if (opcodeID == op_sub)
        m_assembler.subsd_rr(X86::xmm2, X86::xmm1);
    else {
        ASSERT(opcodeID == op_mul);
        m_assembler.mulsd_rr(X86::xmm2, X86::xmm1);
    }
    // Re-box the double result by subtracting TagTypeNumber back off.
    m_assembler.movq_rr(X86::xmm1, X86::eax);
    subPtr(tagTypeNumberRegister, X86::eax);

    end.link(this);
}
472
// op_add (x86-64): if the recorded operand types say either side can never be
// a number, go straight to the stub (it may be string concatenation etc.).
// Otherwise plant a constant-folded add when one operand is a constant int
// (two slow cases: not an int, overflow), or fall back to the generic
// integer fast path in compileBinaryArithOp.
void JIT::compileFastArith_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_add);
        emitPutVirtualRegister(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
// op_add slow path (x86-64). The constant-operand fast paths each recorded
// exactly two slow cases (not an int, overflow); both branches here link two
// and call the stub with operands reloaded from their virtual registers.
// The generic case defers to compileBinaryArithOpSlowCase.
void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_add);
    } else if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_add);
    } else
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
527
// op_mul (x86-64). A constant operand is folded into a multiply-immediate,
// but only when the constant is positive: with a non-positive constant the
// result could be -0 (0 * -N) or need the zero check, so those cases take
// the generic path in compileBinaryArithOp instead.
void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
// op_mul slow path (x86-64). The positive-constant fast path recorded two
// slow cases (not an int, overflow), linked here before calling the stub;
// the generic path (which also covers the extra -0 zero check) defers to
// compileBinaryArithOpSlowCase.
void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // (The extra slow case for (op1 * -N) / (-N * op2) producing -0 only
        // exists on the generic path below - constants here are positive.)
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(Interpreter::cti_op_mul);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
572
573void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
574{
575 unsigned result = currentInstruction[1].u.operand;
576 unsigned op1 = currentInstruction[2].u.operand;
577 unsigned op2 = currentInstruction[3].u.operand;
578 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
579
580 compileBinaryArithOp(op_sub, result, op1, op2, types);
581
582 emitPutVirtualRegister(result);
583}
584void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
585{
586 unsigned result = currentInstruction[1].u.operand;
587 unsigned op1 = currentInstruction[2].u.operand;
588 unsigned op2 = currentInstruction[3].u.operand;
589 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
590
591 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
592
593 emitPutVirtualRegister(result);
594}
595
596#else
597
// Shorthand for the raw assembler types used by the 32-bit
// (!ALTERNATE_JSIMMEDIATE) arithmetic path below, which emits through
// m_assembler directly rather than the MacroAssembler layer.
typedef X86Assembler::JmpSrc JmpSrc;
typedef X86Assembler::JmpDst JmpDst;
typedef X86Assembler::XMMRegisterID XMMRegisterID;
601
602#if PLATFORM(MAC)
603
// On Mac the answer is known at compile time, so no CPUID probe is needed.
static inline bool isSSE2Present()
{
    return true; // All X86 Macs are guaranteed to support at least SSE2
}
608
609#else
610
611static bool isSSE2Present()
612{
613 static const int SSE2FeatureBit = 1 << 26;
614 struct SSE2Check {
615 SSE2Check()
616 {
617 int flags;
618#if COMPILER(MSVC)
619 _asm {
620 mov eax, 1 // cpuid function 1 gives us the standard feature set
621 cpuid;
622 mov flags, edx;
623 }
624#else
625 flags = 0;
626 // FIXME: Add GCC code to do above asm
627#endif
628 present = (flags & SSE2FeatureBit) != 0;
629 }
630 bool present;
631 };
632 static SSE2Check check;
633 return check.present;
634}
635
636#endif
637
/*
    This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.

    In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
    is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).

    However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
    control will fall through from the code planted.
*/
void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
{
    // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
    __ cvttsd2si_rr(xmmSource, tempReg1);
    // (x + x) >> 1 reproduces x only when x fits in 31 bits, i.e. within the immediate payload.
    __ addl_rr(tempReg1, tempReg1);
    __ sarl_i8r(1, tempReg1);
    __ cvtsi2sd_rr(tempReg1, tempXmm);
    // Compare & branch if immediate.
    __ ucomisd_rr(tempXmm, xmmSource);
    JmpSrc resultIsImm = __ je();
    JmpDst resultLookedLikeImmButActuallyIsnt = __ label();

    // Store the result to the JSNumberCell and jump.
    __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
    if (jsNumberCell != X86::eax)
        __ movl_rr(jsNumberCell, X86::eax);
    emitPutVirtualRegister(dst);
    *wroteJSNumberCell = __ jmp();

    __ linkJump(resultIsImm, __ label());
    // value == (double)(JSImmediate)value... or at least, it looks that way...
    // ucomi will report that (0 == -0), and will report true if either input is NaN (result is unordered).
    __ linkJump(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
    // Extract the top 16 bits (sign + exponent) of the double: 0x8000 is the bit pattern of -0.
    __ pextrw_irr(3, xmmSource, tempReg2);
    __ cmpl_ir(0x8000, tempReg2);
    __ linkJump(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
    // Yes it really really really is representable as a JSImmediate.
    emitFastArithIntToImmNoCheck(tempReg1, X86::eax);
    emitPutVirtualRegister(dst);
}
677
// 32-bit (!ALTERNATE_JSIMMEDIATE) fast path for op_add/op_sub/op_mul.
// When the type info marks one operand as a reusable number cell and SSE2 is
// available, a double-precision path is planted that writes the result back
// into that reusable JSNumberCell (or an immediate, via
// putDoubleResultToJSNumberCellOrJSImmediate); otherwise only the tagged
// immediate-int path is planted. The addSlowCase order here must be matched
// by the 32-bit compileBinaryArithOpSlowCase.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    // Structure shared by all JSNumberCells, used below to type-check candidate cells.
    Structure* numberStructure = m_globalData->numberStructure.get();
    // Jumps planted by the double paths that skip the integer epilogue; linked at the end.
    JmpSrc wasJSNumberCell1;
    JmpSrc wasJSNumberCell1b;
    JmpSrc wasJSNumberCell2;
    JmpSrc wasJSNumberCell2b;

    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);

    if (types.second().isReusable() && isSSE2Present()) {
        ASSERT(types.second().mightBeNumber());

        // Check op2 is a number
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1) In this case src2 is a reusable number cell.
        // Slow case if src1 is not a number type.
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src1 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src1 is an immediate
        __ linkJump(op1imm, __ label());
        emitFastArithImmToInt(X86::eax);
        __ cvtsi2sd_rr(X86::eax, X86::xmm0);
        // (1c)
        __ linkJump(loadedDouble, __ label());
        if (opcodeID == op_add)
            __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        }

        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
        wasJSNumberCell2b = __ jmp();

        // (2) This handles cases where src2 is an immediate number.
        // Two slow cases - either src1 isn't an immediate, or the integer arithmetic below overflows.
        __ linkJump(op2imm, __ label());
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    } else if (types.first().isReusable() && isSSE2Present()) {
        ASSERT(types.first().mightBeNumber());

        // Check op1 is a number
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1) In this case src1 is a reusable number cell.
        // Slow case if src2 is not a number type.
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src2 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src2 is an immediate
        __ linkJump(op2imm, __ label());
        emitFastArithImmToInt(X86::edx);
        __ cvtsi2sd_rr(X86::edx, X86::xmm1);
        // (1c)
        __ linkJump(loadedDouble, __ label());
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        if (opcodeID == op_add)
            __ addsd_rr(X86::xmm1, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_rr(X86::xmm1, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_rr(X86::xmm1, X86::xmm0);
        }
        // NOTE(review): these two stores appear redundant - the call to
        // putDoubleResultToJSNumberCellOrJSImmediate below writes the same
        // cell and virtual register, and the symmetric src2-reusable branch
        // above has no equivalent - confirm before removing.
        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
        emitPutVirtualRegister(dst);

        putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
        wasJSNumberCell1b = __ jmp();

        // (2) This handles cases where src1 is an immediate number.
        // Two slow cases - either src2 isn't an immediate, or the integer arithmetic below overflows.
        __ linkJump(op1imm, __ label());
        emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
    } else
        emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);

    if (opcodeID == op_add) {
        // De-tag one operand so adding the still-tagged other yields a correctly tagged sum.
        emitFastArithDeTagImmediate(X86::eax);
        __ addl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
    } else if (opcodeID == op_sub) {
        __ subl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else {
        ASSERT(opcodeID == op_mul);
        // convert eax & edx from JSImmediates to ints, and check if either are zero
        emitFastArithImmToInt(X86::edx);
        Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
        __ testl_rr(X86::edx, X86::edx);
        JmpSrc op2NonZero = __ jne();
        op1Zero.link(this);
        // if either input is zero, add the two together, and check if the result is < 0.
        // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
        __ movl_rr(X86::eax, X86::ecx);
        __ addl_rr(X86::edx, X86::ecx);
        addSlowCase(__ js());
        // Skip the above check if neither input is zero
        __ linkJump(op2NonZero, __ label());
        __ imull_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    }
    emitPutVirtualRegister(dst);

    // Join points for the double paths, which have already stored their result.
    if (types.second().isReusable() && isSSE2Present()) {
        __ linkJump(wasJSNumberCell2, __ label());
        __ linkJump(wasJSNumberCell2b, __ label());
    }
    else if (types.first().isReusable() && isSSE2Present()) {
        __ linkJump(wasJSNumberCell1, __ label());
        __ linkJump(wasJSNumberCell1b, __ label());
    }
}
828
829void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
830{
831 linkSlowCase(iter);
832 if (types.second().isReusable() && isSSE2Present()) {
833 if (!types.first().definitelyIsNumber()) {
834 linkSlowCaseIfNotJSCell(iter, src1);
835 linkSlowCase(iter);
836 }
837 if (!types.second().definitelyIsNumber()) {
838 linkSlowCaseIfNotJSCell(iter, src2);
839 linkSlowCase(iter);
840 }
841 } else if (types.first().isReusable() && isSSE2Present()) {
842 if (!types.first().definitelyIsNumber()) {
843 linkSlowCaseIfNotJSCell(iter, src1);
844 linkSlowCase(iter);
845 }
846 if (!types.second().definitelyIsNumber()) {
847 linkSlowCaseIfNotJSCell(iter, src2);
848 linkSlowCase(iter);
849 }
850 }
851 linkSlowCase(iter);
852
853 // additional entry point to handle -0 cases.
854 if (opcodeID == op_mul)
855 linkSlowCase(iter);
856
857 emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
858 emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
859 if (opcodeID == op_add)
860 emitCTICall(Interpreter::cti_op_add);
861 else if (opcodeID == op_sub)
862 emitCTICall(Interpreter::cti_op_sub);
863 else {
864 ASSERT(opcodeID == op_mul);
865 emitCTICall(Interpreter::cti_op_mul);
866 }
867 emitPutVirtualRegister(dst);
868}
869
870void JIT::compileFastArith_op_add(Instruction* currentInstruction)
871{
872 unsigned result = currentInstruction[1].u.operand;
873 unsigned op1 = currentInstruction[2].u.operand;
874 unsigned op2 = currentInstruction[3].u.operand;
875
876 if (isOperandConstantImmediateInt(op1)) {
877 emitGetVirtualRegister(op2, X86::eax);
878 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
879 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
880 signExtend32ToPtr(X86::eax, X86::eax);
881 emitPutVirtualRegister(result);
882 } else if (isOperandConstantImmediateInt(op2)) {
883 emitGetVirtualRegister(op1, X86::eax);
884 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
885 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
886 signExtend32ToPtr(X86::eax, X86::eax);
887 emitPutVirtualRegister(result);
888 } else {
889 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
890 if (types.first().mightBeNumber() && types.second().mightBeNumber())
891 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
892 else {
893 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
894 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
895 emitCTICall(Interpreter::cti_op_add);
896 emitPutVirtualRegister(result);
897 }
898 }
899}
900void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
901{
902 unsigned result = currentInstruction[1].u.operand;
903 unsigned op1 = currentInstruction[2].u.operand;
904 unsigned op2 = currentInstruction[3].u.operand;
905
906 if (isOperandConstantImmediateInt(op1)) {
907 Jump notImm = getSlowCase(iter);
908 linkSlowCase(iter);
909 sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
910 notImm.link(this);
911 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
912 emitPutJITStubArg(X86::eax, 2);
913 emitCTICall(Interpreter::cti_op_add);
914 emitPutVirtualRegister(result);
915 } else if (isOperandConstantImmediateInt(op2)) {
916 Jump notImm = getSlowCase(iter);
917 linkSlowCase(iter);
918 sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
919 notImm.link(this);
920 emitPutJITStubArg(X86::eax, 1);
921 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
922 emitCTICall(Interpreter::cti_op_add);
923 emitPutVirtualRegister(result);
924 } else {
925 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
926 ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
927 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
928 }
929}
930
931void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
932{
933 unsigned result = currentInstruction[1].u.operand;
934 unsigned op1 = currentInstruction[2].u.operand;
935 unsigned op2 = currentInstruction[3].u.operand;
936
937 // For now, only plant a fast int case if the constant operand is greater than zero.
938 int32_t value;
939 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
940 emitGetVirtualRegister(op2, X86::eax);
941 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
942 emitFastArithDeTagImmediate(X86::eax);
943 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
944 signExtend32ToPtr(X86::eax, X86::eax);
945 emitFastArithReTagImmediate(X86::eax, X86::eax);
946 emitPutVirtualRegister(result);
947 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
948 emitGetVirtualRegister(op1, X86::eax);
949 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
950 emitFastArithDeTagImmediate(X86::eax);
951 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
952 signExtend32ToPtr(X86::eax, X86::eax);
953 emitFastArithReTagImmediate(X86::eax, X86::eax);
954 emitPutVirtualRegister(result);
955 } else
956 compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
957}
958void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
959{
960 unsigned result = currentInstruction[1].u.operand;
961 unsigned op1 = currentInstruction[2].u.operand;
962 unsigned op2 = currentInstruction[3].u.operand;
963
964 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
965 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
966 linkSlowCase(iter);
967 linkSlowCase(iter);
968 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
969 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
970 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
971 emitCTICall(Interpreter::cti_op_mul);
972 emitPutVirtualRegister(result);
973 } else
974 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
975}
976
977void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
978{
979 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
980}
981void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
982{
983 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
984}
985
986#endif
987
988} // namespace JSC
989
990#endif // ENABLE(JIT)
// Note: See TracBrowser for help on using the repository browser.