source: webkit/trunk/JavaScriptCore/jit/JITArithmetic.cpp@ 41022

Last change on this file since 41022 was 41022, checked in by [email protected], 16 years ago

Bug 23787: Allow JIT to generate SSE2 code if using GCC
<https://bugs.webkit.org/show_bug.cgi?id=23787>

Provided by Csaba Osztrogonac
Reviewed by Oliver Hunt

GCC version of the cpuid check.

File size: 38.8 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JSArray.h"
34#include "JSFunction.h"
35#include "Interpreter.h"
36#include "ResultType.h"
37#include "SamplingTool.h"
38
39#ifndef NDEBUG
40#include <stdio.h>
41#endif
42
43#define __ m_assembler.
44
45using namespace std;
46
47namespace JSC {
48
49void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
50{
51 emitGetVirtualRegisters(op1, regT0, op2, regT2);
52 // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
53 emitJumpSlowCaseIfNotImmediateInteger(regT0);
54 emitJumpSlowCaseIfNotImmediateInteger(regT2);
55 emitFastArithImmToInt(regT0);
56 emitFastArithImmToInt(regT2);
57#if !PLATFORM(X86)
58 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
59 // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction.
60 and32(Imm32(0x1f), regT2);
61#endif
62 lshift32(regT2, regT0);
63#if !USE(ALTERNATE_JSIMMEDIATE)
64 addSlowCase(branchAdd32(Overflow, regT0, regT0));
65 signExtend32ToPtr(regT0, regT0);
66#endif
67 emitFastArithReTagImmediate(regT0, regT0);
68 emitPutVirtualRegister(result);
69}
70void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
71{
72#if USE(ALTERNATE_JSIMMEDIATE)
73 UNUSED_PARAM(op1);
74 UNUSED_PARAM(op2);
75 linkSlowCase(iter);
76 linkSlowCase(iter);
77#else
78 // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
79 Jump notImm1 = getSlowCase(iter);
80 Jump notImm2 = getSlowCase(iter);
81 linkSlowCase(iter);
82 emitGetVirtualRegisters(op1, regT0, op2, regT2);
83 notImm1.link(this);
84 notImm2.link(this);
85#endif
86 emitPutJITStubArg(regT0, 1);
87 emitPutJITStubArg(regT2, 2);
88 emitCTICall(Interpreter::cti_op_lshift);
89 emitPutVirtualRegister(result);
90}
91
92void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
93{
94 if (isOperandConstantImmediateInt(op2)) {
95 emitGetVirtualRegister(op1, regT0);
96 emitJumpSlowCaseIfNotImmediateInteger(regT0);
97 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
98#if USE(ALTERNATE_JSIMMEDIATE)
99 rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
100#else
101 rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
102#endif
103 } else {
104 emitGetVirtualRegisters(op1, regT0, op2, regT2);
105 emitJumpSlowCaseIfNotImmediateInteger(regT0);
106 emitJumpSlowCaseIfNotImmediateInteger(regT2);
107 emitFastArithImmToInt(regT2);
108#if !PLATFORM(X86)
109 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
110 // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction.
111 and32(Imm32(0x1f), regT2);
112#endif
113#if USE(ALTERNATE_JSIMMEDIATE)
114 rshift32(regT2, regT0);
115#else
116 rshiftPtr(regT2, regT0);
117#endif
118 }
119#if USE(ALTERNATE_JSIMMEDIATE)
120 emitFastArithIntToImmNoCheck(regT0, regT0);
121#else
122 orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
123#endif
124 emitPutVirtualRegister(result);
125}
126void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
127{
128 linkSlowCase(iter);
129 if (isOperandConstantImmediateInt(op2))
130 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
131 else {
132 linkSlowCase(iter);
133 emitPutJITStubArg(regT2, 2);
134 }
135
136 emitPutJITStubArg(regT0, 1);
137 emitCTICall(Interpreter::cti_op_rshift);
138 emitPutVirtualRegister(result);
139}
140
141void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
142{
143 if (isOperandConstantImmediateInt(op1)) {
144 emitGetVirtualRegister(op2, regT0);
145 emitJumpSlowCaseIfNotImmediateInteger(regT0);
146#if USE(ALTERNATE_JSIMMEDIATE)
147 int32_t imm = getConstantOperandImmediateInt(op1);
148 andPtr(Imm32(imm), regT0);
149 if (imm >= 0)
150 emitFastArithIntToImmNoCheck(regT0, regT0);
151#else
152 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
153#endif
154 } else if (isOperandConstantImmediateInt(op2)) {
155 emitGetVirtualRegister(op1, regT0);
156 emitJumpSlowCaseIfNotImmediateInteger(regT0);
157#if USE(ALTERNATE_JSIMMEDIATE)
158 int32_t imm = getConstantOperandImmediateInt(op2);
159 andPtr(Imm32(imm), regT0);
160 if (imm >= 0)
161 emitFastArithIntToImmNoCheck(regT0, regT0);
162#else
163 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
164#endif
165 } else {
166 emitGetVirtualRegisters(op1, regT0, op2, regT1);
167 andPtr(regT1, regT0);
168 emitJumpSlowCaseIfNotImmediateInteger(regT0);
169 }
170 emitPutVirtualRegister(result);
171}
172void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
173{
174 linkSlowCase(iter);
175 if (isOperandConstantImmediateInt(op1)) {
176 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
177 emitPutJITStubArg(regT0, 2);
178 } else if (isOperandConstantImmediateInt(op2)) {
179 emitPutJITStubArg(regT0, 1);
180 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
181 } else {
182 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
183 emitPutJITStubArg(regT1, 2);
184 }
185 emitCTICall(Interpreter::cti_op_bitand);
186 emitPutVirtualRegister(result);
187}
188
189#if PLATFORM(X86) || PLATFORM(X86_64)
190void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
191{
192 emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
193 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
194 emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
195#if USE(ALTERNATE_JSIMMEDIATE)
196 addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValuePtr::encode(js0()))));
197 m_assembler.cdq();
198 m_assembler.idivl_r(X86::ecx);
199#else
200 emitFastArithDeTagImmediate(X86::eax);
201 addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
202 m_assembler.cdq();
203 m_assembler.idivl_r(X86::ecx);
204 signExtend32ToPtr(X86::edx, X86::edx);
205#endif
206 emitFastArithReTagImmediate(X86::edx, X86::eax);
207 emitPutVirtualRegister(result);
208}
209void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
210{
211#if USE(ALTERNATE_JSIMMEDIATE)
212 linkSlowCase(iter);
213 linkSlowCase(iter);
214 linkSlowCase(iter);
215#else
216 Jump notImm1 = getSlowCase(iter);
217 Jump notImm2 = getSlowCase(iter);
218 linkSlowCase(iter);
219 emitFastArithReTagImmediate(X86::eax, X86::eax);
220 emitFastArithReTagImmediate(X86::ecx, X86::ecx);
221 notImm1.link(this);
222 notImm2.link(this);
223#endif
224 emitPutJITStubArg(X86::eax, 1);
225 emitPutJITStubArg(X86::ecx, 2);
226 emitCTICall(Interpreter::cti_op_mod);
227 emitPutVirtualRegister(result);
228}
229#else
230void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
231{
232 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
233 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
234 emitCTICall(Interpreter::cti_op_mod);
235 emitPutVirtualRegister(result);
236}
237void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
238{
239 ASSERT_NOT_REACHED();
240}
241#endif
242
243void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
244{
245 emitGetVirtualRegister(srcDst, regT0);
246 move(regT0, regT1);
247 emitJumpSlowCaseIfNotImmediateInteger(regT0);
248#if USE(ALTERNATE_JSIMMEDIATE)
249 addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
250 emitFastArithIntToImmNoCheck(regT1, regT1);
251#else
252 addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
253 signExtend32ToPtr(regT1, regT1);
254#endif
255 emitPutVirtualRegister(srcDst, regT1);
256 emitPutVirtualRegister(result);
257}
258void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
259{
260 linkSlowCase(iter);
261 linkSlowCase(iter);
262 emitPutJITStubArg(regT0, 1);
263 emitCTICall(Interpreter::cti_op_post_inc);
264 emitPutVirtualRegister(srcDst, regT1);
265 emitPutVirtualRegister(result);
266}
267
268void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
269{
270 emitGetVirtualRegister(srcDst, regT0);
271 move(regT0, regT1);
272 emitJumpSlowCaseIfNotImmediateInteger(regT0);
273#if USE(ALTERNATE_JSIMMEDIATE)
274 addSlowCase(branchSub32(Zero, Imm32(1), regT1));
275 emitFastArithIntToImmNoCheck(regT1, regT1);
276#else
277 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
278 signExtend32ToPtr(regT1, regT1);
279#endif
280 emitPutVirtualRegister(srcDst, regT1);
281 emitPutVirtualRegister(result);
282}
283void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
284{
285 linkSlowCase(iter);
286 linkSlowCase(iter);
287 emitPutJITStubArg(regT0, 1);
288 emitCTICall(Interpreter::cti_op_post_dec);
289 emitPutVirtualRegister(srcDst, regT1);
290 emitPutVirtualRegister(result);
291}
292
293void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
294{
295 emitGetVirtualRegister(srcDst, regT0);
296 emitJumpSlowCaseIfNotImmediateInteger(regT0);
297#if USE(ALTERNATE_JSIMMEDIATE)
298 addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
299 emitFastArithIntToImmNoCheck(regT0, regT0);
300#else
301 addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
302 signExtend32ToPtr(regT0, regT0);
303#endif
304 emitPutVirtualRegister(srcDst);
305}
306void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
307{
308 Jump notImm = getSlowCase(iter);
309 linkSlowCase(iter);
310 emitGetVirtualRegister(srcDst, regT0);
311 notImm.link(this);
312 emitPutJITStubArg(regT0, 1);
313 emitCTICall(Interpreter::cti_op_pre_inc);
314 emitPutVirtualRegister(srcDst);
315}
316
317void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
318{
319 emitGetVirtualRegister(srcDst, regT0);
320 emitJumpSlowCaseIfNotImmediateInteger(regT0);
321#if USE(ALTERNATE_JSIMMEDIATE)
322 addSlowCase(branchSub32(Zero, Imm32(1), regT0));
323 emitFastArithIntToImmNoCheck(regT0, regT0);
324#else
325 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
326 signExtend32ToPtr(regT0, regT0);
327#endif
328 emitPutVirtualRegister(srcDst);
329}
330void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
331{
332 Jump notImm = getSlowCase(iter);
333 linkSlowCase(iter);
334 emitGetVirtualRegister(srcDst, regT0);
335 notImm.link(this);
336 emitPutJITStubArg(regT0, 1);
337 emitCTICall(Interpreter::cti_op_pre_dec);
338 emitPutVirtualRegister(srcDst);
339}
340
341
342#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
343
344void JIT::compileFastArith_op_add(Instruction* currentInstruction)
345{
346 unsigned result = currentInstruction[1].u.operand;
347 unsigned op1 = currentInstruction[2].u.operand;
348 unsigned op2 = currentInstruction[3].u.operand;
349
350 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
351 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
352 emitCTICall(Interpreter::cti_op_add);
353 emitPutVirtualRegister(result);
354}
355void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
356{
357 ASSERT_NOT_REACHED();
358}
359
360void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
361{
362 unsigned result = currentInstruction[1].u.operand;
363 unsigned op1 = currentInstruction[2].u.operand;
364 unsigned op2 = currentInstruction[3].u.operand;
365
366 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
367 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
368 emitCTICall(Interpreter::cti_op_mul);
369 emitPutVirtualRegister(result);
370}
371void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
372{
373 ASSERT_NOT_REACHED();
374}
375
376void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
377{
378 unsigned result = currentInstruction[1].u.operand;
379 unsigned op1 = currentInstruction[2].u.operand;
380 unsigned op2 = currentInstruction[3].u.operand;
381
382 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
383 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
384 emitCTICall(Interpreter::cti_op_sub);
385 emitPutVirtualRegister(result);
386}
387void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
388{
389 ASSERT_NOT_REACHED();
390}
391
392#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
393
394void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
395{
396 emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
397 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
398 emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
399 if (opcodeID == op_add)
400 addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax));
401 else if (opcodeID == op_sub)
402 addSlowCase(branchSub32(Overflow, X86::edx, X86::eax));
403 else {
404 ASSERT(opcodeID == op_mul);
405 addSlowCase(branchMul32(Overflow, X86::edx, X86::eax));
406 addSlowCase(branchTest32(Zero, X86::eax));
407 }
408 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
409}
410
411void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned, unsigned op1, unsigned, OperandTypes types)
412{
413 // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
414 COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
415
416 Jump notImm1 = getSlowCase(iter);
417 Jump notImm2 = getSlowCase(iter);
418
419 linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
420 if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
421 linkSlowCase(iter);
422 emitGetVirtualRegister(op1, X86::eax);
423
424 Label stubFunctionCall(this);
425 emitPutJITStubArg(X86::eax, 1);
426 emitPutJITStubArg(X86::edx, 2);
427 if (opcodeID == op_add)
428 emitCTICall(Interpreter::cti_op_add);
429 else if (opcodeID == op_sub)
430 emitCTICall(Interpreter::cti_op_sub);
431 else {
432 ASSERT(opcodeID == op_mul);
433 emitCTICall(Interpreter::cti_op_mul);
434 }
435 Jump end = jump();
436
437 // if we get here, eax is not an int32, edx not yet checked.
438 notImm1.link(this);
439 if (!types.first().definitelyIsNumber())
440 emitJumpIfNotImmediateNumber(X86::eax).linkTo(stubFunctionCall, this);
441 if (!types.second().definitelyIsNumber())
442 emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
443 addPtr(tagTypeNumberRegister, X86::eax);
444 m_assembler.movq_rr(X86::eax, X86::xmm1);
445 Jump op2isDouble = emitJumpIfNotImmediateInteger(X86::edx);
446 m_assembler.cvtsi2sd_rr(X86::edx, X86::xmm2);
447 Jump op2wasInteger = jump();
448
449 // if we get here, eax IS an int32, edx is not.
450 notImm2.link(this);
451 if (!types.second().definitelyIsNumber())
452 emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
453 m_assembler.cvtsi2sd_rr(X86::eax, X86::xmm1);
454 op2isDouble.link(this);
455 addPtr(tagTypeNumberRegister, X86::edx);
456 m_assembler.movq_rr(X86::edx, X86::xmm2);
457 op2wasInteger.link(this);
458
459 if (opcodeID == op_add)
460 m_assembler.addsd_rr(X86::xmm2, X86::xmm1);
461 else if (opcodeID == op_sub)
462 m_assembler.subsd_rr(X86::xmm2, X86::xmm1);
463 else {
464 ASSERT(opcodeID == op_mul);
465 m_assembler.mulsd_rr(X86::xmm2, X86::xmm1);
466 }
467 m_assembler.movq_rr(X86::xmm1, X86::eax);
468 subPtr(tagTypeNumberRegister, X86::eax);
469
470 end.link(this);
471}
472
473void JIT::compileFastArith_op_add(Instruction* currentInstruction)
474{
475 unsigned result = currentInstruction[1].u.operand;
476 unsigned op1 = currentInstruction[2].u.operand;
477 unsigned op2 = currentInstruction[3].u.operand;
478 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
479
480 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
481 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
482 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
483 emitCTICall(Interpreter::cti_op_add);
484 emitPutVirtualRegister(result);
485 return;
486 }
487
488 if (isOperandConstantImmediateInt(op1)) {
489 emitGetVirtualRegister(op2, X86::eax);
490 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
491 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
492 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
493 } else if (isOperandConstantImmediateInt(op2)) {
494 emitGetVirtualRegister(op1, X86::eax);
495 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
496 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
497 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
498 } else
499 compileBinaryArithOp(op_add, result, op1, op2, types);
500
501 emitPutVirtualRegister(result);
502}
503void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
504{
505 unsigned result = currentInstruction[1].u.operand;
506 unsigned op1 = currentInstruction[2].u.operand;
507 unsigned op2 = currentInstruction[3].u.operand;
508 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
509
510 if (isOperandConstantImmediateInt(op1)) {
511 linkSlowCase(iter);
512 linkSlowCase(iter);
513 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
514 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
515 emitCTICall(Interpreter::cti_op_add);
516 } else if (isOperandConstantImmediateInt(op2)) {
517 linkSlowCase(iter);
518 linkSlowCase(iter);
519 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
520 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
521 emitCTICall(Interpreter::cti_op_add);
522 } else
523 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
524
525 emitPutVirtualRegister(result);
526}
527
528void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
529{
530 unsigned result = currentInstruction[1].u.operand;
531 unsigned op1 = currentInstruction[2].u.operand;
532 unsigned op2 = currentInstruction[3].u.operand;
533 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
534
535 // For now, only plant a fast int case if the constant operand is greater than zero.
536 int32_t value;
537 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
538 emitGetVirtualRegister(op2, X86::eax);
539 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
540 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
541 emitFastArithReTagImmediate(X86::eax, X86::eax);
542 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
543 emitGetVirtualRegister(op1, X86::eax);
544 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
545 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
546 emitFastArithReTagImmediate(X86::eax, X86::eax);
547 } else
548 compileBinaryArithOp(op_mul, result, op1, op2, types);
549
550 emitPutVirtualRegister(result);
551}
552void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
553{
554 unsigned result = currentInstruction[1].u.operand;
555 unsigned op1 = currentInstruction[2].u.operand;
556 unsigned op2 = currentInstruction[3].u.operand;
557 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
558
559 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
560 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
561 linkSlowCase(iter);
562 linkSlowCase(iter);
563 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
564 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
565 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
566 emitCTICall(Interpreter::cti_op_mul);
567 } else
568 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
569
570 emitPutVirtualRegister(result);
571}
572
573void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
574{
575 unsigned result = currentInstruction[1].u.operand;
576 unsigned op1 = currentInstruction[2].u.operand;
577 unsigned op2 = currentInstruction[3].u.operand;
578 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
579
580 compileBinaryArithOp(op_sub, result, op1, op2, types);
581
582 emitPutVirtualRegister(result);
583}
584void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
585{
586 unsigned result = currentInstruction[1].u.operand;
587 unsigned op1 = currentInstruction[2].u.operand;
588 unsigned op2 = currentInstruction[3].u.operand;
589 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
590
591 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
592
593 emitPutVirtualRegister(result);
594}
595
596#else
597
598typedef X86Assembler::JmpSrc JmpSrc;
599typedef X86Assembler::JmpDst JmpDst;
600typedef X86Assembler::XMMRegisterID XMMRegisterID;
601
602#if PLATFORM(MAC)
603
604static inline bool isSSE2Present()
605{
606 return true; // All X86 Macs are guaranteed to support at least SSE2
607}
608
609#else
610
611static bool isSSE2Present()
612{
613 static const int SSE2FeatureBit = 1 << 26;
614 struct SSE2Check {
615 SSE2Check()
616 {
617 int flags;
618#if COMPILER(MSVC)
619 _asm {
620 mov eax, 1 // cpuid function 1 gives us the standard feature set
621 cpuid;
622 mov flags, edx;
623 }
624#elif COMPILER(GCC)
625 asm (
626 "movl $0x1, %%eax;"
627 "cpuid;"
628 "movl %%edx, %0;"
629 : "=g" (flags)
630 :
631 : "%eax", "%ebx", "%ecx", "%edx"
632 );
633#else
634 flags = 0;
635#endif
636 present = (flags & SSE2FeatureBit) != 0;
637 }
638 bool present;
639 };
640 static SSE2Check check;
641 return check.present;
642}
643
644#endif
645
646/*
647 This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell.
648
649 In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell'
650 is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell).
651
652 However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow
653 control will fall through from the code planted.
654*/
655void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2)
656{
657 // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate.
658 __ cvttsd2si_rr(xmmSource, tempReg1);
659 __ addl_rr(tempReg1, tempReg1);
660 __ sarl_i8r(1, tempReg1);
661 __ cvtsi2sd_rr(tempReg1, tempXmm);
662 // Compare & branch if immediate.
663 __ ucomisd_rr(tempXmm, xmmSource);
664 JmpSrc resultIsImm = __ je();
665 JmpDst resultLookedLikeImmButActuallyIsnt = __ label();
666
667 // Store the result to the JSNumberCell and jump.
668 __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell);
669 if (jsNumberCell != X86::eax)
670 __ movl_rr(jsNumberCell, X86::eax);
671 emitPutVirtualRegister(dst);
672 *wroteJSNumberCell = __ jmp();
673
674 __ link(resultIsImm, __ label());
675 // value == (double)(JSImmediate)value... or at least, it looks that way...
676 // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered).
677 __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN
678 __ pextrw_irr(3, xmmSource, tempReg2);
679 __ cmpl_ir(0x8000, tempReg2);
680 __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0
681 // Yes it really really really is representable as a JSImmediate.
682 emitFastArithIntToImmNoCheck(tempReg1, X86::eax);
683 emitPutVirtualRegister(dst);
684}
685
686void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
687{
688 Structure* numberStructure = m_globalData->numberStructure.get();
689 JmpSrc wasJSNumberCell1;
690 JmpSrc wasJSNumberCell1b;
691 JmpSrc wasJSNumberCell2;
692 JmpSrc wasJSNumberCell2b;
693
694 emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
695
696 if (types.second().isReusable() && isSSE2Present()) {
697 ASSERT(types.second().mightBeNumber());
698
699 // Check op2 is a number
700 __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
701 JmpSrc op2imm = __ jne();
702 if (!types.second().definitelyIsNumber()) {
703 emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
704 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
705 addSlowCase(__ jne());
706 }
707
708 // (1) In this case src2 is a reusable number cell.
709 // Slow case if src1 is not a number type.
710 __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
711 JmpSrc op1imm = __ jne();
712 if (!types.first().definitelyIsNumber()) {
713 emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
714 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
715 addSlowCase(__ jne());
716 }
717
718 // (1a) if we get here, src1 is also a number cell
719 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
720 JmpSrc loadedDouble = __ jmp();
721 // (1b) if we get here, src1 is an immediate
722 __ link(op1imm, __ label());
723 emitFastArithImmToInt(X86::eax);
724 __ cvtsi2sd_rr(X86::eax, X86::xmm0);
725 // (1c)
726 __ link(loadedDouble, __ label());
727 if (opcodeID == op_add)
728 __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
729 else if (opcodeID == op_sub)
730 __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
731 else {
732 ASSERT(opcodeID == op_mul);
733 __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
734 }
735
736 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax);
737 wasJSNumberCell2b = __ jmp();
738
739 // (2) This handles cases where src2 is an immediate number.
740 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
741 __ link(op2imm, __ label());
742 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
743 } else if (types.first().isReusable() && isSSE2Present()) {
744 ASSERT(types.first().mightBeNumber());
745
746 // Check op1 is a number
747 __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
748 JmpSrc op1imm = __ jne();
749 if (!types.first().definitelyIsNumber()) {
750 emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
751 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
752 addSlowCase(__ jne());
753 }
754
755 // (1) In this case src1 is a reusable number cell.
756 // Slow case if src2 is not a number type.
757 __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
758 JmpSrc op2imm = __ jne();
759 if (!types.second().definitelyIsNumber()) {
760 emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
761 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
762 addSlowCase(__ jne());
763 }
764
765 // (1a) if we get here, src2 is also a number cell
766 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
767 JmpSrc loadedDouble = __ jmp();
768 // (1b) if we get here, src2 is an immediate
769 __ link(op2imm, __ label());
770 emitFastArithImmToInt(X86::edx);
771 __ cvtsi2sd_rr(X86::edx, X86::xmm1);
772 // (1c)
773 __ link(loadedDouble, __ label());
774 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
775 if (opcodeID == op_add)
776 __ addsd_rr(X86::xmm1, X86::xmm0);
777 else if (opcodeID == op_sub)
778 __ subsd_rr(X86::xmm1, X86::xmm0);
779 else {
780 ASSERT(opcodeID == op_mul);
781 __ mulsd_rr(X86::xmm1, X86::xmm0);
782 }
783 __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
784 emitPutVirtualRegister(dst);
785
786 putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx);
787 wasJSNumberCell1b = __ jmp();
788
789 // (2) This handles cases where src1 is an immediate number.
790 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
791 __ link(op1imm, __ label());
792 emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
793 } else
794 emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
795
796 if (opcodeID == op_add) {
797 emitFastArithDeTagImmediate(X86::eax);
798 __ addl_rr(X86::edx, X86::eax);
799 addSlowCase(__ jo());
800 } else if (opcodeID == op_sub) {
801 __ subl_rr(X86::edx, X86::eax);
802 addSlowCase(__ jo());
803 signExtend32ToPtr(X86::eax, X86::eax);
804 emitFastArithReTagImmediate(X86::eax, X86::eax);
805 } else {
806 ASSERT(opcodeID == op_mul);
807 // convert eax & edx from JSImmediates to ints, and check if either are zero
808 emitFastArithImmToInt(X86::edx);
809 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
810 __ testl_rr(X86::edx, X86::edx);
811 JmpSrc op2NonZero = __ jne();
812 op1Zero.link(this);
813 // if either input is zero, add the two together, and check if the result is < 0.
814 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
815 __ movl_rr(X86::eax, X86::ecx);
816 __ addl_rr(X86::edx, X86::ecx);
817 addSlowCase(__ js());
818 // Skip the above check if neither input is zero
819 __ link(op2NonZero, __ label());
820 __ imull_rr(X86::edx, X86::eax);
821 addSlowCase(__ jo());
822 signExtend32ToPtr(X86::eax, X86::eax);
823 emitFastArithReTagImmediate(X86::eax, X86::eax);
824 }
825 emitPutVirtualRegister(dst);
826
827 if (types.second().isReusable() && isSSE2Present()) {
828 __ link(wasJSNumberCell2, __ label());
829 __ link(wasJSNumberCell2b, __ label());
830 }
831 else if (types.first().isReusable() && isSSE2Present()) {
832 __ link(wasJSNumberCell1, __ label());
833 __ link(wasJSNumberCell1b, __ label());
834 }
835}
836
837void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
838{
839 linkSlowCase(iter);
840 if (types.second().isReusable() && isSSE2Present()) {
841 if (!types.first().definitelyIsNumber()) {
842 linkSlowCaseIfNotJSCell(iter, src1);
843 linkSlowCase(iter);
844 }
845 if (!types.second().definitelyIsNumber()) {
846 linkSlowCaseIfNotJSCell(iter, src2);
847 linkSlowCase(iter);
848 }
849 } else if (types.first().isReusable() && isSSE2Present()) {
850 if (!types.first().definitelyIsNumber()) {
851 linkSlowCaseIfNotJSCell(iter, src1);
852 linkSlowCase(iter);
853 }
854 if (!types.second().definitelyIsNumber()) {
855 linkSlowCaseIfNotJSCell(iter, src2);
856 linkSlowCase(iter);
857 }
858 }
859 linkSlowCase(iter);
860
861 // additional entry point to handle -0 cases.
862 if (opcodeID == op_mul)
863 linkSlowCase(iter);
864
865 emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
866 emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
867 if (opcodeID == op_add)
868 emitCTICall(Interpreter::cti_op_add);
869 else if (opcodeID == op_sub)
870 emitCTICall(Interpreter::cti_op_sub);
871 else {
872 ASSERT(opcodeID == op_mul);
873 emitCTICall(Interpreter::cti_op_mul);
874 }
875 emitPutVirtualRegister(dst);
876}
877
878void JIT::compileFastArith_op_add(Instruction* currentInstruction)
879{
880 unsigned result = currentInstruction[1].u.operand;
881 unsigned op1 = currentInstruction[2].u.operand;
882 unsigned op2 = currentInstruction[3].u.operand;
883
884 if (isOperandConstantImmediateInt(op1)) {
885 emitGetVirtualRegister(op2, X86::eax);
886 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
887 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
888 signExtend32ToPtr(X86::eax, X86::eax);
889 emitPutVirtualRegister(result);
890 } else if (isOperandConstantImmediateInt(op2)) {
891 emitGetVirtualRegister(op1, X86::eax);
892 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
893 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
894 signExtend32ToPtr(X86::eax, X86::eax);
895 emitPutVirtualRegister(result);
896 } else {
897 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
898 if (types.first().mightBeNumber() && types.second().mightBeNumber())
899 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
900 else {
901 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
902 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
903 emitCTICall(Interpreter::cti_op_add);
904 emitPutVirtualRegister(result);
905 }
906 }
907}
908void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
909{
910 unsigned result = currentInstruction[1].u.operand;
911 unsigned op1 = currentInstruction[2].u.operand;
912 unsigned op2 = currentInstruction[3].u.operand;
913
914 if (isOperandConstantImmediateInt(op1)) {
915 Jump notImm = getSlowCase(iter);
916 linkSlowCase(iter);
917 sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
918 notImm.link(this);
919 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
920 emitPutJITStubArg(X86::eax, 2);
921 emitCTICall(Interpreter::cti_op_add);
922 emitPutVirtualRegister(result);
923 } else if (isOperandConstantImmediateInt(op2)) {
924 Jump notImm = getSlowCase(iter);
925 linkSlowCase(iter);
926 sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
927 notImm.link(this);
928 emitPutJITStubArg(X86::eax, 1);
929 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
930 emitCTICall(Interpreter::cti_op_add);
931 emitPutVirtualRegister(result);
932 } else {
933 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
934 ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
935 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
936 }
937}
938
939void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
940{
941 unsigned result = currentInstruction[1].u.operand;
942 unsigned op1 = currentInstruction[2].u.operand;
943 unsigned op2 = currentInstruction[3].u.operand;
944
945 // For now, only plant a fast int case if the constant operand is greater than zero.
946 int32_t value;
947 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
948 emitGetVirtualRegister(op2, X86::eax);
949 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
950 emitFastArithDeTagImmediate(X86::eax);
951 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
952 signExtend32ToPtr(X86::eax, X86::eax);
953 emitFastArithReTagImmediate(X86::eax, X86::eax);
954 emitPutVirtualRegister(result);
955 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
956 emitGetVirtualRegister(op1, X86::eax);
957 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
958 emitFastArithDeTagImmediate(X86::eax);
959 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
960 signExtend32ToPtr(X86::eax, X86::eax);
961 emitFastArithReTagImmediate(X86::eax, X86::eax);
962 emitPutVirtualRegister(result);
963 } else
964 compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
965}
966void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
967{
968 unsigned result = currentInstruction[1].u.operand;
969 unsigned op1 = currentInstruction[2].u.operand;
970 unsigned op2 = currentInstruction[3].u.operand;
971
972 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
973 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
974 linkSlowCase(iter);
975 linkSlowCase(iter);
976 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
977 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
978 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
979 emitCTICall(Interpreter::cti_op_mul);
980 emitPutVirtualRegister(result);
981 } else
982 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
983}
984
985void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
986{
987 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
988}
989void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
990{
991 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
992}
993
994#endif
995
996} // namespace JSC
997
998#endif // ENABLE(JIT)
Note: See TracBrowser for help on using the repository browser.