source: webkit/trunk/JavaScriptCore/jit/JITArithmetic.cpp@43370

Last change on this file since 43370 was 43370, checked in by [email protected], 16 years ago

2009-05-07 Dmitry Titov <[email protected]>

Attempt to fix Win build.

  • jit/JITArithmetic.cpp: (JSC::JIT::compileFastArithSlow_op_jnless):
File size: 44.3 KB
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

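// Shorthand so that hand-emitted assembly reads naturally: with the macro
// below, '__ movq_rr(a, b)' expands to 'm_assembler.movq_rr(a, b)'.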
#define __ m_assembler.

using namespace std;

#if PLATFORM(MAC)

static inline bool isSSE2Present()
{
    return true; // All X86 Macs are guaranteed to support at least SSE2
}

#else

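// Runtime SSE2 detection: execute CPUID function 1 and test bit 26 of EDX,
// which the x86 architecture defines as the SSE2 feature flag. The check runs
// once, in the constructor of a function-local static, and the result is
// cached. The GCC version saves and restores %ebx around the CPUID because
// %ebx may be in use as the PIC base register.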
static bool isSSE2Present()
{
    static const int SSE2FeatureBit = 1 << 26;
    struct SSE2Check {
        SSE2Check()
        {
            int flags;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
            );
#else
            flags = 0;
#endif
            present = (flags & SSE2FeatureBit) != 0;
        }
        bool present;
    };
    static SSE2Check check;
    return check.present;
}

#endif

namespace JSC {

void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
    // Mask with 0x1f as per ecma-262 11.7.2 step 7.
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
    and32(Imm32(0x1f), regT2);
#endif
    lshift32(regT2, regT0);
#if !USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(regT0, 1);
    emitPutJITStubArg(regT2, 2);
    emitCTICall(JITStubs::cti_op_lshift);
    emitPutVirtualRegister(result);
}

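// A note on the fast-path/slow-path protocol used throughout this file: each
// emitJumpSlowCaseIfNot*/addSlowCase call in a compileFastArith_* function
// appends one SlowCaseEntry, and the matching compileFastArithSlow_* function
// must consume those entries in exactly the same order, either binding them
// immediately (linkSlowCase) or capturing them for later binding (getSlowCase).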
void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
{
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
        rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (isSSE2Present()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            __ movq_rr(regT0, X86::xmm0);
#else
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
#endif
            __ cvttsd2si_rr(X86::xmm0, regT0);
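            // cvttsd2si produces 0x80000000 (the x86 "integer indefinite"
            // value) when the double is NaN or out of int32 range, so that
            // result has to be treated as a slow case.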
            addSlowCase(branch32(Equal, regT0, Imm32(0x80000000)));
#if !USE(ALTERNATE_JSIMMEDIATE)
            add32(regT0, regT0);
            addSlowCase(__ jo());
#endif
            lhsIsInt.link(this);
        } else
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT2);
        emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), regT2);
#endif
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(regT2, regT0);
#else
        rshiftPtr(regT2, regT0);
#endif
    }
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op2))
        emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    else {
        if (isSSE2Present()) {
#if USE(ALTERNATE_JSIMMEDIATE)
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            emitGetVirtualRegister(op1, regT0);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
        }
        emitPutJITStubArg(regT2, 2);
    }

    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_rshift);
    emitPutVirtualRegister(result);
}

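// op_jnless branches to target when !(op1 < op2) and falls through when
// (op1 < op2). The fast path below handles only immediate-integer
// comparisons; anything involving doubles or non-numbers defers to the
// slow path.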
void JIT::compileFastArith_op_jnless(unsigned op1, unsigned op2, unsigned target)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
    }
}
void JIT::compileFastArithSlow_op_jnless(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator& iter)
{
    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (isSSE2Present()) {
#if USE(ALTERNATE_JSIMMEDIATE)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            m_assembler.movq_rr(regT0, X86::xmm0);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
            m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
#endif

            int32_t op2imm = getConstantOperand(op2).getInt32Fast();

            m_assembler.movl_i32r(op2imm, regT1);
            m_assembler.cvtsi2sd_rr(regT1, X86::xmm1);

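            // With op1's double in xmm0 and op2's in xmm1, ucomisd sets the
            // flags so that jbe is taken when op2 <= op1 or when the compare
            // is unordered (a NaN operand) - i.e. exactly when !(op1 < op2),
            // the jnless condition. The same pattern recurs in the cases below.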
            m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
            emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(ALTERNATE_JSIMMEDIATE)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        emitPutJITStubArg(regT0, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
        emitCTICall(JITStubs::cti_op_jless);
        emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (isSSE2Present()) {
#if USE(ALTERNATE_JSIMMEDIATE)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            m_assembler.movq_rr(regT1, X86::xmm1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1 = emitJumpIfNotJSCell(regT1);

            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
            m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
#endif

            int32_t op1imm = getConstantOperand(op1).getInt32Fast();

            m_assembler.movl_i32r(op1imm, regT0);
            m_assembler.cvtsi2sd_rr(regT0, X86::xmm0);

            m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
            emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(ALTERNATE_JSIMMEDIATE)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
        emitPutJITStubArg(regT1, 2);
        emitCTICall(JITStubs::cti_op_jless);
        emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);

    } else {
        linkSlowCase(iter);

        if (isSSE2Present()) {
#if USE(ALTERNATE_JSIMMEDIATE)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            m_assembler.movq_rr(regT0, X86::xmm0);
            m_assembler.movq_rr(regT1, X86::xmm1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2 = emitJumpIfNotJSCell(regT1);

            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
            m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
            m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
#endif

            m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
            emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(ALTERNATE_JSIMMEDIATE)
            fail1.link(this);
            fail2.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2.link(this);
            fail3.link(this);
            fail4.link(this);
#endif
        }

        linkSlowCase(iter);
        emitPutJITStubArg(regT0, 1);
        emitPutJITStubArg(regT1, 2);
        emitCTICall(JITStubs::cti_op_jless);
        emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
    }
}

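// For op_bitand, ANDing two tagged values is largely tag-transparent, since
// identical tag bits AND together to themselves. The subtlety is the
// constant-operand case under USE(ALTERNATE_JSIMMEDIATE): the 32-bit
// immediate is sign-extended to 64 bits, so a negative constant (high bits
// all ones) preserves the tag, while a non-negative constant clears it and
// the result must be re-tagged - hence the 'if (imm >= 0)' below.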
void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
{
    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
        emitPutJITStubArg(regT0, 2);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitPutJITStubArg(regT0, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    } else {
        emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
        emitPutJITStubArg(regT1, 2);
    }
    emitCTICall(JITStubs::cti_op_bitand);
    emitPutVirtualRegister(result);
}

#if PLATFORM(X86) || PLATFORM(X86_64)
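// op_mod uses the hardware divide on x86: cdq sign-extends eax into edx:eax,
// idivl computes edx:eax / ecx leaving the quotient in eax and the remainder
// in edx, and the remainder becomes the result. The slow cases cover
// non-integer operands and a zero divisor.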
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
#else
    emitFastArithDeTagImmediate(X86::eax);
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    emitFastArithReTagImmediate(X86::edx, X86::eax);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithReTagImmediate(X86::eax, X86::eax);
    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::ecx, 2);
    emitCTICall(JITStubs::cti_op_mod);
    emitPutVirtualRegister(result);
}
#else
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_mod);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
#endif

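// Postfix increment: the old value (regT0) is the result, so the increment
// is performed on a copy in regT1 with an overflow check; the copy is written
// back to srcDst while the original value goes to the result register.
// op_post_dec below mirrors this with a subtract.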
void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_post_inc);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}

void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_post_dec);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}

void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_pre_inc);
    emitPutVirtualRegister(srcDst);
}

void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_pre_dec);
    emitPutVirtualRegister(srcDst);
}


#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)

void JIT::compileFastArith_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_add);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_mul);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_sub);
    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)

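// Under the ALTERNATE_JSIMMEDIATE (64-bit) encoding, immediate integers carry
// their tag in the upper bits, so 32-bit arithmetic on the low halves yields
// an untagged int32 result that emitFastArithIntToImmNoCheck re-tags. op_mul
// needs the extra zero-result slow case because 0 * -N should be -0, which is
// not representable as an immediate integer.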
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, X86::edx, X86::eax));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, X86::edx, X86::eax));
        addSlowCase(branchTest32(Zero, X86::eax));
    }
    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
}

void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned, unsigned op1, unsigned, OperandTypes types)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
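    // Consequently, adding tagTypeNumberRegister to an immediate-encoded double
    // recovers its raw IEEE 754 bit pattern (ready to movq into an XMM register),
    // and subtracting it again re-encodes the result as an immediate.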

    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, X86::eax);

    Label stubFunctionCall(this);
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::edx, 2);
    if (opcodeID == op_add)
        emitCTICall(JITStubs::cti_op_add);
    else if (opcodeID == op_sub)
        emitCTICall(JITStubs::cti_op_sub);
    else {
        ASSERT(opcodeID == op_mul);
        emitCTICall(JITStubs::cti_op_mul);
    }
    Jump end = jump();

    // if we get here, eax is not an int32, edx not yet checked.
    notImm1.link(this);
    if (!types.first().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::eax).linkTo(stubFunctionCall, this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
    addPtr(tagTypeNumberRegister, X86::eax);
    m_assembler.movq_rr(X86::eax, X86::xmm1);
    Jump op2isDouble = emitJumpIfNotImmediateInteger(X86::edx);
    m_assembler.cvtsi2sd_rr(X86::edx, X86::xmm2);
    Jump op2wasInteger = jump();

    // if we get here, eax IS an int32, edx is not.
    notImm2.link(this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
    m_assembler.cvtsi2sd_rr(X86::eax, X86::xmm1);
    op2isDouble.link(this);
    addPtr(tagTypeNumberRegister, X86::edx);
    m_assembler.movq_rr(X86::edx, X86::xmm2);
    op2wasInteger.link(this);

    if (opcodeID == op_add)
        m_assembler.addsd_rr(X86::xmm2, X86::xmm1);
    else if (opcodeID == op_sub)
        m_assembler.subsd_rr(X86::xmm2, X86::xmm1);
    else {
        ASSERT(opcodeID == op_mul);
        m_assembler.mulsd_rr(X86::xmm2, X86::xmm1);
    }
    m_assembler.movq_rr(X86::xmm1, X86::eax);
    subPtr(tagTypeNumberRegister, X86::eax);

    end.link(this);
}

void JIT::compileFastArith_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(JITStubs::cti_op_add);
        emitPutVirtualRegister(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
        emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(JITStubs::cti_op_add);
    } else if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(JITStubs::cti_op_add);
    } else
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(JITStubs::cti_op_mul);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

#else

typedef X86Assembler::JmpSrc JmpSrc;
typedef X86Assembler::JmpDst JmpDst;
typedef X86Assembler::XMMRegisterID XMMRegisterID;


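// In the 32-bit immediate encoding, doubles live in heap-allocated
// JSNumberCells. When the recorded operand type is 'reusable', the operand's
// temporary JSNumberCell can be overwritten in place: the code below stores
// the double result straight back into that cell and returns the cell itself,
// avoiding a fresh allocation on the fast path.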
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    Structure* numberStructure = m_globalData->numberStructure.get();
    JmpSrc wasJSNumberCell1;
    JmpSrc wasJSNumberCell2;

    emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);

    if (types.second().isReusable() && isSSE2Present()) {
        ASSERT(types.second().mightBeNumber());

        // Check op2 is a number
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1) In this case src2 is a reusable number cell.
        // Slow case if src1 is not a number type.
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src1 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src1 is an immediate
        __ linkJump(op1imm, __ label());
        emitFastArithImmToInt(X86::eax);
        __ cvtsi2sd_rr(X86::eax, X86::xmm0);
        // (1c)
        __ linkJump(loadedDouble, __ label());
        if (opcodeID == op_add)
            __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
        }

        // Store the result to the JSNumberCell and jump.
        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::edx);
        __ movl_rr(X86::edx, X86::eax);
        emitPutVirtualRegister(dst);
        wasJSNumberCell2 = __ jmp();

        // (2) This handles cases where src2 is an immediate number.
        // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
        __ linkJump(op2imm, __ label());
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    } else if (types.first().isReusable() && isSSE2Present()) {
        ASSERT(types.first().mightBeNumber());

        // Check op1 is a number
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
        JmpSrc op1imm = __ jne();
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
            addSlowCase(__ jne());
        }

        // (1) In this case src1 is a reusable number cell.
        // Slow case if src2 is not a number type.
        __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
        JmpSrc op2imm = __ jne();
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
            __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
            addSlowCase(__ jne());
        }

        // (1a) if we get here, src2 is also a number cell
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
        JmpSrc loadedDouble = __ jmp();
        // (1b) if we get here, src2 is an immediate
        __ linkJump(op2imm, __ label());
        emitFastArithImmToInt(X86::edx);
        __ cvtsi2sd_rr(X86::edx, X86::xmm1);
        // (1c)
        __ linkJump(loadedDouble, __ label());
        __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
        if (opcodeID == op_add)
            __ addsd_rr(X86::xmm1, X86::xmm0);
        else if (opcodeID == op_sub)
            __ subsd_rr(X86::xmm1, X86::xmm0);
        else {
            ASSERT(opcodeID == op_mul);
            __ mulsd_rr(X86::xmm1, X86::xmm0);
        }

        // Store the result to the JSNumberCell and jump.
        __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
        emitPutVirtualRegister(dst);
        wasJSNumberCell1 = __ jmp();

        // (2) This handles cases where src1 is an immediate number.
        // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
        __ linkJump(op1imm, __ label());
        emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
    } else
        emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);

    if (opcodeID == op_add) {
        emitFastArithDeTagImmediate(X86::eax);
        __ addl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
    } else if (opcodeID == op_sub) {
        __ subl_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    } else {
        ASSERT(opcodeID == op_mul);
        // convert eax & edx from JSImmediates to ints, and check if either are zero
        emitFastArithImmToInt(X86::edx);
        Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
        __ testl_rr(X86::edx, X86::edx);
        JmpSrc op2NonZero = __ jne();
        op1Zero.link(this);
        // If either input is zero, add the two together and check whether the result is < 0.
        // If it is, we have a problem: for (N < 0), (N * 0) == -0, which is not representable as a JSImmediate.
        __ movl_rr(X86::eax, X86::ecx);
        __ addl_rr(X86::edx, X86::ecx);
        addSlowCase(__ js());
        // Skip the above check if neither input is zero
        __ linkJump(op2NonZero, __ label());
        __ imull_rr(X86::edx, X86::eax);
        addSlowCase(__ jo());
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
    }
    emitPutVirtualRegister(dst);

    if (types.second().isReusable() && isSSE2Present()) {
        __ linkJump(wasJSNumberCell2, __ label());
    } else if (types.first().isReusable() && isSSE2Present()) {
        __ linkJump(wasJSNumberCell1, __ label());
    }
}

void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    linkSlowCase(iter);
    if (types.second().isReusable() && isSSE2Present()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    } else if (types.first().isReusable() && isSSE2Present()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    }
    linkSlowCase(iter);

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
    emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
    if (opcodeID == op_add)
        emitCTICall(JITStubs::cti_op_add);
    else if (opcodeID == op_sub)
        emitCTICall(JITStubs::cti_op_sub);
    else {
        ASSERT(opcodeID == op_mul);
        emitCTICall(JITStubs::cti_op_mul);
    }
    emitPutVirtualRegister(dst);
}

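// In the 32-bit immediate encoding an integer is stored shifted left by
// IntegerPayloadShift with the low tag bit set, so a constant can be added
// to a tagged value directly by pre-shifting it: the tag bit passes through
// the addition unchanged, and only the overflow check is needed.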
void JIT::compileFastArith_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        if (types.first().mightBeNumber() && types.second().mightBeNumber())
            compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
        else {
            emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
            emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
            emitCTICall(JITStubs::cti_op_add);
            emitPutVirtualRegister(result);
        }
    }
}
void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

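    // In the constant-operand cases the fast path added the pre-shifted
    // constant in place, so on the overflow slow case that addition must be
    // undone (the sub32 below) before passing the original operand to the
    // stub. The not-an-immediate slow case jumps past the undo, since the
    // add never executed.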
    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::eax, 2);
        emitCTICall(JITStubs::cti_op_add);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        emitPutJITStubArg(X86::eax, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(JITStubs::cti_op_add);
        emitPutVirtualRegister(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}

void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        emitFastArithDeTagImmediate(X86::eax);
        addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, X86::eax);
        emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
        emitFastArithDeTagImmediate(X86::eax);
        addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
        signExtend32ToPtr(X86::eax, X86::eax);
        emitFastArithReTagImmediate(X86::eax, X86::eax);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(JITStubs::cti_op_mul);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}

void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
{
    compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}

#endif

} // namespace JSC

#endif // ENABLE(JIT)