source: webkit/trunk/JavaScriptCore/jit/JITArithmetic.cpp@43409

Last change on this file since 43409 was 43409, checked in by [email protected], 16 years ago

2009-05-08 Geoffrey Garen <[email protected]>

Reviewed by Gavin Barraclough.


More abstraction for JITStub calls from JITed code.


Added a JITStubCall class that automatically handles things like assigning
arguments to different stack slots and storing return values. Deployed
the class in about a billion places. A bunch more places remain to be
fixed up, but this is a good stopping point for now.
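
The call-site pattern this produces, as deployed throughout the file below
(a minimal sketch of typical usage; the opcode and registers are one example
taken from compileFastArithSlow_op_lshift):

    JITStubCall stubCall(this, JITStubs::cti_op_lshift); // bind the stub function
    stubCall.addArgument(regT0);  // arguments are assigned to stack slots in order
    stubCall.addArgument(regT2);
    stubCall.call(result);        // emits the call and stores the return value in 'result'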

  • jit/JIT.cpp: (JSC::JIT::emitTimeoutCheck): (JSC::JIT::privateCompileMainPass): (JSC::JIT::privateCompileSlowCases): (JSC::JIT::privateCompile):
  • jit/JIT.h: (JSC::JIT::JSRInfo::JSRInfo): (JSC::JITStubCall::JITStubCall): (JSC::JITStubCall::addArgument): (JSC::JITStubCall::call): (JSC::JITStubCall::): (JSC::CallEvalJITStub::CallEvalJITStub):
  • jit/JITArithmetic.cpp: (JSC::JIT::compileFastArithSlow_op_lshift): (JSC::JIT::compileFastArithSlow_op_rshift): (JSC::JIT::compileFastArithSlow_op_jnless): (JSC::JIT::compileFastArithSlow_op_bitand): (JSC::JIT::compileFastArithSlow_op_mod): (JSC::JIT::compileFastArith_op_mod): (JSC::JIT::compileFastArithSlow_op_post_inc): (JSC::JIT::compileFastArithSlow_op_post_dec): (JSC::JIT::compileFastArithSlow_op_pre_inc): (JSC::JIT::compileFastArithSlow_op_pre_dec): (JSC::JIT::compileFastArith_op_add): (JSC::JIT::compileFastArith_op_mul): (JSC::JIT::compileFastArith_op_sub): (JSC::JIT::compileBinaryArithOpSlowCase): (JSC::JIT::compileFastArithSlow_op_add): (JSC::JIT::compileFastArithSlow_op_mul):
  • jit/JITCall.cpp: (JSC::JIT::compileOpCall): (JSC::):
  • jit/JITPropertyAccess.cpp: (JSC::JIT::compileGetByIdHotPath): (JSC::JIT::compilePutByIdHotPath): (JSC::JIT::compileGetByIdSlowCase): (JSC::JIT::compilePutByIdSlowCase):
  • jit/JITStubs.cpp: (JSC::JITStubs::cti_op_resolve_func): (JSC::JITStubs::cti_op_resolve_with_base):
File size: 50.4 KB
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JSArray.h"
34#include "JSFunction.h"
35#include "Interpreter.h"
36#include "ResultType.h"
37#include "SamplingTool.h"
38
39#ifndef NDEBUG
40#include <stdio.h>
41#endif
42
43#define __ m_assembler.
44
45using namespace std;
46
47#if PLATFORM(MAC)
48
49static inline bool isSSE2Present()
50{
51 return true; // All X86 Macs are guaranteed to support at least SSE2
52}
53
54#else
55
56static bool isSSE2Present()
57{
58 static const int SSE2FeatureBit = 1 << 26;
59 struct SSE2Check {
60 SSE2Check()
61 {
62 int flags;
63#if COMPILER(MSVC)
64 _asm {
65 mov eax, 1 // cpuid function 1 gives us the standard feature set
66 cpuid;
67 mov flags, edx;
68 }
69#elif COMPILER(GCC)
70 asm (
71 "movl $0x1, %%eax;"
72 "pushl %%ebx;"
73 "cpuid;"
74 "popl %%ebx;"
75 "movl %%edx, %0;"
76 : "=g" (flags)
77 :
78 : "%eax", "%ecx", "%edx"
79 );
80#else
81 flags = 0;
82#endif
83 present = (flags & SSE2FeatureBit) != 0;
84 }
85 bool present;
86 };
87 static SSE2Check check;
88 return check.present;
89}
90
91#endif
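// A minimal alternative sketch, assuming a GCC 4.8+ toolchain; this file
// predates __builtin_cpu_supports, so the CPUID check above is done by hand.
#if COMPILER(GCC) && 0 // illustrative only, kept disabled
static inline bool isSSE2PresentViaBuiltin()
{
    return __builtin_cpu_supports("sse2"); // GCC performs and caches the cpuid query
}
#endif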
92
93namespace JSC {
94
95void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
96{
97 emitGetVirtualRegisters(op1, regT0, op2, regT2);
98 // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
99 emitJumpSlowCaseIfNotImmediateInteger(regT0);
100 emitJumpSlowCaseIfNotImmediateInteger(regT2);
101 emitFastArithImmToInt(regT0);
102 emitFastArithImmToInt(regT2);
103#if !PLATFORM(X86)
104 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
105 // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked by the instruction.
106 and32(Imm32(0x1f), regT2);
107#endif
108 lshift32(regT2, regT0);
109#if !USE(ALTERNATE_JSIMMEDIATE)
110 addSlowCase(branchAdd32(Overflow, regT0, regT0));
111 signExtend32ToPtr(regT0, regT0);
112#endif
113 emitFastArithReTagImmediate(regT0, regT0);
114 emitPutVirtualRegister(result);
115}
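// Shift counts in ECMAScript are taken modulo 32 (hence the & 0x1f above); for
// example, (1 << 37) evaluates to (1 << 5) == 32. The 32-bit x86 shift
// instructions mask the count to 5 bits themselves, so the explicit and32 is
// skipped on that platform.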
116void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
117{
118#if USE(ALTERNATE_JSIMMEDIATE)
119 UNUSED_PARAM(op1);
120 UNUSED_PARAM(op2);
121 linkSlowCase(iter);
122 linkSlowCase(iter);
123#else
124 // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
125 Jump notImm1 = getSlowCase(iter);
126 Jump notImm2 = getSlowCase(iter);
127 linkSlowCase(iter);
128 emitGetVirtualRegisters(op1, regT0, op2, regT2);
129 notImm1.link(this);
130 notImm2.link(this);
131#endif
132 JITStubCall stubCall(this, JITStubs::cti_op_lshift);
133 stubCall.addArgument(regT0);
134 stubCall.addArgument(regT2);
135 stubCall.call(result);
136}
137
138void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
139{
140 if (isOperandConstantImmediateInt(op2)) {
141 emitGetVirtualRegister(op1, regT0);
142 emitJumpSlowCaseIfNotImmediateInteger(regT0);
143 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
144#if USE(ALTERNATE_JSIMMEDIATE)
145 rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
146#else
147 rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
148#endif
149 } else {
150 emitGetVirtualRegisters(op1, regT0, op2, regT2);
151 if (isSSE2Present()) {
152 Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
153#if USE(ALTERNATE_JSIMMEDIATE)
154 addSlowCase(emitJumpIfNotImmediateNumber(regT0));
155 __ movq_rr(regT0, X86::xmm0);
156#else
157 emitJumpSlowCaseIfNotJSCell(regT0, op1);
158 addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
159 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
160#endif
161 __ cvttsd2si_rr(X86::xmm0, regT0);
162 addSlowCase(branch32(Equal, regT0, Imm32(0x80000000)));
163#if !USE(ALTERNATE_JSIMMEDIATE)
164 add32(regT0, regT0);
165 addSlowCase(__ jo());
166#endif
167 lhsIsInt.link(this);
168 } else
169 emitJumpSlowCaseIfNotImmediateInteger(regT0);
170 emitJumpSlowCaseIfNotImmediateInteger(regT2);
171 emitFastArithImmToInt(regT2);
172#if !PLATFORM(X86)
173 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
174 // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked by the instruction.
175 and32(Imm32(0x1f), regT2);
176#endif
177#if USE(ALTERNATE_JSIMMEDIATE)
178 rshift32(regT2, regT0);
179#else
180 rshiftPtr(regT2, regT0);
181#endif
182 }
183#if USE(ALTERNATE_JSIMMEDIATE)
184 emitFastArithIntToImmNoCheck(regT0, regT0);
185#else
186 orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
187#endif
188 emitPutVirtualRegister(result);
189}
190void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
191{
192 linkSlowCase(iter);
193 if (isOperandConstantImmediateInt(op2))
194 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
195 else {
196 if (isSSE2Present()) {
197#if USE(ALTERNATE_JSIMMEDIATE)
198 linkSlowCase(iter);
199#else
200 linkSlowCaseIfNotJSCell(iter, op1);
201 linkSlowCase(iter);
202 linkSlowCase(iter);
203#endif
204 linkSlowCase(iter);
205 // We're reloading op1 to regT0 as we can no longer guarantee that
206 // we have not munged the operand. It may have already been shifted
207 // correctly, but it still will not have been tagged.
208 emitGetVirtualRegister(op1, regT0);
209 } else {
210 linkSlowCase(iter);
211 linkSlowCase(iter);
212 }
213 emitPutJITStubArg(regT2, 2);
214 }
215
216 JITStubCall stubCall(this, JITStubs::cti_op_rshift);
217 stubCall.addArgument(regT0);
218 stubCall.call(result);
219}
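// Note the mixed style above: argument 2 is placed manually with
// emitPutJITStubArg before JITStubCall supplies argument 1. This is one of
// the call sites the changelog describes as remaining to be fixed up.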
220
221void JIT::compileFastArith_op_jnless(unsigned op1, unsigned op2, unsigned target)
222{
223 // We generate inline code for the following cases in the fast path:
224 // - int immediate to constant int immediate
225 // - constant int immediate to int immediate
226 // - int immediate to int immediate
227
228 if (isOperandConstantImmediateInt(op2)) {
229 emitGetVirtualRegister(op1, regT0);
230 emitJumpSlowCaseIfNotImmediateInteger(regT0);
231#if USE(ALTERNATE_JSIMMEDIATE)
232 int32_t op2imm = getConstantOperandImmediateInt(op2);
233#else
234 int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
235#endif
236 addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
237 } else if (isOperandConstantImmediateInt(op1)) {
238 emitGetVirtualRegister(op2, regT1);
239 emitJumpSlowCaseIfNotImmediateInteger(regT1);
240#if USE(ALTERNATE_JSIMMEDIATE)
241 int32_t op1imm = getConstantOperandImmediateInt(op1);
242#else
243 int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
244#endif
245 addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
246 } else {
247 emitGetVirtualRegisters(op1, regT0, op2, regT1);
248 emitJumpSlowCaseIfNotImmediateInteger(regT0);
249 emitJumpSlowCaseIfNotImmediateInteger(regT1);
250
251 addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
252 }
253}
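// op_jnless jumps when op1 < op2 is *not* true, so the integer fast path
// tests the inverted condition (GreaterThanOrEqual) and branches straight
// to the target.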
254void JIT::compileFastArithSlow_op_jnless(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator& iter)
255{
256 // We generate inline code for the following cases in the slow path:
257 // - floating-point number to constant int immediate
258 // - constant int immediate to floating-point number
259 // - floating-point number to floating-point number.
260
261 if (isOperandConstantImmediateInt(op2)) {
262 linkSlowCase(iter);
263
264 if (isSSE2Present()) {
265#if USE(ALTERNATE_JSIMMEDIATE)
266 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
267 addPtr(tagTypeNumberRegister, regT0);
268 m_assembler.movq_rr(regT0, X86::xmm0);
269#else
270 Jump fail1;
271 if (!m_codeBlock->isKnownNotImmediate(op1))
272 fail1 = emitJumpIfNotJSCell(regT0);
273
274 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
275 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
276#endif
277
278 int32_t op2imm = getConstantOperand(op2).getInt32Fast();
279
280 m_assembler.movl_i32r(op2imm, regT1);
281 m_assembler.cvtsi2sd_rr(regT1, X86::xmm1);
282
283 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
284 emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);
285
286 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
287
288#if USE(ALTERNATE_JSIMMEDIATE)
289 fail1.link(this);
290#else
291 if (!m_codeBlock->isKnownNotImmediate(op1))
292 fail1.link(this);
293 fail2.link(this);
294#endif
295 }
296
297 JITStubCall stubCall(this, JITStubs::cti_op_jless);
298 stubCall.addArgument(regT0);
299 stubCall.addArgument(op2, regT2);
300 stubCall.call();
301 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
302
303 } else if (isOperandConstantImmediateInt(op1)) {
304 linkSlowCase(iter);
305
306 if (isSSE2Present()) {
307#if USE(ALTERNATE_JSIMMEDIATE)
308 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
309 addPtr(tagTypeNumberRegister, regT1);
310 m_assembler.movq_rr(regT1, X86::xmm1);
311#else
312 Jump fail1;
313 if (!m_codeBlock->isKnownNotImmediate(op2))
314 fail1 = emitJumpIfNotJSCell(regT1);
315
316 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
317 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
318#endif
319
320 int32_t op1imm = getConstantOperand(op1).getInt32Fast();
321
322 m_assembler.movl_i32r(op1imm, regT0);
323 m_assembler.cvtsi2sd_rr(regT0, X86::xmm0);
324
325 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
326 emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);
327
328 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
329
330#if USE(ALTERNATE_JSIMMEDIATE)
331 fail1.link(this);
332#else
333 if (!m_codeBlock->isKnownNotImmediate(op2))
334 fail1.link(this);
335 fail2.link(this);
336#endif
337 }
338
339 JITStubCall stubCall(this, JITStubs::cti_op_jless);
340 stubCall.addArgument(op1, regT2);
341 stubCall.addArgument(regT1);
342 stubCall.call();
343 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
344
345 } else {
346 linkSlowCase(iter);
347
348 if (isSSE2Present()) {
349#if USE(ALTERNATE_JSIMMEDIATE)
350 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
351 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
352 Jump fail3 = emitJumpIfImmediateInteger(regT1);
353 addPtr(tagTypeNumberRegister, regT0);
354 addPtr(tagTypeNumberRegister, regT1);
355 m_assembler.movq_rr(regT0, X86::xmm0);
356 m_assembler.movq_rr(regT1, X86::xmm1);
357#else
358 Jump fail1;
359 if (!m_codeBlock->isKnownNotImmediate(op1))
360 fail1 = emitJumpIfNotJSCell(regT0);
361
362 Jump fail2;
363 if (!m_codeBlock->isKnownNotImmediate(op2))
364 fail2 = emitJumpIfNotJSCell(regT1);
365
366 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
367 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
368 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
369 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
370#endif
371
372 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
373 emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);
374
375 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
376
377#if USE(ALTERNATE_JSIMMEDIATE)
378 fail1.link(this);
379 fail2.link(this);
380 fail3.link(this);
381#else
382 if (!m_codeBlock->isKnownNotImmediate(op1))
383 fail1.link(this);
384 if (!m_codeBlock->isKnownNotImmediate(op2))
385 fail2.link(this);
386 fail3.link(this);
387 fail4.link(this);
388#endif
389 }
390
391 linkSlowCase(iter);
392 JITStubCall stubCall(this, JITStubs::cti_op_jless);
393 stubCall.addArgument(regT0);
394 stubCall.addArgument(regT1);
395 stubCall.call();
396 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
397 }
398}
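// In the double paths above, ucomisd_rr(xmm0, xmm1) sets the flags as for
// (xmm1 - xmm0), assuming the assembler's usual (src, dst) argument order;
// jbe therefore fires when op2 <= op1, i.e. exactly when op1 < op2 is false.
// An unordered (NaN) compare sets CF and ZF too, so NaN operands also take
// the jnless branch, as required. The op_jnlesseq slow path below uses jb
// in the same way for the !(op1 <= op2) condition.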
399
400void JIT::compileFastArith_op_jnlesseq(unsigned op1, unsigned op2, unsigned target)
401{
402 // We generate inline code for the following cases in the fast path:
403 // - int immediate to constant int immediate
404 // - constant int immediate to int immediate
405 // - int immediate to int immediate
406
407 if (isOperandConstantImmediateInt(op2)) {
408 emitGetVirtualRegister(op1, regT0);
409 emitJumpSlowCaseIfNotImmediateInteger(regT0);
410#if USE(ALTERNATE_JSIMMEDIATE)
411 int32_t op2imm = getConstantOperandImmediateInt(op2);
412#else
413 int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
414#endif
415 addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
416 } else if (isOperandConstantImmediateInt(op1)) {
417 emitGetVirtualRegister(op2, regT1);
418 emitJumpSlowCaseIfNotImmediateInteger(regT1);
419#if USE(ALTERNATE_JSIMMEDIATE)
420 int32_t op1imm = getConstantOperandImmediateInt(op1);
421#else
422 int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
423#endif
424 addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
425 } else {
426 emitGetVirtualRegisters(op1, regT0, op2, regT1);
427 emitJumpSlowCaseIfNotImmediateInteger(regT0);
428 emitJumpSlowCaseIfNotImmediateInteger(regT1);
429
430 addJump(branch32(GreaterThan, regT0, regT1), target + 3);
431 }
432}
433void JIT::compileFastArithSlow_op_jnlesseq(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator& iter)
434{
435 // We generate inline code for the following cases in the slow path:
436 // - floating-point number to constant int immediate
437 // - constant int immediate to floating-point number
438 // - floating-point number to floating-point number.
439
440 if (isOperandConstantImmediateInt(op2)) {
441 linkSlowCase(iter);
442
443 if (isSSE2Present()) {
444#if USE(ALTERNATE_JSIMMEDIATE)
445 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
446 addPtr(tagTypeNumberRegister, regT0);
447 m_assembler.movq_rr(regT0, X86::xmm0);
448#else
449 Jump fail1;
450 if (!m_codeBlock->isKnownNotImmediate(op1))
451 fail1 = emitJumpIfNotJSCell(regT0);
452
453 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
454 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
455#endif
456
457 int32_t op2imm = getConstantOperand(op2).getInt32Fast();
458
459 m_assembler.movl_i32r(op2imm, regT1);
460 m_assembler.cvtsi2sd_rr(regT1, X86::xmm1);
461
462 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
463 emitJumpSlowToHot(Jump::Jump(m_assembler.jb()), target + 3);
464
465 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
466
467#if USE(ALTERNATE_JSIMMEDIATE)
468 fail1.link(this);
469#else
470 if (!m_codeBlock->isKnownNotImmediate(op1))
471 fail1.link(this);
472 fail2.link(this);
473#endif
474 }
475
476 JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
477 stubCall.addArgument(regT0);
478 stubCall.addArgument(op2, regT2);
479 stubCall.call();
480 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
481
482 } else if (isOperandConstantImmediateInt(op1)) {
483 linkSlowCase(iter);
484
485 if (isSSE2Present()) {
486#if USE(ALTERNATE_JSIMMEDIATE)
487 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
488 addPtr(tagTypeNumberRegister, regT1);
489 m_assembler.movq_rr(regT1, X86::xmm1);
490#else
491 Jump fail1;
492 if (!m_codeBlock->isKnownNotImmediate(op2))
493 fail1 = emitJumpIfNotJSCell(regT1);
494
495 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
496 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
497#endif
498
499 int32_t op1imm = getConstantOperand(op1).getInt32Fast();
500
501 m_assembler.movl_i32r(op1imm, regT0);
502 m_assembler.cvtsi2sd_rr(regT0, X86::xmm0);
503
504 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
505 emitJumpSlowToHot(Jump::Jump(m_assembler.jb()), target + 3);
506
507 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
508
509#if USE(ALTERNATE_JSIMMEDIATE)
510 fail1.link(this);
511#else
512 if (!m_codeBlock->isKnownNotImmediate(op2))
513 fail1.link(this);
514 fail2.link(this);
515#endif
516 }
517
518 JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
519 stubCall.addArgument(op1, regT2);
520 stubCall.addArgument(regT1);
521 stubCall.call();
522 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
523
524 } else {
525 linkSlowCase(iter);
526
527 if (isSSE2Present()) {
528#if USE(ALTERNATE_JSIMMEDIATE)
529 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
530 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
531 Jump fail3 = emitJumpIfImmediateInteger(regT1);
532 addPtr(tagTypeNumberRegister, regT0);
533 addPtr(tagTypeNumberRegister, regT1);
534 m_assembler.movq_rr(regT0, X86::xmm0);
535 m_assembler.movq_rr(regT1, X86::xmm1);
536#else
537 Jump fail1;
538 if (!m_codeBlock->isKnownNotImmediate(op1))
539 fail1 = emitJumpIfNotJSCell(regT0);
540
541 Jump fail2;
542 if (!m_codeBlock->isKnownNotImmediate(op2))
543 fail2 = emitJumpIfNotJSCell(regT1);
544
545 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
546 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
547 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
548 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
549#endif
550
551 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
552 emitJumpSlowToHot(Jump::Jump(m_assembler.jb()), target + 3);
553
554 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
555
556#if USE(ALTERNATE_JSIMMEDIATE)
557 fail1.link(this);
558 fail2.link(this);
559 fail3.link(this);
560#else
561 if (!m_codeBlock->isKnownNotImmediate(op1))
562 fail1.link(this);
563 if (!m_codeBlock->isKnownNotImmediate(op2))
564 fail2.link(this);
565 fail3.link(this);
566 fail4.link(this);
567#endif
568 }
569
570 linkSlowCase(iter);
571 JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
572 stubCall.addArgument(regT0);
573 stubCall.addArgument(regT1);
574 stubCall.call();
575 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
576 }
577}
578
579void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
580{
581 if (isOperandConstantImmediateInt(op1)) {
582 emitGetVirtualRegister(op2, regT0);
583 emitJumpSlowCaseIfNotImmediateInteger(regT0);
584#if USE(ALTERNATE_JSIMMEDIATE)
585 int32_t imm = getConstantOperandImmediateInt(op1);
586 andPtr(Imm32(imm), regT0);
587 if (imm >= 0)
588 emitFastArithIntToImmNoCheck(regT0, regT0);
589#else
590 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
591#endif
592 } else if (isOperandConstantImmediateInt(op2)) {
593 emitGetVirtualRegister(op1, regT0);
594 emitJumpSlowCaseIfNotImmediateInteger(regT0);
595#if USE(ALTERNATE_JSIMMEDIATE)
596 int32_t imm = getConstantOperandImmediateInt(op2);
597 andPtr(Imm32(imm), regT0);
598 if (imm >= 0)
599 emitFastArithIntToImmNoCheck(regT0, regT0);
600#else
601 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
602#endif
603 } else {
604 emitGetVirtualRegisters(op1, regT0, op2, regT1);
605 andPtr(regT1, regT0);
606 emitJumpSlowCaseIfNotImmediateInteger(regT0);
607 }
608 emitPutVirtualRegister(result);
609}
610void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
611{
612 linkSlowCase(iter);
613 if (isOperandConstantImmediateInt(op1)) {
614 JITStubCall stubCall(this, JITStubs::cti_op_bitand);
615 stubCall.addArgument(op1, regT2);
616 stubCall.addArgument(regT0);
617 stubCall.call(result);
618 } else if (isOperandConstantImmediateInt(op2)) {
619 JITStubCall stubCall(this, JITStubs::cti_op_bitand);
620 stubCall.addArgument(regT0);
621 stubCall.addArgument(op2, regT2);
622 stubCall.call(result);
623 } else {
624 JITStubCall stubCall(this, JITStubs::cti_op_bitand);
625 stubCall.addArgument(op1, regT2);
626 stubCall.addArgument(regT1);
627 stubCall.call(result);
628 }
629}
630
631#if PLATFORM(X86) || PLATFORM(X86_64)
632void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
633{
634 emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
635 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
636 emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
637#if USE(ALTERNATE_JSIMMEDIATE)
638 addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
639 m_assembler.cdq();
640 m_assembler.idivl_r(X86::ecx);
641#else
642 emitFastArithDeTagImmediate(X86::eax);
643 addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
644 m_assembler.cdq();
645 m_assembler.idivl_r(X86::ecx);
646 signExtend32ToPtr(X86::edx, X86::edx);
647#endif
648 emitFastArithReTagImmediate(X86::edx, X86::eax);
649 emitPutVirtualRegister(result);
650}
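// cdq sign-extends eax into edx:eax, and idivl then divides edx:eax by ecx,
// leaving the quotient in eax and the remainder in edx; this is why op_mod
// re-tags its result from edx.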
651void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
652{
653#if USE(ALTERNATE_JSIMMEDIATE)
654 linkSlowCase(iter);
655 linkSlowCase(iter);
656 linkSlowCase(iter);
657#else
658 Jump notImm1 = getSlowCase(iter);
659 Jump notImm2 = getSlowCase(iter);
660 linkSlowCase(iter);
661 emitFastArithReTagImmediate(X86::eax, X86::eax);
662 emitFastArithReTagImmediate(X86::ecx, X86::ecx);
663 notImm1.link(this);
664 notImm2.link(this);
665#endif
666 JITStubCall stubCall(this, JITStubs::cti_op_mod);
667 stubCall.addArgument(X86::eax);
668 stubCall.addArgument(X86::ecx);
669 stubCall.call(result);
670}
671#else
672void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
673{
674 JITStubCall stubCall(this, JITStubs::cti_op_mod);
675 stubCall.addArgument(op1, regT2);
676 stubCall.addArgument(op2, regT2);
677 stubCall.call(result);
678}
679void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
680{
681 ASSERT_NOT_REACHED();
682}
683#endif
684
685void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
686{
687 emitGetVirtualRegister(srcDst, regT0);
688 move(regT0, regT1);
689 emitJumpSlowCaseIfNotImmediateInteger(regT0);
690#if USE(ALTERNATE_JSIMMEDIATE)
691 addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
692 emitFastArithIntToImmNoCheck(regT1, regT1);
693#else
694 addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
695 signExtend32ToPtr(regT1, regT1);
696#endif
697 emitPutVirtualRegister(srcDst, regT1);
698 emitPutVirtualRegister(result);
699}
700
701void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
702{
703 linkSlowCase(iter);
704 linkSlowCase(iter);
705 JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
706 stubCall.addArgument(regT0);
707 stubCall.call(result, srcDst);
708}
709
710void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
711{
712 emitGetVirtualRegister(srcDst, regT0);
713 move(regT0, regT1);
714 emitJumpSlowCaseIfNotImmediateInteger(regT0);
715#if USE(ALTERNATE_JSIMMEDIATE)
716 addSlowCase(branchSub32(Zero, Imm32(1), regT1));
717 emitFastArithIntToImmNoCheck(regT1, regT1);
718#else
719 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
720 signExtend32ToPtr(regT1, regT1);
721#endif
722 emitPutVirtualRegister(srcDst, regT1);
723 emitPutVirtualRegister(result);
724}
725void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
726{
727 linkSlowCase(iter);
728 linkSlowCase(iter);
729 JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
730 stubCall.addArgument(regT0);
731 stubCall.call(result, srcDst);
732}
733
734void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
735{
736 emitGetVirtualRegister(srcDst, regT0);
737 emitJumpSlowCaseIfNotImmediateInteger(regT0);
738#if USE(ALTERNATE_JSIMMEDIATE)
739 addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
740 emitFastArithIntToImmNoCheck(regT0, regT0);
741#else
742 addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
743 signExtend32ToPtr(regT0, regT0);
744#endif
745 emitPutVirtualRegister(srcDst);
746}
747void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
748{
749 Jump notImm = getSlowCase(iter);
750 linkSlowCase(iter);
751 emitGetVirtualRegister(srcDst, regT0);
752 notImm.link(this);
753 JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
754 stubCall.addArgument(regT0);
755 stubCall.call(srcDst);
756}
757
758void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
759{
760 emitGetVirtualRegister(srcDst, regT0);
761 emitJumpSlowCaseIfNotImmediateInteger(regT0);
762#if USE(ALTERNATE_JSIMMEDIATE)
763 addSlowCase(branchSub32(Zero, Imm32(1), regT0));
764 emitFastArithIntToImmNoCheck(regT0, regT0);
765#else
766 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
767 signExtend32ToPtr(regT0, regT0);
768#endif
769 emitPutVirtualRegister(srcDst);
770}
771void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
772{
773 Jump notImm = getSlowCase(iter);
774 linkSlowCase(iter);
775 emitGetVirtualRegister(srcDst, regT0);
776 notImm.link(this);
777 JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
778 stubCall.addArgument(regT0);
779 stubCall.call(srcDst);
780}
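// In the 32-bit immediate encoding an integer is stored shifted left by
// IntegerPayloadShift with the tag in the low bits, so the pre/post
// increment and decrement above adjust the payload by one by adding or
// subtracting (1 << IntegerPayloadShift), leaving the tag bits intact.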
781
782
783#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
784
785void JIT::compileFastArith_op_add(Instruction* currentInstruction)
786{
787 unsigned result = currentInstruction[1].u.operand;
788 unsigned op1 = currentInstruction[2].u.operand;
789 unsigned op2 = currentInstruction[3].u.operand;
790
791 JITStubCall stubCall(this, JITStubs::cti_op_add);
792 stubCall.addArgument(op1, regT2);
793 stubCall.addArgument(op2, regT2);
794 stubCall.call(result);
795}
796
797void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
798{
799 ASSERT_NOT_REACHED();
800}
801
802void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
803{
804 unsigned result = currentInstruction[1].u.operand;
805 unsigned op1 = currentInstruction[2].u.operand;
806 unsigned op2 = currentInstruction[3].u.operand;
807
808 JITStubCall stubCall(this, JITStubs::cti_op_mul);
809 stubCall.addArgument(op1, regT2);
810 stubCall.addArgument(op2, regT2);
811 stubCall.call(result);
812}
813
814void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
815{
816 ASSERT_NOT_REACHED();
817}
818
819void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
820{
821 unsigned result = currentInstruction[1].u.operand;
822 unsigned op1 = currentInstruction[2].u.operand;
823 unsigned op2 = currentInstruction[3].u.operand;
824
825 JITStubCall stubCall(this, JITStubs::cti_op_sub);
826 stubCall.addArgument(op1, regT2);
827 stubCall.addArgument(op2, regT2);
828 stubCall.call(result);
829}
830
831void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
832{
833 ASSERT_NOT_REACHED();
834}
835
836#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
837
838void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
839{
840 emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
841 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
842 emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
843 if (opcodeID == op_add)
844 addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax));
845 else if (opcodeID == op_sub)
846 addSlowCase(branchSub32(Overflow, X86::edx, X86::eax));
847 else {
848 ASSERT(opcodeID == op_mul);
849 addSlowCase(branchMul32(Overflow, X86::edx, X86::eax));
850 addSlowCase(branchTest32(Zero, X86::eax));
851 }
852 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
853}
854
855void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned, OperandTypes types)
856{
857 // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
858 COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
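// Because the COMPILE_ASSERT above guarantees TagTypeNumber + DoubleEncodeOffset
// wraps to zero, x - TagTypeNumber and x + DoubleEncodeOffset are the same value
// modulo 2^64; the double path below exploits this when it untags and retags
// values with addPtr/subPtr of tagTypeNumberRegister.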
859
860 Jump notImm1 = getSlowCase(iter);
861 Jump notImm2 = getSlowCase(iter);
862
863 linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
864 if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
865 linkSlowCase(iter);
866 emitGetVirtualRegister(op1, X86::eax);
867
868 Label stubFunctionCall(this);
869 JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
870 stubCall.addArgument(X86::eax);
871 stubCall.addArgument(X86::edx);
872 stubCall.call(result);
873 Jump end = jump();
874
875 // if we get here, eax is not an int32, edx not yet checked.
876 notImm1.link(this);
877 if (!types.first().definitelyIsNumber())
878 emitJumpIfNotImmediateNumber(X86::eax).linkTo(stubFunctionCall, this);
879 if (!types.second().definitelyIsNumber())
880 emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
881 addPtr(tagTypeNumberRegister, X86::eax);
882 m_assembler.movq_rr(X86::eax, X86::xmm1);
883 Jump op2isDouble = emitJumpIfNotImmediateInteger(X86::edx);
884 m_assembler.cvtsi2sd_rr(X86::edx, X86::xmm2);
885 Jump op2wasInteger = jump();
886
887 // if we get here, eax IS an int32, edx is not.
888 notImm2.link(this);
889 if (!types.second().definitelyIsNumber())
890 emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
891 m_assembler.cvtsi2sd_rr(X86::eax, X86::xmm1);
892 op2isDouble.link(this);
893 addPtr(tagTypeNumberRegister, X86::edx);
894 m_assembler.movq_rr(X86::edx, X86::xmm2);
895 op2wasInteger.link(this);
896
897 if (opcodeID == op_add)
898 m_assembler.addsd_rr(X86::xmm2, X86::xmm1);
899 else if (opcodeID == op_sub)
900 m_assembler.subsd_rr(X86::xmm2, X86::xmm1);
901 else {
902 ASSERT(opcodeID == op_mul);
903 m_assembler.mulsd_rr(X86::xmm2, X86::xmm1);
904 }
905 m_assembler.movq_rr(X86::xmm1, X86::eax);
906 subPtr(tagTypeNumberRegister, X86::eax);
907 emitPutVirtualRegister(result, X86::eax);
908
909 end.link(this);
910}
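// In the function above, moving a boxed double into an XMM register is done by
// addPtr(tagTypeNumberRegister, reg) followed by movq: the addition strips the
// number tag, leaving the raw IEEE 754 bit pattern in the GPR. subPtr performs
// the inverse before the result is written back with emitPutVirtualRegister.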
911
912void JIT::compileFastArith_op_add(Instruction* currentInstruction)
913{
914 unsigned result = currentInstruction[1].u.operand;
915 unsigned op1 = currentInstruction[2].u.operand;
916 unsigned op2 = currentInstruction[3].u.operand;
917 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
918
919 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
920 JITStubCall stubCall(this, JITStubs::cti_op_add);
921 stubCall.addArgument(op1, X86::ecx);
922 stubCall.addArgument(op2, X86::ecx);
923 stubCall.call(result);
924 return;
925 }
926
927 if (isOperandConstantImmediateInt(op1)) {
928 emitGetVirtualRegister(op2, X86::eax);
929 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
930 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
931 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
932 } else if (isOperandConstantImmediateInt(op2)) {
933 emitGetVirtualRegister(op1, X86::eax);
934 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
935 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
936 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
937 } else
938 compileBinaryArithOp(op_add, result, op1, op2, types);
939
940 emitPutVirtualRegister(result);
941}
942void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
943{
944 unsigned result = currentInstruction[1].u.operand;
945 unsigned op1 = currentInstruction[2].u.operand;
946 unsigned op2 = currentInstruction[3].u.operand;
947
948 if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
949 linkSlowCase(iter);
950 linkSlowCase(iter);
951 JITStubCall stubCall(this, JITStubs::cti_op_add);
952 stubCall.addArgument(op1, X86::ecx);
953 stubCall.addArgument(op2, X86::ecx);
954 stubCall.call(result);
955 } else
956 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
957}
958
959void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
960{
961 unsigned result = currentInstruction[1].u.operand;
962 unsigned op1 = currentInstruction[2].u.operand;
963 unsigned op2 = currentInstruction[3].u.operand;
964 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
965
966 // For now, only plant a fast int case if the constant operand is greater than zero.
967 int32_t value;
968 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
969 emitGetVirtualRegister(op2, X86::eax);
970 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
971 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
972 emitFastArithReTagImmediate(X86::eax, X86::eax);
973 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
974 emitGetVirtualRegister(op1, X86::eax);
975 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
976 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
977 emitFastArithReTagImmediate(X86::eax, X86::eax);
978 } else
979 compileBinaryArithOp(op_mul, result, op1, op2, types);
980
981 emitPutVirtualRegister(result);
982}
983void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
984{
985 unsigned result = currentInstruction[1].u.operand;
986 unsigned op1 = currentInstruction[2].u.operand;
987 unsigned op2 = currentInstruction[3].u.operand;
988 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
989
990 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
991 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
992 linkSlowCase(iter);
993 linkSlowCase(iter);
994 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for a zero operand, since the result should then be -0, which cannot be represented as an immediate int.
995 JITStubCall stubCall(this, JITStubs::cti_op_mul);
996 stubCall.addArgument(op1, X86::ecx);
997 stubCall.addArgument(op2, X86::ecx);
998 stubCall.call(result);
999 } else
1000 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
1001}
1002
1003void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
1004{
1005 unsigned result = currentInstruction[1].u.operand;
1006 unsigned op1 = currentInstruction[2].u.operand;
1007 unsigned op2 = currentInstruction[3].u.operand;
1008 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1009
1010 compileBinaryArithOp(op_sub, result, op1, op2, types);
1011
1012 emitPutVirtualRegister(result);
1013}
1014void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1015{
1016 unsigned result = currentInstruction[1].u.operand;
1017 unsigned op1 = currentInstruction[2].u.operand;
1018 unsigned op2 = currentInstruction[3].u.operand;
1019 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1020
1021 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
1022}
1023
1024#else
1025
1026typedef X86Assembler::JmpSrc JmpSrc;
1027typedef X86Assembler::JmpDst JmpDst;
1028typedef X86Assembler::XMMRegisterID XMMRegisterID;
1029
1030
1031void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
1032{
1033 Structure* numberStructure = m_globalData->numberStructure.get();
1034 JmpSrc wasJSNumberCell1;
1035 JmpSrc wasJSNumberCell2;
1036
1037 emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
1038
1039 if (types.second().isReusable() && isSSE2Present()) {
1040 ASSERT(types.second().mightBeNumber());
1041
1042 // Check op2 is a number
1043 __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
1044 JmpSrc op2imm = __ jne();
1045 if (!types.second().definitelyIsNumber()) {
1046 emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
1047 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
1048 addSlowCase(__ jne());
1049 }
1050
1051 // (1) In this case src2 is a reusable number cell.
1052 // Slow case if src1 is not a number type.
1053 __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
1054 JmpSrc op1imm = __ jne();
1055 if (!types.first().definitelyIsNumber()) {
1056 emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
1057 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
1058 addSlowCase(__ jne());
1059 }
1060
1061 // (1a) if we get here, src1 is also a number cell
1062 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
1063 JmpSrc loadedDouble = __ jmp();
1064 // (1b) if we get here, src1 is an immediate
1065 __ linkJump(op1imm, __ label());
1066 emitFastArithImmToInt(X86::eax);
1067 __ cvtsi2sd_rr(X86::eax, X86::xmm0);
1068 // (1c)
1069 __ linkJump(loadedDouble, __ label());
1070 if (opcodeID == op_add)
1071 __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
1072 else if (opcodeID == op_sub)
1073 __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
1074 else {
1075 ASSERT(opcodeID == op_mul);
1076 __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
1077 }
1078
1079 // Store the result to the JSNumberCell and jump.
1080 __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::edx);
1081 __ movl_rr(X86::edx, X86::eax);
1082 emitPutVirtualRegister(dst);
1083 wasJSNumberCell2 = __ jmp();
1084
1085 // (2) This handles cases where src2 is an immediate number.
1086 // Two slow cases - either src1 isn't an immediate, or the arithmetic overflows.
1087 __ linkJump(op2imm, __ label());
1088 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1089 } else if (types.first().isReusable() && isSSE2Present()) {
1090 ASSERT(types.first().mightBeNumber());
1091
1092 // Check op1 is a number
1093 __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
1094 JmpSrc op1imm = __ jne();
1095 if (!types.first().definitelyIsNumber()) {
1096 emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
1097 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
1098 addSlowCase(__ jne());
1099 }
1100
1101 // (1) In this case src1 is a reusable number cell.
1102 // Slow case if src2 is not a number type.
1103 __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
1104 JmpSrc op2imm = __ jne();
1105 if (!types.second().definitelyIsNumber()) {
1106 emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
1107 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
1108 addSlowCase(__ jne());
1109 }
1110
1111 // (1a) if we get here, src2 is also a number cell
1112 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
1113 JmpSrc loadedDouble = __ jmp();
1114 // (1b) if we get here, src2 is an immediate
1115 __ linkJump(op2imm, __ label());
1116 emitFastArithImmToInt(X86::edx);
1117 __ cvtsi2sd_rr(X86::edx, X86::xmm1);
1118 // (1c)
1119 __ linkJump(loadedDouble, __ label());
1120 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
1121 if (opcodeID == op_add)
1122 __ addsd_rr(X86::xmm1, X86::xmm0);
1123 else if (opcodeID == op_sub)
1124 __ subsd_rr(X86::xmm1, X86::xmm0);
1125 else {
1126 ASSERT(opcodeID == op_mul);
1127 __ mulsd_rr(X86::xmm1, X86::xmm0);
1128 }
1132 // Store the result to the JSNumberCell and jump.
1133 __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
1134 emitPutVirtualRegister(dst);
1135 wasJSNumberCell1 = __ jmp();
1136
1137 // (2) This handles cases where src1 is an immediate number.
1138 // Two slow cases - either src2 isn't an immediate, or the arithmetic overflows.
1139 __ linkJump(op1imm, __ label());
1140 emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
1141 } else
1142 emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
1143
1144 if (opcodeID == op_add) {
1145 emitFastArithDeTagImmediate(X86::eax);
1146 __ addl_rr(X86::edx, X86::eax);
1147 addSlowCase(__ jo());
1148 } else if (opcodeID == op_sub) {
1149 __ subl_rr(X86::edx, X86::eax);
1150 addSlowCase(__ jo());
1151 signExtend32ToPtr(X86::eax, X86::eax);
1152 emitFastArithReTagImmediate(X86::eax, X86::eax);
1153 } else {
1154 ASSERT(opcodeID == op_mul);
1155 // convert eax & edx from JSImmediates to ints, and check if either are zero
1156 emitFastArithImmToInt(X86::edx);
1157 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
1158 __ testl_rr(X86::edx, X86::edx);
1159 JmpSrc op2NonZero = __ jne();
1160 op1Zero.link(this);
1161 // If either input is zero, add the two together and check if the result is < 0; with one operand zero, the sum is just the other operand.
1162 // If it is negative, we have a problem: (N < 0), (N * 0) == -0, not representable as a JSImmediate.
1163 __ movl_rr(X86::eax, X86::ecx);
1164 __ addl_rr(X86::edx, X86::ecx);
1165 addSlowCase(__ js());
1166 // Skip the above check if neither input is zero
1167 __ linkJump(op2NonZero, __ label());
1168 __ imull_rr(X86::edx, X86::eax);
1169 addSlowCase(__ jo());
1170 signExtend32ToPtr(X86::eax, X86::eax);
1171 emitFastArithReTagImmediate(X86::eax, X86::eax);
1172 }
1173 emitPutVirtualRegister(dst);
1174
1175 if (types.second().isReusable() && isSSE2Present()) {
1176 __ linkJump(wasJSNumberCell2, __ label());
1177 }
1178 else if (types.first().isReusable() && isSSE2Present()) {
1179 __ linkJump(wasJSNumberCell1, __ label());
1180 }
1181}
1182
1183void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
1184{
1185 linkSlowCase(iter);
1186 if (types.second().isReusable() && isSSE2Present()) {
1187 if (!types.first().definitelyIsNumber()) {
1188 linkSlowCaseIfNotJSCell(iter, src1);
1189 linkSlowCase(iter);
1190 }
1191 if (!types.second().definitelyIsNumber()) {
1192 linkSlowCaseIfNotJSCell(iter, src2);
1193 linkSlowCase(iter);
1194 }
1195 } else if (types.first().isReusable() && isSSE2Present()) {
1196 if (!types.first().definitelyIsNumber()) {
1197 linkSlowCaseIfNotJSCell(iter, src1);
1198 linkSlowCase(iter);
1199 }
1200 if (!types.second().definitelyIsNumber()) {
1201 linkSlowCaseIfNotJSCell(iter, src2);
1202 linkSlowCase(iter);
1203 }
1204 }
1205 linkSlowCase(iter);
1206
1207 // additional entry point to handle -0 cases.
1208 if (opcodeID == op_mul)
1209 linkSlowCase(iter);
1210
1211 JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
1212 stubCall.addArgument(src1, X86::ecx);
1213 stubCall.addArgument(src2, X86::ecx);
1214 stubCall.call(dst);
1215}
1216
1217void JIT::compileFastArith_op_add(Instruction* currentInstruction)
1218{
1219 unsigned result = currentInstruction[1].u.operand;
1220 unsigned op1 = currentInstruction[2].u.operand;
1221 unsigned op2 = currentInstruction[3].u.operand;
1222
1223 if (isOperandConstantImmediateInt(op1)) {
1224 emitGetVirtualRegister(op2, X86::eax);
1225 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1226 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
1227 signExtend32ToPtr(X86::eax, X86::eax);
1228 emitPutVirtualRegister(result);
1229 } else if (isOperandConstantImmediateInt(op2)) {
1230 emitGetVirtualRegister(op1, X86::eax);
1231 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1232 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
1233 signExtend32ToPtr(X86::eax, X86::eax);
1234 emitPutVirtualRegister(result);
1235 } else {
1236 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1237 if (types.first().mightBeNumber() && types.second().mightBeNumber())
1238 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1239 else {
1240 JITStubCall stubCall(this, JITStubs::cti_op_add);
1241 stubCall.addArgument(op1, X86::ecx);
1242 stubCall.addArgument(op2, X86::ecx);
1243 stubCall.call(result);
1244 }
1245 }
1246}
1247
1248void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1249{
1250 unsigned result = currentInstruction[1].u.operand;
1251 unsigned op1 = currentInstruction[2].u.operand;
1252 unsigned op2 = currentInstruction[3].u.operand;
1253
1254 if (isOperandConstantImmediateInt(op1)) {
1255 Jump notImm = getSlowCase(iter);
1256 linkSlowCase(iter);
1257 sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
1258 notImm.link(this);
1259 JITStubCall stubCall(this, JITStubs::cti_op_add);
1260 stubCall.addArgument(op1, X86::ecx);
1261 stubCall.addArgument(X86::eax);
1262 stubCall.call(result);
1263 } else if (isOperandConstantImmediateInt(op2)) {
1264 Jump notImm = getSlowCase(iter);
1265 linkSlowCase(iter);
1266 sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
1267 notImm.link(this);
1268 JITStubCall stubCall(this, JITStubs::cti_op_add);
1269 stubCall.addArgument(X86::eax);
1270 stubCall.addArgument(op2, X86::ecx);
1271 stubCall.call(result);
1272 } else {
1273 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1274 ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
1275 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
1276 }
1277}
1278
1279void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
1280{
1281 unsigned result = currentInstruction[1].u.operand;
1282 unsigned op1 = currentInstruction[2].u.operand;
1283 unsigned op2 = currentInstruction[3].u.operand;
1284
1285 // For now, only plant a fast int case if the constant operand is greater than zero.
1286 int32_t value;
1287 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
1288 emitGetVirtualRegister(op2, X86::eax);
1289 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1290 emitFastArithDeTagImmediate(X86::eax);
1291 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
1292 signExtend32ToPtr(X86::eax, X86::eax);
1293 emitFastArithReTagImmediate(X86::eax, X86::eax);
1294 emitPutVirtualRegister(result);
1295 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
1296 emitGetVirtualRegister(op1, X86::eax);
1297 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1298 emitFastArithDeTagImmediate(X86::eax);
1299 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
1300 signExtend32ToPtr(X86::eax, X86::eax);
1301 emitFastArithReTagImmediate(X86::eax, X86::eax);
1302 emitPutVirtualRegister(result);
1303 } else
1304 compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1305}
1306void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1307{
1308 unsigned result = currentInstruction[1].u.operand;
1309 unsigned op1 = currentInstruction[2].u.operand;
1310 unsigned op2 = currentInstruction[3].u.operand;
1311
1312 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
1313 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
1314 linkSlowCase(iter);
1315 linkSlowCase(iter);
1316 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for a zero operand, since the result should then be -0, which cannot be represented as an immediate int.
1317 JITStubCall stubCall(this, JITStubs::cti_op_mul);
1318 stubCall.addArgument(op1, X86::ecx);
1319 stubCall.addArgument(op2, X86::ecx);
1320 stubCall.call(result);
1321 } else
1322 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1323}
1324
1325void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
1326{
1327 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1328}
1329void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1330{
1331 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1332}
1333
1334#endif
1335
1336} // namespace JSC
1337
1338#endif // ENABLE(JIT)