source: webkit/trunk/JavaScriptCore/jit/JITArithmetic.cpp@ 43401

Last change on this file since 43401 was 43401, checked in by [email protected], 16 years ago

2009-05-08 Cameron Zwarich <[email protected]>

Reviewed by Maciej Stachowiak.

Add a new opcode jnlesseq, and optimize its compilation in the JIT using
techniques similar to those used to optimize jnless in r43363.

This gives a 0.7% speedup on SunSpider, particularly on the tests 3d-cube,
control-flow-recursive, date-format-xparb, and string-base64.

  • bytecode/CodeBlock.cpp: (JSC::CodeBlock::dump): Add support for dumping op_jnlesseq.
  • bytecode/Opcode.h: Add op_jnlesseq to the list of opcodes.
  • bytecompiler/BytecodeGenerator.cpp: (JSC::BytecodeGenerator::emitJumpIfFalse): Add a peephole optimization for op_jnlesseq when emitting lesseq followed by a jump.
  • interpreter/Interpreter.cpp: (JSC::Interpreter::privateExecute): Add case for op_jnlesseq.
  • jit/JIT.cpp: (JSC::JIT::privateCompileMainPass): Add case for op_jnlesseq. (JSC::JIT::privateCompileSlowCases): Add case for op_jnlesseq.
  • jit/JIT.h:
  • jit/JITArithmetic.cpp: (JSC::JIT::compileFastArith_op_jnlesseq): Added. (JSC::JIT::compileFastArithSlow_op_jnlesseq): Added.
  • jit/JITStubs.cpp: (JSC::JITStubs::cti_op_jlesseq): Added.
  • jit/JITStubs.h:
File size: 50.8 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JSArray.h"
34#include "JSFunction.h"
35#include "Interpreter.h"
36#include "ResultType.h"
37#include "SamplingTool.h"
38
39#ifndef NDEBUG
40#include <stdio.h>
41#endif
42
43#define __ m_assembler.
44
45using namespace std;
46
47#if PLATFORM(MAC)
48
// On Mac, no runtime check is needed: SSE2 is part of the baseline.
static inline bool isSSE2Present()
{
    return true; // All X86 Macs are guaranteed to support at least SSE2
}
53
54#else
55
// Runtime SSE2 detection. Executes cpuid (function 1) once, at first call,
// and caches whether bit 26 of the EDX feature flags is set. On compilers
// other than MSVC/GCC no check is performed and SSE2 is treated as absent.
static bool isSSE2Present()
{
    static const int SSE2FeatureBit = 1 << 26; // EDX bit 26 == SSE2 (cpuid fn 1)
    struct SSE2Check {
        SSE2Check()
        {
            int flags;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            // ebx is saved/restored around cpuid because it may be reserved
            // as the PIC register and cannot appear in the clobber list.
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
#else
            flags = 0;
#endif
            present = (flags & SSE2FeatureBit) != 0;
        }
        bool present;
    };
    // Function-local static: the cpuid runs exactly once, lazily.
    static SSE2Check check;
    return check.present;
}
90
91#endif
92
93namespace JSC {
94
// Fast path for op_lshift (JavaScript <<). Both operands must be immediate
// integers; anything else drops to the slow case. Writes the re-tagged
// shifted value into the 'result' virtual register.
void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
    // Mask with 0x1f as per ecma-262 11.7.1 step 7.
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
    and32(Imm32(0x1f), regT2);
#endif
    lshift32(regT2, regT0);
#if !USE(ALTERNATE_JSIMMEDIATE)
    // 31-bit immediates: the doubling used to re-tag can overflow, making
    // the value unrepresentable as an immediate int -- slow case.
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
// Slow path for op_lshift: links the slow cases registered by the fast path
// (in the same order they were added) and calls the cti_op_lshift stub.
void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    // Two slow cases: op1 not an immediate int, op2 not an immediate int.
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    // Overflow case: regT0/regT2 were munged by the fast path, so reload them.
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(regT0, 1);
    emitPutJITStubArg(regT2, 2);
    emitCTICall(JITStubs::cti_op_lshift);
    emitPutVirtualRegister(result);
}
137
// Fast path for op_rshift (JavaScript signed >>). Handles a constant shift
// amount specially; otherwise, when SSE2 is available, also converts a
// double LHS to int32 inline via cvttsd2si.
void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
{
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
        // 31-bit immediates: shifting the full tagged pointer keeps the tag
        // bit in place for non-zero shifts.
        rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (isSSE2Present()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            __ movq_rr(regT0, X86::xmm0);
#else
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
#endif
            __ cvttsd2si_rr(X86::xmm0, regT0);
            // cvttsd2si yields 0x80000000 ("integer indefinite") when the
            // double is out of int32 range -- send that to the slow case.
            addSlowCase(branch32(Equal, regT0, Imm32(0x80000000)));
#if !USE(ALTERNATE_JSIMMEDIATE)
            add32(regT0, regT0);
            addSlowCase(__ jo());
#endif
            lhsIsInt.link(this);
        } else
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT2);
        emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), regT2);
#endif
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(regT2, regT0);
#else
        rshiftPtr(regT2, regT0);
#endif
    }
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
// Slow path for op_rshift. The number of slow cases linked must mirror
// exactly those registered by compileFastArith_op_rshift for each
// configuration (constant shift / SSE2 / immediate-only).
void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op2))
        emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    else {
        if (isSSE2Present()) {
#if USE(ALTERNATE_JSIMMEDIATE)
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            emitGetVirtualRegister(op1, regT0);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
        }
        emitPutJITStubArg(regT2, 2);
    }

    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_rshift);
    emitPutVirtualRegister(result);
}
220
// Fast path for op_jnless: branch to 'target' when !(op1 < op2), i.e. when
// op1 >= op2. The "target + 3" skips the three operand words of the opcode
// when computing the bytecode-relative jump destination.
void JIT::compileFastArith_op_jnless(unsigned op1, unsigned op2, unsigned target)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        // Compare raw tagged values directly; presumably the int-immediate
        // tagging is order-preserving so this is safe -- confirm.
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // Operands swapped: jump when op2 <= op1, i.e. !(op1 < op2).
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
    }
}
254void JIT::compileFastArithSlow_op_jnless(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator& iter)
255{
256 // We generate inline code for the following cases in the slow path:
257 // - floating-point number to constant int immediate
258 // - constant int immediate to floating-point number
259 // - floating-point number to floating-point number.
260
261 if (isOperandConstantImmediateInt(op2)) {
262 linkSlowCase(iter);
263
264 if (isSSE2Present()) {
265#if USE(ALTERNATE_JSIMMEDIATE)
266 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
267 addPtr(tagTypeNumberRegister, regT0);
268 m_assembler.movq_rr(regT0, X86::xmm0);
269#else
270 Jump fail1;
271 if (!m_codeBlock->isKnownNotImmediate(op1))
272 fail1 = emitJumpIfNotJSCell(regT0);
273
274 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
275 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
276#endif
277
278 int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
279
280 m_assembler.movl_i32r(op2imm, regT1);
281 m_assembler.cvtsi2sd_rr(regT1, X86::xmm1);
282
283 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
284 emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);
285
286 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
287
288#if USE(ALTERNATE_JSIMMEDIATE)
289 fail1.link(this);
290#else
291 if (!m_codeBlock->isKnownNotImmediate(op1))
292 fail1.link(this);
293 fail2.link(this);
294#endif
295 }
296
297 emitPutJITStubArg(regT0, 1);
298 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
299 emitCTICall(JITStubs::cti_op_jless);
300 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
301
302 } else if (isOperandConstantImmediateInt(op1)) {
303 linkSlowCase(iter);
304
305 if (isSSE2Present()) {
306#if USE(ALTERNATE_JSIMMEDIATE)
307 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
308 addPtr(tagTypeNumberRegister, regT1);
309 m_assembler.movq_rr(regT1, X86::xmm1);
310#else
311 Jump fail1;
312 if (!m_codeBlock->isKnownNotImmediate(op2))
313 fail1 = emitJumpIfNotJSCell(regT1);
314
315 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
316 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
317#endif
318
319 int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
320
321 m_assembler.movl_i32r(op1imm, regT0);
322 m_assembler.cvtsi2sd_rr(regT0, X86::xmm0);
323
324 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
325 emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);
326
327 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
328
329#if USE(ALTERNATE_JSIMMEDIATE)
330 fail1.link(this);
331#else
332 if (!m_codeBlock->isKnownNotImmediate(op2))
333 fail1.link(this);
334 fail2.link(this);
335#endif
336 }
337
338 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
339 emitPutJITStubArg(regT1, 2);
340 emitCTICall(JITStubs::cti_op_jless);
341 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
342
343 } else {
344 linkSlowCase(iter);
345
346 if (isSSE2Present()) {
347#if USE(ALTERNATE_JSIMMEDIATE)
348 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
349 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
350 Jump fail3 = emitJumpIfImmediateInteger(regT1);
351 addPtr(tagTypeNumberRegister, regT0);
352 addPtr(tagTypeNumberRegister, regT1);
353 m_assembler.movq_rr(regT0, X86::xmm0);
354 m_assembler.movq_rr(regT1, X86::xmm1);
355#else
356 Jump fail1;
357 if (!m_codeBlock->isKnownNotImmediate(op1))
358 fail1 = emitJumpIfNotJSCell(regT0);
359
360 Jump fail2;
361 if (!m_codeBlock->isKnownNotImmediate(op2))
362 fail2 = emitJumpIfNotJSCell(regT1);
363
364 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
365 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
366 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
367 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
368#endif
369
370 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
371 emitJumpSlowToHot(Jump::Jump(m_assembler.jbe()), target + 3);
372
373 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
374
375#if USE(ALTERNATE_JSIMMEDIATE)
376 fail1.link(this);
377 fail2.link(this);
378 fail3.link(this);
379#else
380 if (!m_codeBlock->isKnownNotImmediate(op1))
381 fail1.link(this);
382 if (!m_codeBlock->isKnownNotImmediate(op2))
383 fail2.link(this);
384 fail3.link(this);
385 fail4.link(this);
386#endif
387 }
388
389 linkSlowCase(iter);
390 emitPutJITStubArg(regT0, 1);
391 emitPutJITStubArg(regT1, 2);
392 emitCTICall(JITStubs::cti_op_jless);
393 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
394 }
395}
396
// Fast path for op_jnlesseq: branch to 'target' when !(op1 <= op2), i.e.
// when op1 > op2. Mirrors compileFastArith_op_jnless with strict/non-strict
// comparisons swapped. "target + 3" skips the opcode's three operand words.
void JIT::compileFastArith_op_jnlesseq(unsigned op1, unsigned op2, unsigned target)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // Operands swapped: jump when op2 < op1, i.e. !(op1 <= op2).
        addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThan, regT0, regT1), target + 3);
    }
}
430void JIT::compileFastArithSlow_op_jnlesseq(unsigned op1, unsigned op2, unsigned target, Vector<SlowCaseEntry>::iterator& iter)
431{
432 // We generate inline code for the following cases in the slow path:
433 // - floating-point number to constant int immediate
434 // - constant int immediate to floating-point number
435 // - floating-point number to floating-point number.
436
437 if (isOperandConstantImmediateInt(op2)) {
438 linkSlowCase(iter);
439
440 if (isSSE2Present()) {
441#if USE(ALTERNATE_JSIMMEDIATE)
442 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
443 addPtr(tagTypeNumberRegister, regT0);
444 m_assembler.movq_rr(regT0, X86::xmm0);
445#else
446 Jump fail1;
447 if (!m_codeBlock->isKnownNotImmediate(op1))
448 fail1 = emitJumpIfNotJSCell(regT0);
449
450 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
451 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
452#endif
453
454 int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
455
456 m_assembler.movl_i32r(op2imm, regT1);
457 m_assembler.cvtsi2sd_rr(regT1, X86::xmm1);
458
459 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
460 emitJumpSlowToHot(Jump::Jump(m_assembler.jb()), target + 3);
461
462 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
463
464#if USE(ALTERNATE_JSIMMEDIATE)
465 fail1.link(this);
466#else
467 if (!m_codeBlock->isKnownNotImmediate(op1))
468 fail1.link(this);
469 fail2.link(this);
470#endif
471 }
472
473 emitPutJITStubArg(regT0, 1);
474 emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
475 emitCTICall(JITStubs::cti_op_jlesseq);
476 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
477
478 } else if (isOperandConstantImmediateInt(op1)) {
479 linkSlowCase(iter);
480
481 if (isSSE2Present()) {
482#if USE(ALTERNATE_JSIMMEDIATE)
483 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
484 addPtr(tagTypeNumberRegister, regT1);
485 m_assembler.movq_rr(regT1, X86::xmm1);
486#else
487 Jump fail1;
488 if (!m_codeBlock->isKnownNotImmediate(op2))
489 fail1 = emitJumpIfNotJSCell(regT1);
490
491 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
492 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
493#endif
494
495 int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
496
497 m_assembler.movl_i32r(op1imm, regT0);
498 m_assembler.cvtsi2sd_rr(regT0, X86::xmm0);
499
500 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
501 emitJumpSlowToHot(Jump::Jump(m_assembler.jb()), target + 3);
502
503 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
504
505#if USE(ALTERNATE_JSIMMEDIATE)
506 fail1.link(this);
507#else
508 if (!m_codeBlock->isKnownNotImmediate(op2))
509 fail1.link(this);
510 fail2.link(this);
511#endif
512 }
513
514 emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
515 emitPutJITStubArg(regT1, 2);
516 emitCTICall(JITStubs::cti_op_jlesseq);
517 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
518
519 } else {
520 linkSlowCase(iter);
521
522 if (isSSE2Present()) {
523#if USE(ALTERNATE_JSIMMEDIATE)
524 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
525 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
526 Jump fail3 = emitJumpIfImmediateInteger(regT1);
527 addPtr(tagTypeNumberRegister, regT0);
528 addPtr(tagTypeNumberRegister, regT1);
529 m_assembler.movq_rr(regT0, X86::xmm0);
530 m_assembler.movq_rr(regT1, X86::xmm1);
531#else
532 Jump fail1;
533 if (!m_codeBlock->isKnownNotImmediate(op1))
534 fail1 = emitJumpIfNotJSCell(regT0);
535
536 Jump fail2;
537 if (!m_codeBlock->isKnownNotImmediate(op2))
538 fail2 = emitJumpIfNotJSCell(regT1);
539
540 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
541 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
542 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT0, X86::xmm0);
543 m_assembler.movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), regT1, X86::xmm1);
544#endif
545
546 m_assembler.ucomisd_rr(X86::xmm0, X86::xmm1);
547 emitJumpSlowToHot(Jump::Jump(m_assembler.jb()), target + 3);
548
549 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
550
551#if USE(ALTERNATE_JSIMMEDIATE)
552 fail1.link(this);
553 fail2.link(this);
554 fail3.link(this);
555#else
556 if (!m_codeBlock->isKnownNotImmediate(op1))
557 fail1.link(this);
558 if (!m_codeBlock->isKnownNotImmediate(op2))
559 fail2.link(this);
560 fail3.link(this);
561 fail4.link(this);
562#endif
563 }
564
565 linkSlowCase(iter);
566 emitPutJITStubArg(regT0, 1);
567 emitPutJITStubArg(regT1, 2);
568 emitCTICall(JITStubs::cti_op_jlesseq);
569 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
570 }
571}
572
// Fast path for op_bitand. In the general case the AND is performed on the
// raw tagged values before the type check -- safe because ANDing two tagged
// int immediates yields a value whose tag bits reveal whether both inputs
// were int immediates.
void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
{
    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        // Re-tag only when imm >= 0: presumably a negative Imm32
        // sign-extends and leaves the tag bits intact -- confirm.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
// Slow path for op_bitand: a single slow case in all configurations.
// Constant operands are reloaded from the register file; the non-constant
// operand is already live in regT0/regT1.
void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
        emitPutJITStubArg(regT0, 2);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitPutJITStubArg(regT0, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    } else {
        emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
        emitPutJITStubArg(regT1, 2);
    }
    emitCTICall(JITStubs::cti_op_bitand);
    emitPutVirtualRegister(result);
}
620
621#if PLATFORM(X86) || PLATFORM(X86_64)
// Fast path for op_mod on x86/x86-64, using idiv directly. Operands are
// pinned to eax/ecx because idiv takes its dividend in edx:eax and leaves
// the remainder in edx. Division by zero is diverted to the slow case.
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
#if USE(ALTERNATE_JSIMMEDIATE)
    // Divisor == encoded 0 would fault in idiv -- take the slow case.
    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
#else
    emitFastArithDeTagImmediate(X86::eax);
    // De-tag the divisor; a zero payload goes to the slow case.
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    // Remainder is in edx; re-tag it into eax as the result value.
    emitFastArithReTagImmediate(X86::edx, X86::eax);
    emitPutVirtualRegister(result);
}
// Slow path for the x86 op_mod fast path above.
void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
{
#if USE(ALTERNATE_JSIMMEDIATE)
    // Three slow cases: op1 not int, op2 not int, divisor == 0.
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    // Zero-divisor case: the fast path already de-tagged both operands, so
    // they must be re-tagged before being handed to the stub.
    emitFastArithReTagImmediate(X86::eax, X86::eax);
    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::ecx, 2);
    emitCTICall(JITStubs::cti_op_mod);
    emitPutVirtualRegister(result);
}
661#else
// Non-x86 op_mod: no fast path -- always call the cti_op_mod stub.
void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
{
    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_mod);
    emitPutVirtualRegister(result);
}
// The non-x86 op_mod fast path registers no slow cases, so this must
// never be reached.
void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
673#endif
674
// Fast path for op_post_inc: regT0 keeps the original value (the opcode's
// result), regT1 receives the incremented value written back to srcDst.
void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    // 31-bit immediates: add directly to the shifted payload.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
// Slow path for op_post_inc. No reload is needed: only regT1 was munged by
// the fast path, and the pristine value is still in regT0.
void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_post_inc);
    // NOTE(review): the stub apparently returns the incremented value in
    // regT1 and the original value via regT0 -- confirm against JITStubs.
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
699
// Fast path for op_post_dec: regT0 keeps the original value (the opcode's
// result), regT1 receives the decremented value written back to srcDst.
void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    // NOTE(review): branches on Zero (not Overflow) after the subtract --
    // presumably to keep the result inside the immediate range; confirm.
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
// Slow path for op_post_dec. No reload is needed: only regT1 was munged by
// the fast path, and the pristine value is still in regT0.
void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_post_dec);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
724
// Fast path for op_pre_inc: increment srcDst in place.
void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // 31-bit immediates: add directly to the shifted payload.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
// Slow path for op_pre_inc. The second slow case (overflow) must reload
// srcDst because the fast path's add already clobbered regT0; the first
// slow case (not an immediate int) skips the reload -- regT0 is untouched.
void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_pre_inc);
    emitPutVirtualRegister(srcDst);
}
748
// Fast path for op_pre_dec: decrement srcDst in place.
void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
{
    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    // NOTE(review): branches on Zero (not Overflow) after the subtract --
    // presumably to keep the result inside the immediate range; confirm.
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
// Slow path for op_pre_dec. The second slow case must reload srcDst because
// the fast path's subtract already clobbered regT0; the first slow case
// (not an immediate int) skips the reload -- regT0 is untouched.
void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
{
    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    emitPutJITStubArg(regT0, 1);
    emitCTICall(JITStubs::cti_op_pre_dec);
    emitPutVirtualRegister(srcDst);
}
772
773
774#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
775
// With JIT_OPTIMIZE_ARITHMETIC disabled, op_add always goes through the
// cti_op_add stub; no slow cases are registered.
void JIT::compileFastArith_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_add);
    emitPutVirtualRegister(result);
}
// The stub-only op_add above registers no slow cases, so this must never
// be reached.
void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
791
// With JIT_OPTIMIZE_ARITHMETIC disabled, op_mul always goes through the
// cti_op_mul stub; no slow cases are registered.
void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_mul);
    emitPutVirtualRegister(result);
}
// The stub-only op_mul above registers no slow cases, so this must never
// be reached.
void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
807
// With JIT_OPTIMIZE_ARITHMETIC disabled, op_sub always goes through the
// cti_op_sub stub; no slow cases are registered.
void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
    emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
    emitCTICall(JITStubs::cti_op_sub);
    emitPutVirtualRegister(result);
}
// The stub-only op_sub above registers no slow cases, so this must never
// be reached.
void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
823
824#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
825
// Shared int-immediate fast path for op_add/op_sub/op_mul (64-bit immediate
// encoding). Result is produced in eax; overflow and type failures are
// registered as slow cases in a fixed order the slow-case compiler mirrors.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, X86::edx, X86::eax));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, X86::edx, X86::eax));
        // Extra slow case for op_mul: a zero result (see the slow-case
        // compiler -- this handles 0 * negative number, i.e. -0).
        addSlowCase(branchTest32(Zero, X86::eax));
    }
    emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
}
842
// Shared slow path for op_add/op_sub/op_mul. Handles double operands inline
// with SSE (addsd/subsd/mulsd); overflow, -0 (mul), and non-number operands
// fall back to the matching cti stub. The operands live in eax/edx exactly
// as compileBinaryArithOp left them.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned, unsigned op1, unsigned, OperandTypes types)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    // Overflow path: eax was clobbered by the arithmetic, reload op1.
    emitGetVirtualRegister(op1, X86::eax);

    Label stubFunctionCall(this);
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::edx, 2);
    if (opcodeID == op_add)
        emitCTICall(JITStubs::cti_op_add);
    else if (opcodeID == op_sub)
        emitCTICall(JITStubs::cti_op_sub);
    else {
        ASSERT(opcodeID == op_mul);
        emitCTICall(JITStubs::cti_op_mul);
    }
    Jump end = jump();

    // if we get here, eax is not an int32, edx not yet checked.
    notImm1.link(this);
    if (!types.first().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::eax).linkTo(stubFunctionCall, this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
    // Adding tagTypeNumberRegister undoes the double encoding offset (see
    // the COMPILE_ASSERT above), yielding the raw double bits for xmm1.
    addPtr(tagTypeNumberRegister, X86::eax);
    m_assembler.movq_rr(X86::eax, X86::xmm1);
    Jump op2isDouble = emitJumpIfNotImmediateInteger(X86::edx);
    m_assembler.cvtsi2sd_rr(X86::edx, X86::xmm2);
    Jump op2wasInteger = jump();

    // if we get here, eax IS an int32, edx is not.
    notImm2.link(this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
    m_assembler.cvtsi2sd_rr(X86::eax, X86::xmm1);
    op2isDouble.link(this);
    addPtr(tagTypeNumberRegister, X86::edx);
    m_assembler.movq_rr(X86::edx, X86::xmm2);
    op2wasInteger.link(this);

    if (opcodeID == op_add)
        m_assembler.addsd_rr(X86::xmm2, X86::xmm1);
    else if (opcodeID == op_sub)
        m_assembler.subsd_rr(X86::xmm2, X86::xmm1);
    else {
        ASSERT(opcodeID == op_mul);
        m_assembler.mulsd_rr(X86::xmm2, X86::xmm1);
    }
    // Re-encode the double result: move bits back to eax and re-apply the
    // encoding offset by subtracting tagTypeNumberRegister.
    m_assembler.movq_rr(X86::xmm1, X86::eax);
    subPtr(tagTypeNumberRegister, X86::eax);

    end.link(this);
}
904
905void JIT::compileFastArith_op_add(Instruction* currentInstruction)
906{
907 unsigned result = currentInstruction[1].u.operand;
908 unsigned op1 = currentInstruction[2].u.operand;
909 unsigned op2 = currentInstruction[3].u.operand;
910 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
911
912 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
913 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
914 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
915 emitCTICall(JITStubs::cti_op_add);
916 emitPutVirtualRegister(result);
917 return;
918 }
919
920 if (isOperandConstantImmediateInt(op1)) {
921 emitGetVirtualRegister(op2, X86::eax);
922 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
923 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
924 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
925 } else if (isOperandConstantImmediateInt(op2)) {
926 emitGetVirtualRegister(op1, X86::eax);
927 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
928 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
929 emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
930 } else
931 compileBinaryArithOp(op_add, result, op1, op2, types);
932
933 emitPutVirtualRegister(result);
934}
935void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
936{
937 unsigned result = currentInstruction[1].u.operand;
938 unsigned op1 = currentInstruction[2].u.operand;
939 unsigned op2 = currentInstruction[3].u.operand;
940 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
941
942 if (isOperandConstantImmediateInt(op1)) {
943 linkSlowCase(iter);
944 linkSlowCase(iter);
945 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
946 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
947 emitCTICall(JITStubs::cti_op_add);
948 } else if (isOperandConstantImmediateInt(op2)) {
949 linkSlowCase(iter);
950 linkSlowCase(iter);
951 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
952 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
953 emitCTICall(JITStubs::cti_op_add);
954 } else
955 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
956
957 emitPutVirtualRegister(result);
958}
959
960void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
961{
962 unsigned result = currentInstruction[1].u.operand;
963 unsigned op1 = currentInstruction[2].u.operand;
964 unsigned op2 = currentInstruction[3].u.operand;
965 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
966
967 // For now, only plant a fast int case if the constant operand is greater than zero.
968 int32_t value;
969 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
970 emitGetVirtualRegister(op2, X86::eax);
971 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
972 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
973 emitFastArithReTagImmediate(X86::eax, X86::eax);
974 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
975 emitGetVirtualRegister(op1, X86::eax);
976 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
977 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
978 emitFastArithReTagImmediate(X86::eax, X86::eax);
979 } else
980 compileBinaryArithOp(op_mul, result, op1, op2, types);
981
982 emitPutVirtualRegister(result);
983}
984void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
985{
986 unsigned result = currentInstruction[1].u.operand;
987 unsigned op1 = currentInstruction[2].u.operand;
988 unsigned op2 = currentInstruction[3].u.operand;
989 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
990
991 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
992 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
993 linkSlowCase(iter);
994 linkSlowCase(iter);
995 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
996 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
997 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
998 emitCTICall(JITStubs::cti_op_mul);
999 } else
1000 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
1001
1002 emitPutVirtualRegister(result);
1003}
1004
1005void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
1006{
1007 unsigned result = currentInstruction[1].u.operand;
1008 unsigned op1 = currentInstruction[2].u.operand;
1009 unsigned op2 = currentInstruction[3].u.operand;
1010 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1011
1012 compileBinaryArithOp(op_sub, result, op1, op2, types);
1013
1014 emitPutVirtualRegister(result);
1015}
1016void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1017{
1018 unsigned result = currentInstruction[1].u.operand;
1019 unsigned op1 = currentInstruction[2].u.operand;
1020 unsigned op2 = currentInstruction[3].u.operand;
1021 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1022
1023 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
1024
1025 emitPutVirtualRegister(result);
1026}
1027
1028#else
1029
1030typedef X86Assembler::JmpSrc JmpSrc;
1031typedef X86Assembler::JmpDst JmpDst;
1032typedef X86Assembler::XMMRegisterID XMMRegisterID;
1033
1034
1035void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
1036{
1037 Structure* numberStructure = m_globalData->numberStructure.get();
1038 JmpSrc wasJSNumberCell1;
1039 JmpSrc wasJSNumberCell2;
1040
1041 emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
1042
1043 if (types.second().isReusable() && isSSE2Present()) {
1044 ASSERT(types.second().mightBeNumber());
1045
1046 // Check op2 is a number
1047 __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
1048 JmpSrc op2imm = __ jne();
1049 if (!types.second().definitelyIsNumber()) {
1050 emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
1051 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
1052 addSlowCase(__ jne());
1053 }
1054
1055 // (1) In this case src2 is a reusable number cell.
1056 // Slow case if src1 is not a number type.
1057 __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
1058 JmpSrc op1imm = __ jne();
1059 if (!types.first().definitelyIsNumber()) {
1060 emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
1061 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
1062 addSlowCase(__ jne());
1063 }
1064
1065 // (1a) if we get here, src1 is also a number cell
1066 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
1067 JmpSrc loadedDouble = __ jmp();
1068 // (1b) if we get here, src1 is an immediate
1069 __ linkJump(op1imm, __ label());
1070 emitFastArithImmToInt(X86::eax);
1071 __ cvtsi2sd_rr(X86::eax, X86::xmm0);
1072 // (1c)
1073 __ linkJump(loadedDouble, __ label());
1074 if (opcodeID == op_add)
1075 __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
1076 else if (opcodeID == op_sub)
1077 __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
1078 else {
1079 ASSERT(opcodeID == op_mul);
1080 __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
1081 }
1082
1083 // Store the result to the JSNumberCell and jump.
1084 __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::edx);
1085 __ movl_rr(X86::edx, X86::eax);
1086 emitPutVirtualRegister(dst);
1087 wasJSNumberCell2 = __ jmp();
1088
1089 // (2) This handles cases where src2 is an immediate number.
1090 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
1091 __ linkJump(op2imm, __ label());
1092 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1093 } else if (types.first().isReusable() && isSSE2Present()) {
1094 ASSERT(types.first().mightBeNumber());
1095
1096 // Check op1 is a number
1097 __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
1098 JmpSrc op1imm = __ jne();
1099 if (!types.first().definitelyIsNumber()) {
1100 emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
1101 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
1102 addSlowCase(__ jne());
1103 }
1104
1105 // (1) In this case src1 is a reusable number cell.
1106 // Slow case if src2 is not a number type.
1107 __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
1108 JmpSrc op2imm = __ jne();
1109 if (!types.second().definitelyIsNumber()) {
1110 emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
1111 __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
1112 addSlowCase(__ jne());
1113 }
1114
1115 // (1a) if we get here, src2 is also a number cell
1116 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
1117 JmpSrc loadedDouble = __ jmp();
1118 // (1b) if we get here, src2 is an immediate
1119 __ linkJump(op2imm, __ label());
1120 emitFastArithImmToInt(X86::edx);
1121 __ cvtsi2sd_rr(X86::edx, X86::xmm1);
1122 // (1c)
1123 __ linkJump(loadedDouble, __ label());
1124 __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
1125 if (opcodeID == op_add)
1126 __ addsd_rr(X86::xmm1, X86::xmm0);
1127 else if (opcodeID == op_sub)
1128 __ subsd_rr(X86::xmm1, X86::xmm0);
1129 else {
1130 ASSERT(opcodeID == op_mul);
1131 __ mulsd_rr(X86::xmm1, X86::xmm0);
1132 }
1133 __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
1134 emitPutVirtualRegister(dst);
1135
1136 // Store the result to the JSNumberCell and jump.
1137 __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
1138 emitPutVirtualRegister(dst);
1139 wasJSNumberCell1 = __ jmp();
1140
1141 // (2) This handles cases where src1 is an immediate number.
1142 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
1143 __ linkJump(op1imm, __ label());
1144 emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
1145 } else
1146 emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
1147
1148 if (opcodeID == op_add) {
1149 emitFastArithDeTagImmediate(X86::eax);
1150 __ addl_rr(X86::edx, X86::eax);
1151 addSlowCase(__ jo());
1152 } else if (opcodeID == op_sub) {
1153 __ subl_rr(X86::edx, X86::eax);
1154 addSlowCase(__ jo());
1155 signExtend32ToPtr(X86::eax, X86::eax);
1156 emitFastArithReTagImmediate(X86::eax, X86::eax);
1157 } else {
1158 ASSERT(opcodeID == op_mul);
1159 // convert eax & edx from JSImmediates to ints, and check if either are zero
1160 emitFastArithImmToInt(X86::edx);
1161 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
1162 __ testl_rr(X86::edx, X86::edx);
1163 JmpSrc op2NonZero = __ jne();
1164 op1Zero.link(this);
1165 // if either input is zero, add the two together, and check if the result is < 0.
1166 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
1167 __ movl_rr(X86::eax, X86::ecx);
1168 __ addl_rr(X86::edx, X86::ecx);
1169 addSlowCase(__ js());
1170 // Skip the above check if neither input is zero
1171 __ linkJump(op2NonZero, __ label());
1172 __ imull_rr(X86::edx, X86::eax);
1173 addSlowCase(__ jo());
1174 signExtend32ToPtr(X86::eax, X86::eax);
1175 emitFastArithReTagImmediate(X86::eax, X86::eax);
1176 }
1177 emitPutVirtualRegister(dst);
1178
1179 if (types.second().isReusable() && isSSE2Present()) {
1180 __ linkJump(wasJSNumberCell2, __ label());
1181 }
1182 else if (types.first().isReusable() && isSSE2Present()) {
1183 __ linkJump(wasJSNumberCell1, __ label());
1184 }
1185}
1186
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    // Slow path companion to the 32-bit compileBinaryArithOp above.  The
    // linkSlowCase calls must consume entries in exactly the order the fast
    // path added them; the type/SSE2 conditions below mirror the fast path's
    // branch selection so the counts match.
    linkSlowCase(iter);
    if (types.second().isReusable() && isSSE2Present()) {
        // Fast path took the "src2 is a reusable number cell" route.
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    } else if (types.first().isReusable() && isSSE2Present()) {
        // Fast path took the "src1 is a reusable number cell" route.
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    }
    linkSlowCase(iter);

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    // All slow cases funnel into the C++ stub for this opcode.
    emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
    emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
    if (opcodeID == op_add)
        emitCTICall(JITStubs::cti_op_add);
    else if (opcodeID == op_sub)
        emitCTICall(JITStubs::cti_op_sub);
    else {
        ASSERT(opcodeID == op_mul);
        emitCTICall(JITStubs::cti_op_mul);
    }
    emitPutVirtualRegister(dst);
}
1227
1228void JIT::compileFastArith_op_add(Instruction* currentInstruction)
1229{
1230 unsigned result = currentInstruction[1].u.operand;
1231 unsigned op1 = currentInstruction[2].u.operand;
1232 unsigned op2 = currentInstruction[3].u.operand;
1233
1234 if (isOperandConstantImmediateInt(op1)) {
1235 emitGetVirtualRegister(op2, X86::eax);
1236 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1237 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
1238 signExtend32ToPtr(X86::eax, X86::eax);
1239 emitPutVirtualRegister(result);
1240 } else if (isOperandConstantImmediateInt(op2)) {
1241 emitGetVirtualRegister(op1, X86::eax);
1242 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1243 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
1244 signExtend32ToPtr(X86::eax, X86::eax);
1245 emitPutVirtualRegister(result);
1246 } else {
1247 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1248 if (types.first().mightBeNumber() && types.second().mightBeNumber())
1249 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1250 else {
1251 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
1252 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
1253 emitCTICall(JITStubs::cti_op_add);
1254 emitPutVirtualRegister(result);
1255 }
1256 }
1257}
void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path for op_add (32-bit immediate encoding).  For the
    // constant-operand fast paths, the overflow case must first undo the
    // speculative addition so the stub sees the original operand value.
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        // Overflow: the fast path already added the shifted constant into
        // eax, so subtract it again to recover the original tagged operand.
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        // eax holds op2's original value on both routes into the stub call.
        emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
        emitPutJITStubArg(X86::eax, 2);
        emitCTICall(JITStubs::cti_op_add);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        // Mirror of the branch above, with the operand roles swapped.
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
        notImm.link(this);
        emitPutJITStubArg(X86::eax, 1);
        emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
        emitCTICall(JITStubs::cti_op_add);
        emitPutVirtualRegister(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        // The fast path only registers slow cases on the generic route, which
        // it takes only when both operands might be numbers.
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
1288
1289void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
1290{
1291 unsigned result = currentInstruction[1].u.operand;
1292 unsigned op1 = currentInstruction[2].u.operand;
1293 unsigned op2 = currentInstruction[3].u.operand;
1294
1295 // For now, only plant a fast int case if the constant operand is greater than zero.
1296 int32_t value;
1297 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
1298 emitGetVirtualRegister(op2, X86::eax);
1299 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1300 emitFastArithDeTagImmediate(X86::eax);
1301 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
1302 signExtend32ToPtr(X86::eax, X86::eax);
1303 emitFastArithReTagImmediate(X86::eax, X86::eax);
1304 emitPutVirtualRegister(result);
1305 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
1306 emitGetVirtualRegister(op1, X86::eax);
1307 emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
1308 emitFastArithDeTagImmediate(X86::eax);
1309 addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
1310 signExtend32ToPtr(X86::eax, X86::eax);
1311 emitFastArithReTagImmediate(X86::eax, X86::eax);
1312 emitPutVirtualRegister(result);
1313 } else
1314 compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1315}
1316void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1317{
1318 unsigned result = currentInstruction[1].u.operand;
1319 unsigned op1 = currentInstruction[2].u.operand;
1320 unsigned op2 = currentInstruction[3].u.operand;
1321
1322 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
1323 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
1324 linkSlowCase(iter);
1325 linkSlowCase(iter);
1326 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
1327 emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
1328 emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
1329 emitCTICall(JITStubs::cti_op_mul);
1330 emitPutVirtualRegister(result);
1331 } else
1332 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1333}
1334
1335void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
1336{
1337 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1338}
1339void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1340{
1341 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1342}
1343
1344#endif
1345
1346} // namespace JSC
1347
1348#endif // ENABLE(JIT)