source: webkit/trunk/JavaScriptCore/jit/JITArithmetic.cpp@ 43781

Last change on this file since 43781 was 43781, checked in by [email protected], 16 years ago

2009-05-15 Gavin Barraclough <[email protected]>

Reviewed by Oliver Hunt.

Add FP support to the MacroAssembler, port JITArithmetic over to make use of this. Also add
API to determine whether FP support is available 'MacroAssembler::supportsFloatingPoint()',
FP is presently only supported on SSE2 platforms, not x87. On platforms where a suitable
hardware FPU is not available 'supportsFloatingPoint()' may simply return false, and all
other methods ASSERT_NOT_REACHED().

  • assembler/AbstractMacroAssembler.h:
  • assembler/MacroAssemblerX86.h: (JSC::MacroAssemblerX86::MacroAssemblerX86): (JSC::MacroAssemblerX86::branch32): (JSC::MacroAssemblerX86::branchPtrWithPatch): (JSC::MacroAssemblerX86::supportsFloatingPoint):
  • assembler/MacroAssemblerX86Common.h: (JSC::MacroAssemblerX86Common::): (JSC::MacroAssemblerX86Common::loadDouble): (JSC::MacroAssemblerX86Common::storeDouble): (JSC::MacroAssemblerX86Common::addDouble): (JSC::MacroAssemblerX86Common::subDouble): (JSC::MacroAssemblerX86Common::mulDouble): (JSC::MacroAssemblerX86Common::convertInt32ToDouble): (JSC::MacroAssemblerX86Common::branchDouble): (JSC::MacroAssemblerX86Common::branchTruncateDoubleToInt32): (JSC::MacroAssemblerX86Common::branch32): (JSC::MacroAssemblerX86Common::branch16): (JSC::MacroAssemblerX86Common::branchTest32): (JSC::MacroAssemblerX86Common::branchAdd32): (JSC::MacroAssemblerX86Common::branchMul32): (JSC::MacroAssemblerX86Common::branchSub32): (JSC::MacroAssemblerX86Common::set32): (JSC::MacroAssemblerX86Common::setTest32): (JSC::MacroAssemblerX86Common::x86Condition): (JSC::MacroAssemblerX86Common::isSSE2Present):
  • assembler/MacroAssemblerX86_64.h: (JSC::MacroAssemblerX86_64::movePtrToDouble): (JSC::MacroAssemblerX86_64::moveDoubleToPtr): (JSC::MacroAssemblerX86_64::setPtr): (JSC::MacroAssemblerX86_64::branchPtr): (JSC::MacroAssemblerX86_64::branchTestPtr): (JSC::MacroAssemblerX86_64::branchAddPtr): (JSC::MacroAssemblerX86_64::branchSubPtr): (JSC::MacroAssemblerX86_64::supportsFloatingPoint):
  • assembler/X86Assembler.h:
  • jit/JIT.cpp: (JSC::JIT::JIT):
  • jit/JIT.h:
  • jit/JITArithmetic.cpp: (JSC::JIT::emit_op_rshift): (JSC::JIT::emitSlow_op_rshift): (JSC::JIT::emitSlow_op_jnless): (JSC::JIT::emitSlow_op_jnlesseq): (JSC::JIT::compileBinaryArithOp): (JSC::JIT::compileBinaryArithOpSlowCase): (JSC::JIT::emit_op_add): (JSC::JIT::emitSlow_op_add): (JSC::JIT::emit_op_mul): (JSC::JIT::emitSlow_op_mul):
  • jit/JITPropertyAccess.cpp: (JSC::JIT::privateCompilePutByIdTransition):
File size: 50.9 KB
Line 
1/*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "JIT.h"
28
29#if ENABLE(JIT)
30
31#include "CodeBlock.h"
32#include "JITInlineMethods.h"
33#include "JITStubCall.h"
34#include "JSArray.h"
35#include "JSFunction.h"
36#include "Interpreter.h"
37#include "ResultType.h"
38#include "SamplingTool.h"
39
40#ifndef NDEBUG
41#include <stdio.h>
42#endif
43
44
45using namespace std;
46
47namespace JSC {
48
// Fast path for op_lshift: result = op1 << op2, inline only for the
// int-immediate << int-immediate case; any other operand kinds (and, without
// ALTERNATE_JSIMMEDIATE, a re-tag overflow) bail to the slow case.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
    // Mask with 0x1f as per ecma-262 11.7.2 step 7.
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
    and32(Imm32(0x1f), regT2);
#endif
    lshift32(regT2, regT0);
#if !USE(ALTERNATE_JSIMMEDIATE)
    // Re-tagging doubles the value; if that overflows, take the slow case.
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
74
// Slow path for op_lshift: hand the (tagged) operands to the cti_op_lshift stub.
// Slow cases are linked in the same order the fast path registered them.
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if USE(ALTERNATE_JSIMMEDIATE)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    // Re-tag overflow: regT0/regT2 were de-tagged and shifted, so reload them.
    linkSlowCase(iter);
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, JITStubs::cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
100
// Fast path for op_rshift (signed right shift): result = op1 >> op2.
// A constant shift amount is folded in directly; otherwise, when FP support is
// available, a double lhs is inlined by truncating it to an int32 first.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
        // Shifting the full tagged pointer: the tag is restored by the orPtr below.
        rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPoint()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
            // NOTE(review): unlike the jnless/jnlesseq slow paths, there is no
            // addPtr(tagTypeNumberRegister, regT0) before movePtrToDouble here
            // -- confirm the double is correctly un-tagged on this path.
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            movePtrToDouble(regT0, fpRegT0);
#else
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
#endif
            // Doubles that do not convert exactly to int32 take the slow case.
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#if !USE(ALTERNATE_JSIMMEDIATE)
            addSlowCase(branchAdd32(Overflow, regT0, regT0));
#endif
            lhsIsInt.link(this);
        } else
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT2);
        emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), regT2);
#endif
#if USE(ALTERNATE_JSIMMEDIATE)
        rshift32(regT2, regT0);
#else
        rshiftPtr(regT2, regT0);
#endif
    }
#if USE(ALTERNATE_JSIMMEDIATE)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // Restore the integer tag bit (it was shifted away along with the payload).
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
155
// Slow path for op_rshift: redirect to the cti_op_rshift stub. The number of
// slow cases to link depends on which fast path variant was emitted
// (constant shift, FP-capable, or int-only).
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    JITStubCall stubCall(this, JITStubs::cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPoint()) {
#if USE(ALTERNATE_JSIMMEDIATE)
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
193
// Fast path for op_jnless: branch to 'target' when !(op1 < op2).
// The branch conditions are the negation of 'less than'; non-int operands
// fall through to the slow case.
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        // Compares raw tagged bits -- presumably tagged ints order the same
        // as their payloads; TODO confirm against JSImmediate encoding.
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        // 'target + 3' -- the offset appears to be relative to this opcode's
        // three operands; matches the slow path's use of target + 3.
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // Operands reversed: jump when op2 <= op1, i.e. !(op1 < op2).
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
    }
}
231
232void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
233{
234 unsigned op1 = currentInstruction[1].u.operand;
235 unsigned op2 = currentInstruction[2].u.operand;
236 unsigned target = currentInstruction[3].u.operand;
237
238 // We generate inline code for the following cases in the slow path:
239 // - floating-point number to constant int immediate
240 // - constant int immediate to floating-point number
241 // - floating-point number to floating-point number.
242
243 if (isOperandConstantImmediateInt(op2)) {
244 linkSlowCase(iter);
245
246 if (supportsFloatingPoint()) {
247#if USE(ALTERNATE_JSIMMEDIATE)
248 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
249 addPtr(tagTypeNumberRegister, regT0);
250 movePtrToDouble(regT0, fpRegT0);
251#else
252 Jump fail1;
253 if (!m_codeBlock->isKnownNotImmediate(op1))
254 fail1 = emitJumpIfNotJSCell(regT0);
255
256 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
257 loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
258#endif
259
260 int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
261
262 move(Imm32(op2imm), regT1);
263 convertInt32ToDouble(regT1, fpRegT1);
264
265 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
266
267 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
268
269#if USE(ALTERNATE_JSIMMEDIATE)
270 fail1.link(this);
271#else
272 if (!m_codeBlock->isKnownNotImmediate(op1))
273 fail1.link(this);
274 fail2.link(this);
275#endif
276 }
277
278 JITStubCall stubCall(this, JITStubs::cti_op_jless);
279 stubCall.addArgument(regT0);
280 stubCall.addArgument(op2, regT2);
281 stubCall.call();
282 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
283
284 } else if (isOperandConstantImmediateInt(op1)) {
285 linkSlowCase(iter);
286
287 if (supportsFloatingPoint()) {
288#if USE(ALTERNATE_JSIMMEDIATE)
289 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
290 addPtr(tagTypeNumberRegister, regT1);
291 movePtrToDouble(regT1, fpRegT1);
292#else
293 Jump fail1;
294 if (!m_codeBlock->isKnownNotImmediate(op2))
295 fail1 = emitJumpIfNotJSCell(regT1);
296
297 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
298 loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
299#endif
300
301 int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
302
303 move(Imm32(op1imm), regT0);
304 convertInt32ToDouble(regT0, fpRegT0);
305
306 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
307
308 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
309
310#if USE(ALTERNATE_JSIMMEDIATE)
311 fail1.link(this);
312#else
313 if (!m_codeBlock->isKnownNotImmediate(op2))
314 fail1.link(this);
315 fail2.link(this);
316#endif
317 }
318
319 JITStubCall stubCall(this, JITStubs::cti_op_jless);
320 stubCall.addArgument(op1, regT2);
321 stubCall.addArgument(regT1);
322 stubCall.call();
323 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
324
325 } else {
326 linkSlowCase(iter);
327
328 if (supportsFloatingPoint()) {
329#if USE(ALTERNATE_JSIMMEDIATE)
330 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
331 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
332 Jump fail3 = emitJumpIfImmediateInteger(regT1);
333 addPtr(tagTypeNumberRegister, regT0);
334 addPtr(tagTypeNumberRegister, regT1);
335 movePtrToDouble(regT0, fpRegT0);
336 movePtrToDouble(regT1, fpRegT1);
337#else
338 Jump fail1;
339 if (!m_codeBlock->isKnownNotImmediate(op1))
340 fail1 = emitJumpIfNotJSCell(regT0);
341
342 Jump fail2;
343 if (!m_codeBlock->isKnownNotImmediate(op2))
344 fail2 = emitJumpIfNotJSCell(regT1);
345
346 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
347 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
348 loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
349 loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
350#endif
351
352 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
353
354 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
355
356#if USE(ALTERNATE_JSIMMEDIATE)
357 fail1.link(this);
358 fail2.link(this);
359 fail3.link(this);
360#else
361 if (!m_codeBlock->isKnownNotImmediate(op1))
362 fail1.link(this);
363 if (!m_codeBlock->isKnownNotImmediate(op2))
364 fail2.link(this);
365 fail3.link(this);
366 fail4.link(this);
367#endif
368 }
369
370 linkSlowCase(iter);
371 JITStubCall stubCall(this, JITStubs::cti_op_jless);
372 stubCall.addArgument(regT0);
373 stubCall.addArgument(regT1);
374 stubCall.call();
375 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
376 }
377}
378
// Fast path for op_jnlesseq: branch to 'target' when !(op1 <= op2), i.e. the
// strict-inequality dual of emit_op_jnless.
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        // Compares raw tagged bits; tagged ints are assumed to order the same
        // as their payloads -- TODO confirm against JSImmediate encoding.
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        // Jump when op1 > op2, i.e. !(op1 <= op2).
        addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // Operands reversed: jump when op2 < op1, i.e. !(op1 <= op2).
        addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThan, regT0, regT1), target + 3);
    }
}
416
417void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
418{
419 unsigned op1 = currentInstruction[1].u.operand;
420 unsigned op2 = currentInstruction[2].u.operand;
421 unsigned target = currentInstruction[3].u.operand;
422
423 // We generate inline code for the following cases in the slow path:
424 // - floating-point number to constant int immediate
425 // - constant int immediate to floating-point number
426 // - floating-point number to floating-point number.
427
428 if (isOperandConstantImmediateInt(op2)) {
429 linkSlowCase(iter);
430
431 if (supportsFloatingPoint()) {
432#if USE(ALTERNATE_JSIMMEDIATE)
433 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
434 addPtr(tagTypeNumberRegister, regT0);
435 movePtrToDouble(regT0, fpRegT0);
436#else
437 Jump fail1;
438 if (!m_codeBlock->isKnownNotImmediate(op1))
439 fail1 = emitJumpIfNotJSCell(regT0);
440
441 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
442 loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
443#endif
444
445 int32_t op2imm = getConstantOperand(op2).getInt32Fast();;
446
447 move(Imm32(op2imm), regT1);
448 convertInt32ToDouble(regT1, fpRegT1);
449
450 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
451
452 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
453
454#if USE(ALTERNATE_JSIMMEDIATE)
455 fail1.link(this);
456#else
457 if (!m_codeBlock->isKnownNotImmediate(op1))
458 fail1.link(this);
459 fail2.link(this);
460#endif
461 }
462
463 JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
464 stubCall.addArgument(regT0);
465 stubCall.addArgument(op2, regT2);
466 stubCall.call();
467 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
468
469 } else if (isOperandConstantImmediateInt(op1)) {
470 linkSlowCase(iter);
471
472 if (supportsFloatingPoint()) {
473#if USE(ALTERNATE_JSIMMEDIATE)
474 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
475 addPtr(tagTypeNumberRegister, regT1);
476 movePtrToDouble(regT1, fpRegT1);
477#else
478 Jump fail1;
479 if (!m_codeBlock->isKnownNotImmediate(op2))
480 fail1 = emitJumpIfNotJSCell(regT1);
481
482 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
483 loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
484#endif
485
486 int32_t op1imm = getConstantOperand(op1).getInt32Fast();;
487
488 move(Imm32(op1imm), regT0);
489 convertInt32ToDouble(regT0, fpRegT0);
490
491 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
492
493 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
494
495#if USE(ALTERNATE_JSIMMEDIATE)
496 fail1.link(this);
497#else
498 if (!m_codeBlock->isKnownNotImmediate(op2))
499 fail1.link(this);
500 fail2.link(this);
501#endif
502 }
503
504 JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
505 stubCall.addArgument(op1, regT2);
506 stubCall.addArgument(regT1);
507 stubCall.call();
508 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
509
510 } else {
511 linkSlowCase(iter);
512
513 if (supportsFloatingPoint()) {
514#if USE(ALTERNATE_JSIMMEDIATE)
515 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
516 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
517 Jump fail3 = emitJumpIfImmediateInteger(regT1);
518 addPtr(tagTypeNumberRegister, regT0);
519 addPtr(tagTypeNumberRegister, regT1);
520 movePtrToDouble(regT0, fpRegT0);
521 movePtrToDouble(regT1, fpRegT1);
522#else
523 Jump fail1;
524 if (!m_codeBlock->isKnownNotImmediate(op1))
525 fail1 = emitJumpIfNotJSCell(regT0);
526
527 Jump fail2;
528 if (!m_codeBlock->isKnownNotImmediate(op2))
529 fail2 = emitJumpIfNotJSCell(regT1);
530
531 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
532 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
533 loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
534 loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
535#endif
536
537 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
538
539 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
540
541#if USE(ALTERNATE_JSIMMEDIATE)
542 fail1.link(this);
543 fail2.link(this);
544 fail3.link(this);
545#else
546 if (!m_codeBlock->isKnownNotImmediate(op1))
547 fail1.link(this);
548 if (!m_codeBlock->isKnownNotImmediate(op2))
549 fail2.link(this);
550 fail3.link(this);
551 fail4.link(this);
552#endif
553 }
554
555 linkSlowCase(iter);
556 JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
557 stubCall.addArgument(regT0);
558 stubCall.addArgument(regT1);
559 stubCall.call();
560 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
561 }
562}
563
// Fast path for op_bitand. With one constant operand the mask is applied
// directly to the tagged value; with two variable operands the tagged values
// are and-ed and the result checked to still be an immediate integer.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        // A negative imm presumably sign-extends and so preserves the high
        // tag bits; a non-negative imm clears them, requiring a re-tag.
        // TODO confirm Imm32 sign-extension semantics of andPtr.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        // And first, check after: if both inputs were tagged ints the result
        // is still a tagged int; otherwise take the slow case.
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
599
600void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
601{
602 unsigned result = currentInstruction[1].u.operand;
603 unsigned op1 = currentInstruction[2].u.operand;
604 unsigned op2 = currentInstruction[3].u.operand;
605
606 linkSlowCase(iter);
607 if (isOperandConstantImmediateInt(op1)) {
608 JITStubCall stubCall(this, JITStubs::cti_op_bitand);
609 stubCall.addArgument(op1, regT2);
610 stubCall.addArgument(regT0);
611 stubCall.call(result);
612 } else if (isOperandConstantImmediateInt(op2)) {
613 JITStubCall stubCall(this, JITStubs::cti_op_bitand);
614 stubCall.addArgument(regT0);
615 stubCall.addArgument(op2, regT2);
616 stubCall.call(result);
617 } else {
618 JITStubCall stubCall(this, JITStubs::cti_op_bitand);
619 stubCall.addArgument(op1, regT2);
620 stubCall.addArgument(regT1);
621 stubCall.call(result);
622 }
623}
624
// Fast path for op_post_inc: result = srcDst; srcDst = srcDst + 1.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    // Keep the pre-increment value in regT0 for the result; increment regT1.
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    // Add directly to the tagged value: 1 << IntegerPayloadShift is the
    // tagged representation of integer 1's payload.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    // Stores the old value (presumably from regT0, the default source
    // register of emitPutVirtualRegister -- TODO confirm).
    emitPutVirtualRegister(result);
}
643
644void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
645{
646 unsigned result = currentInstruction[1].u.operand;
647 unsigned srcDst = currentInstruction[2].u.operand;
648
649 linkSlowCase(iter);
650 linkSlowCase(iter);
651 JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
652 stubCall.addArgument(regT0);
653 stubCall.addArgument(Imm32(srcDst));
654 stubCall.call(result);
655}
656
// Fast path for op_post_dec: result = srcDst; srcDst = srcDst - 1.
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    // Keep the pre-decrement value in regT0 for the result; decrement regT1.
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    // NOTE(review): branches on Zero rather than Overflow (cf. post_inc's
    // Overflow) -- presumably a zero result also needs the slow path here;
    // confirm the intended condition.
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
675
676void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
677{
678 unsigned result = currentInstruction[1].u.operand;
679 unsigned srcDst = currentInstruction[2].u.operand;
680
681 linkSlowCase(iter);
682 linkSlowCase(iter);
683 JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
684 stubCall.addArgument(regT0);
685 stubCall.addArgument(Imm32(srcDst));
686 stubCall.call(result);
687}
688
// Fast path for op_pre_inc: srcDst = srcDst + 1 (in place, no separate result).
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // Add directly to the tagged value: 1 << IntegerPayloadShift is the
    // tagged representation of integer 1's payload.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
704
// Slow path for op_pre_inc. On the overflow slow case (second) regT0 has been
// munged by the add, so srcDst is reloaded; on the not-an-int case (first,
// 'notImm') regT0 still holds the original value and the reload is skipped.
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
717
// Fast path for op_pre_dec: srcDst = srcDst - 1 (in place).
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
    // NOTE(review): branches on Zero rather than Overflow, matching
    // op_post_dec -- confirm the intended condition.
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
733
// Slow path for op_pre_dec. Mirrors emitSlow_op_pre_inc: the second slow case
// requires reloading srcDst (regT0 was munged by the subtract); the first
// ('notImm') skips the reload since regT0 still holds the original value.
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
746
747/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
748
749#if PLATFORM(X86) || PLATFORM(X86_64)
750
// Fast path for op_mod on x86/x86-64. Uses idiv, which takes the dividend in
// edx:eax (cdq sign-extends eax into edx) and leaves the remainder in edx.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
#if USE(ALTERNATE_JSIMMEDIATE)
    // Divide-by-zero must take the slow path (idiv would fault).
    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
#else
    emitFastArithDeTagImmediate(X86::eax);
    // De-tag the divisor; a zero payload means divide-by-zero, so bail.
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    // The remainder is in edx; re-tag it into eax as the result.
    emitFastArithReTagImmediate(X86::edx, X86::eax);
    emitPutVirtualRegister(result);
}
774
// Slow path for op_mod (x86). Without ALTERNATE_JSIMMEDIATE the fast path
// de-tagged the operands before the zero check, so the divide-by-zero slow
// case must re-tag them before calling the stub; the first two slow cases
// ('notImm1'/'notImm2') fired before de-tagging and skip that step.
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

#if USE(ALTERNATE_JSIMMEDIATE)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithReTagImmediate(X86::eax, X86::eax);
    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, JITStubs::cti_op_mod);
    stubCall.addArgument(X86::eax);
    stubCall.addArgument(X86::ecx);
    stubCall.call(result);
}
797
798#else // PLATFORM(X86) || PLATFORM(X86_64)
799
800void JIT::emit_op_mod(Instruction* currentInstruction)
801{
802 unsigned result = currentInstruction[1].u.operand;
803 unsigned op1 = currentInstruction[2].u.operand;
804 unsigned op2 = currentInstruction[3].u.operand;
805
806 JITStubCall stubCall(this, JITStubs::cti_op_mod);
807 stubCall.addArgument(op1, regT2);
808 stubCall.addArgument(op2, regT2);
809 stubCall.call(result);
810}
811
// The non-x86 op_mod fast path above is a plain stub call and registers no
// slow cases, so this must never be invoked.
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
816
817#endif // PLATFORM(X86) || PLATFORM(X86_64)
818
819/* ------------------------------ END: OP_MOD ------------------------------ */
820
821#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
822
823/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
824
825void JIT::emit_op_add(Instruction* currentInstruction)
826{
827 unsigned result = currentInstruction[1].u.operand;
828 unsigned op1 = currentInstruction[2].u.operand;
829 unsigned op2 = currentInstruction[3].u.operand;
830
831 JITStubCall stubCall(this, JITStubs::cti_op_add);
832 stubCall.addArgument(op1, regT2);
833 stubCall.addArgument(op2, regT2);
834 stubCall.call(result);
835}
836
// The stub-only op_add fast path registers no slow cases, so this must never
// be invoked.
void JIT::emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
841
842void JIT::emit_op_mul(Instruction* currentInstruction)
843{
844 unsigned result = currentInstruction[1].u.operand;
845 unsigned op1 = currentInstruction[2].u.operand;
846 unsigned op2 = currentInstruction[3].u.operand;
847
848 JITStubCall stubCall(this, JITStubs::cti_op_mul);
849 stubCall.addArgument(op1, regT2);
850 stubCall.addArgument(op2, regT2);
851 stubCall.call(result);
852}
853
// The stub-only op_mul fast path registers no slow cases, so this must never
// be invoked.
void JIT::emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
858
859void JIT::emit_op_sub(Instruction* currentInstruction)
860{
861 unsigned result = currentInstruction[1].u.operand;
862 unsigned op1 = currentInstruction[2].u.operand;
863 unsigned op2 = currentInstruction[3].u.operand;
864
865 JITStubCall stubCall(this, JITStubs::cti_op_sub);
866 stubCall.addArgument(op1, regT2);
867 stubCall.addArgument(op2, regT2);
868 stubCall.call(result);
869}
870
void JIT::emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // The unoptimized emit_op_sub registers no slow cases, so this must
    // never be reached.
    ASSERT_NOT_REACHED();
}
875
876#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
877
878/* ------------------------------ BEGIN: USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
879
// Fast path for op_add/op_sub/op_mul under USE(ALTERNATE_JSIMMEDIATE):
// handles the case where both operands are immediate integers, leaving the
// boxed result in regT0. Slow cases are registered, in order: op1 not an
// immediate int, op2 not an immediate int, arithmetic overflow, and (op_mul
// only) a zero result. compileBinaryArithOpSlowCase must link them in the
// same order.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        // A zero product also takes the slow path: 0 * negative should be
        // -0, which is not representable as an immediate integer.
        addSlowCase(branchTest32(Zero, regT0));
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
896
// Slow path companion to the ALTERNATE_JSIMMEDIATE compileBinaryArithOp
// above. Pops the slow cases in the order they were registered there. If
// both operands turn out to be numbers (immediate int or boxed double),
// the operation is redone inline in double arithmetic; anything else, and
// the overflow case, falls back to the appropriate cti stub.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned, OperandTypes types)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    // The fast path's branchAdd32/branchSub32/branchMul32 clobbered regT0;
    // reload op1 before calling the stub.
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    // if we get here, eax is not an int32, edx not yet checked.
    notImm1.link(this);
    if (!types.first().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
    // Unbox op1: adding TagTypeNumber recovers the raw double bits
    // (see the COMPILE_ASSERT above).
    addPtr(tagTypeNumberRegister, regT0);
    movePtrToDouble(regT0, fpRegT1);
    Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
    convertInt32ToDouble(regT1, fpRegT2);
    Jump op2wasInteger = jump();

    // if we get here, eax IS an int32, edx is not.
    notImm2.link(this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
    convertInt32ToDouble(regT0, fpRegT1);
    op2isDouble.link(this);
    // Unbox op2 the same way.
    addPtr(tagTypeNumberRegister, regT1);
    movePtrToDouble(regT1, fpRegT2);
    op2wasInteger.link(this);

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_mul);
        mulDouble(fpRegT2, fpRegT1);
    }
    // Re-box the double result and store it.
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
953
// op_add under USE(ALTERNATE_JSIMMEDIATE). If static operand type info
// says either side cannot be a number, go straight to the stub. Otherwise
// plant a constant-integer fast path when one operand is a constant int,
// or fall through to the generic two-register fast path.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, JITStubs::cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    // Two slow cases in each constant branch: non-integer operand, then
    // add overflow; emitSlow_op_add links both.
    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
984
// Slow path for the ALTERNATE_JSIMMEDIATE op_add above. The constant-int
// fast paths registered exactly two slow cases (non-integer operand and
// overflow); both funnel into the stub. The generic path defers to
// compileBinaryArithOpSlowCase.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, JITStubs::cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
1001
// op_mul under USE(ALTERNATE_JSIMMEDIATE). A constant-operand fast path is
// only planted when the constant is positive: with a non-positive constant
// a zero result could represent -0, which the immediate encoding cannot
// express, so those cases go through the generic path.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
1026
// Slow path for the ALTERNATE_JSIMMEDIATE op_mul above. Mirrors the fast
// path's branch selection: the positive-constant fast paths registered two
// slow cases, everything else defers to compileBinaryArithOpSlowCase.
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, JITStubs::cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
}
1046
1047void JIT::emit_op_sub(Instruction* currentInstruction)
1048{
1049 unsigned result = currentInstruction[1].u.operand;
1050 unsigned op1 = currentInstruction[2].u.operand;
1051 unsigned op2 = currentInstruction[3].u.operand;
1052 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1053
1054 compileBinaryArithOp(op_sub, result, op1, op2, types);
1055
1056 emitPutVirtualRegister(result);
1057}
1058
1059void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1060{
1061 unsigned result = currentInstruction[1].u.operand;
1062 unsigned op1 = currentInstruction[2].u.operand;
1063 unsigned op2 = currentInstruction[3].u.operand;
1064 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1065
1066 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
1067}
1068
1069#else // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
1070
1071/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
1072
// Shorthand for the raw X86 assembler types used by this x86-specific
// JIT_OPTIMIZE_ARITHMETIC implementation.
typedef X86Assembler::JmpSrc JmpSrc;
typedef X86Assembler::JmpDst JmpDst;
typedef X86Assembler::XMMRegisterID XMMRegisterID;
1076
1077
1078void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
1079{
1080 Structure* numberStructure = m_globalData->numberStructure.get();
1081 Jump wasJSNumberCell1;
1082 Jump wasJSNumberCell2;
1083
1084 emitGetVirtualRegisters(src1, regT0, src2, regT1);
1085
1086 if (types.second().isReusable() && supportsFloatingPoint()) {
1087 ASSERT(types.second().mightBeNumber());
1088
1089 // Check op2 is a number
1090 Jump op2imm = emitJumpIfImmediateInteger(regT1);
1091 if (!types.second().definitelyIsNumber()) {
1092 emitJumpSlowCaseIfNotJSCell(regT1, src2);
1093 addSlowCase(checkStructure(regT1, numberStructure));
1094 }
1095
1096 // (1) In this case src2 is a reusable number cell.
1097 // Slow case if src1 is not a number type.
1098 Jump op1imm = emitJumpIfImmediateInteger(regT0);
1099 if (!types.first().definitelyIsNumber()) {
1100 emitJumpSlowCaseIfNotJSCell(regT0, src1);
1101 addSlowCase(checkStructure(regT0, numberStructure));
1102 }
1103
1104 // (1a) if we get here, src1 is also a number cell
1105 loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
1106 Jump loadedDouble = jump();
1107 // (1b) if we get here, src1 is an immediate
1108 op1imm.link(this);
1109 emitFastArithImmToInt(regT0);
1110 convertInt32ToDouble(regT0, fpRegT0);
1111 // (1c)
1112 loadedDouble.link(this);
1113 if (opcodeID == op_add)
1114 addDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
1115 else if (opcodeID == op_sub)
1116 subDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
1117 else {
1118 ASSERT(opcodeID == op_mul);
1119 mulDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
1120 }
1121
1122 // Store the result to the JSNumberCell and jump.
1123 storeDouble(fpRegT0, Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)));
1124 move(regT1, regT0);
1125 emitPutVirtualRegister(dst);
1126 wasJSNumberCell2 = jump();
1127
1128 // (2) This handles cases where src2 is an immediate number.
1129 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
1130 op2imm.link(this);
1131 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1132 } else if (types.first().isReusable() && supportsFloatingPoint()) {
1133 ASSERT(types.first().mightBeNumber());
1134
1135 // Check op1 is a number
1136 Jump op1imm = emitJumpIfImmediateInteger(regT0);
1137 if (!types.first().definitelyIsNumber()) {
1138 emitJumpSlowCaseIfNotJSCell(regT0, src1);
1139 addSlowCase(checkStructure(regT0, numberStructure));
1140 }
1141
1142 // (1) In this case src1 is a reusable number cell.
1143 // Slow case if src2 is not a number type.
1144 Jump op2imm = emitJumpIfImmediateInteger(regT1);
1145 if (!types.second().definitelyIsNumber()) {
1146 emitJumpSlowCaseIfNotJSCell(regT1, src2);
1147 addSlowCase(checkStructure(regT1, numberStructure));
1148 }
1149
1150 // (1a) if we get here, src2 is also a number cell
1151 loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
1152 Jump loadedDouble = jump();
1153 // (1b) if we get here, src2 is an immediate
1154 op2imm.link(this);
1155 emitFastArithImmToInt(regT1);
1156 convertInt32ToDouble(regT1, fpRegT1);
1157 // (1c)
1158 loadedDouble.link(this);
1159 loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
1160 if (opcodeID == op_add)
1161 addDouble(fpRegT1, fpRegT0);
1162 else if (opcodeID == op_sub)
1163 subDouble(fpRegT1, fpRegT0);
1164 else {
1165 ASSERT(opcodeID == op_mul);
1166 mulDouble(fpRegT1, fpRegT0);
1167 }
1168 storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
1169 emitPutVirtualRegister(dst);
1170
1171 // Store the result to the JSNumberCell and jump.
1172 storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
1173 emitPutVirtualRegister(dst);
1174 wasJSNumberCell1 = jump();
1175
1176 // (2) This handles cases where src1 is an immediate number.
1177 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
1178 op1imm.link(this);
1179 emitJumpSlowCaseIfNotImmediateInteger(regT1);
1180 } else
1181 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
1182
1183 if (opcodeID == op_add) {
1184 emitFastArithDeTagImmediate(regT0);
1185 addSlowCase(branchAdd32(Overflow, regT1, regT0));
1186 } else if (opcodeID == op_sub) {
1187 addSlowCase(branchSub32(Overflow, regT1, regT0));
1188 signExtend32ToPtr(regT0, regT0);
1189 emitFastArithReTagImmediate(regT0, regT0);
1190 } else {
1191 ASSERT(opcodeID == op_mul);
1192 // convert eax & edx from JSImmediates to ints, and check if either are zero
1193 emitFastArithImmToInt(regT1);
1194 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
1195 Jump op2NonZero = branchTest32(NonZero, regT1);
1196 op1Zero.link(this);
1197 // if either input is zero, add the two together, and check if the result is < 0.
1198 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
1199 move(regT0, regT2);
1200 addSlowCase(branchAdd32(Signed, regT1, regT2));
1201 // Skip the above check if neither input is zero
1202 op2NonZero.link(this);
1203 addSlowCase(branchMul32(Overflow, regT1, regT0));
1204 signExtend32ToPtr(regT0, regT0);
1205 emitFastArithReTagImmediate(regT0, regT0);
1206 }
1207 emitPutVirtualRegister(dst);
1208
1209 if (types.second().isReusable() && supportsFloatingPoint())
1210 wasJSNumberCell2.link(this);
1211 else if (types.first().isReusable() && supportsFloatingPoint())
1212 wasJSNumberCell1.link(this);
1213}
1214
// Slow path companion to the legacy compileBinaryArithOp above. Links the
// slow cases in the exact order the fast path registered them — the
// not-immediate jump, then the optional cell/structure checks from the
// reusable-number branch, then overflow, then op_mul's extra -0 check —
// and finally calls the matching cti stub.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    linkSlowCase(iter);
    if (types.second().isReusable() && supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    }
    linkSlowCase(iter);

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
    stubCall.addArgument(src1, regT2);
    stubCall.addArgument(src2, regT2);
    stubCall.call(dst);
}
1248
1249void JIT::emit_op_add(Instruction* currentInstruction)
1250{
1251 unsigned result = currentInstruction[1].u.operand;
1252 unsigned op1 = currentInstruction[2].u.operand;
1253 unsigned op2 = currentInstruction[3].u.operand;
1254
1255 if (isOperandConstantImmediateInt(op1)) {
1256 emitGetVirtualRegister(op2, regT0);
1257 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1258 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
1259 signExtend32ToPtr(regT0, regT0);
1260 emitPutVirtualRegister(result);
1261 } else if (isOperandConstantImmediateInt(op2)) {
1262 emitGetVirtualRegister(op1, regT0);
1263 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1264 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
1265 signExtend32ToPtr(regT0, regT0);
1266 emitPutVirtualRegister(result);
1267 } else {
1268 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1269 if (types.first().mightBeNumber() && types.second().mightBeNumber())
1270 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1271 else {
1272 JITStubCall stubCall(this, JITStubs::cti_op_add);
1273 stubCall.addArgument(op1, regT2);
1274 stubCall.addArgument(op2, regT2);
1275 stubCall.call(result);
1276 }
1277 }
1278}
1279
// Slow path for the legacy op_add above. For the constant-int fast paths,
// the first slow case is the non-integer check and the second is the add
// overflow; on overflow the shifted constant has already been added into
// regT0, so it is subtracted back out to recover the original operand
// before calling the stub with it.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        // Undo the speculative add performed on the overflow path.
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, JITStubs::cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        // Undo the speculative add performed on the overflow path.
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, JITStubs::cti_op_add);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        // The stub-only path in emit_op_add registers no slow cases, so
        // reaching here implies both operands might be numbers.
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
1310
// op_mul on the legacy immediate encoding. A constant fast path is only
// planted for positive constants (a non-positive constant times zero could
// need a -0 result, which the immediate encoding cannot express). The
// operand is detagged, multiplied with overflow check, then retagged.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
1338
// Slow path for the legacy op_mul above. Mirrors the fast path's branch
// selection: the positive-constant fast paths registered two slow cases
// (non-integer operand and multiply overflow), which both fall back to the
// stub; otherwise the generic slow-case codegen handles it.
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, JITStubs::cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
1357
1358void JIT::emit_op_sub(Instruction* currentInstruction)
1359{
1360 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1361}
1362
1363void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1364{
1365 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1366}
1367
1368#endif // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
1369
1370/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
1371
1372} // namespace JSC
1373
1374#endif // ENABLE(JIT)
Note: See TracBrowser for help on using the repository browser.