source: webkit/trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp

Last change on this file was 292445, checked in by [email protected], 3 years ago

[JSC] Strictly annotate pointers with TrustedImmPtr in CCallHelpers
https://bugs.webkit.org/show_bug.cgi?id=238827

Reviewed by Mark Lam.

This allows us to detect pointer use in the DFG easily, which is important for unlinked DFG development.
We also consistently use m_graph instead of m_jit.graph() in DFG::SpeculativeJIT.
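
For illustration, the distinction the strict annotation enforces looks like this (hypothetical snippet with made-up register names, not code from this patch):

    jit.move(CCallHelpers::TrustedImmPtr(globalObject), scratchGPR); // pointer constant: must be TrustedImmPtr
    jit.move(CCallHelpers::TrustedImm32(42), scratchGPR);            // plain integer constant

With that rule in place, searching for TrustedImmPtr finds every pointer baked into DFG-generated code.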

We also completely purge embedded CodeBlock* constants from DFG code. We now load the CodeBlock from the
call frame register (cfr) instead (compileLogShadowChickenTail and callOperationWithCallFrameRollbackOnException).
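
A minimal sketch of the cfr-based load (illustrative register name; the patch applies this in the two functions named above):

    // Before: bake the pointer into the instruction stream.
    //     jit.move(CCallHelpers::TrustedImmPtr(codeBlock), destGPR);
    // After: fetch the CodeBlock from its call frame header slot at runtime.
    jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, destGPR);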

  • dfg/DFGArrayifySlowPathGenerator.h:
  • dfg/DFGCallArrayAllocatorSlowPathGenerator.h:
  • dfg/DFGCallCreateDirectArgumentsSlowPathGenerator.h:
  • dfg/DFGJITCompiler.cpp:

(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):

  • dfg/DFGOSRExit.cpp:

(JSC::DFG::OSRExit::OSRExit):
(JSC::DFG::OSRExit::compileExit):

  • dfg/DFGOSRExitCompilerCommon.cpp:

(JSC::DFG::osrWriteBarrier):

  • dfg/DFGSpeculativeJIT.cpp:

(JSC::DFG::SpeculativeJIT::SpeculativeJIT):
(JSC::DFG::SpeculativeJIT::emitGetCallee):
(JSC::DFG::SpeculativeJIT::emitOSRExitFuzzCheck):
(JSC::DFG::SpeculativeJIT::speculationCheck):
(JSC::DFG::SpeculativeJIT::silentSavePlanForGPR):
(JSC::DFG::SpeculativeJIT::checkArray):
(JSC::DFG::SpeculativeJIT::useChildren):
(JSC::DFG::SpeculativeJIT::compileGetById):
(JSC::DFG::SpeculativeJIT::compileGetByIdFlush):
(JSC::DFG::SpeculativeJIT::compileDeleteById):
(JSC::DFG::SpeculativeJIT::compileDeleteByVal):
(JSC::DFG::SpeculativeJIT::compileInById):
(JSC::DFG::SpeculativeJIT::compileInByVal):
(JSC::DFG::SpeculativeJIT::compileHasPrivate):
(JSC::DFG::SpeculativeJIT::compileStringSlice):
(JSC::DFG::SpeculativeJIT::compileLoopHint):
(JSC::DFG::SpeculativeJIT::compileCurrentBlock):
(JSC::DFG::SpeculativeJIT::checkArgumentTypes):
(JSC::DFG::SpeculativeJIT::compile):
(JSC::DFG::SpeculativeJIT::createOSREntries):
(JSC::DFG::SpeculativeJIT::linkOSREntries):
(JSC::DFG::SpeculativeJIT::compileCheckTraps):
(JSC::DFG::SpeculativeJIT::compileContiguousPutByVal):
(JSC::DFG::SpeculativeJIT::compileDoublePutByVal):
(JSC::DFG::SpeculativeJIT::compilePutByVal):
(JSC::DFG::SpeculativeJIT::compileGetByValOnString):
(JSC::DFG::compileClampDoubleToByte):
(JSC::DFG::SpeculativeJIT::jumpForTypedArrayOutOfBounds):
(JSC::DFG::SpeculativeJIT::compilePutByValForIntTypedArray):

  • dfg/DFGSpeculativeJIT.h:

(JSC::DFG::SpeculativeJIT::nextBlock):
(JSC::DFG::SpeculativeJIT::masqueradesAsUndefinedWatchpointIsStillValid):
(JSC::DFG::SpeculativeJIT::identifierUID):
(JSC::DFG::SpeculativeJIT::callOperationWithCallFrameRollbackOnException):

  • dfg/DFGSpeculativeJIT32_64.cpp:

(JSC::DFG::SpeculativeJIT::cachedGetById):
(JSC::DFG::SpeculativeJIT::cachedGetByIdWithThis):
(JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined):
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined):
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::compileToBooleanObjectOrOther):
(JSC::DFG::SpeculativeJIT::compileToBoolean):
(JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
(JSC::DFG::SpeculativeJIT::emitBranch):
(JSC::DFG::SpeculativeJIT::compileGetByVal):
(JSC::DFG::SpeculativeJIT::compile):
(JSC::DFG::SpeculativeJIT::compileArithRandom):

  • dfg/DFGSpeculativeJIT64.cpp:

(JSC::DFG::SpeculativeJIT::fillJSValue):
(JSC::DFG::SpeculativeJIT::cachedGetById):
(JSC::DFG::SpeculativeJIT::cachedGetByIdWithThis):
(JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNullOrUndefined):
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNullOrUndefined):
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
(JSC::DFG::SpeculativeJIT::fillSpeculateInt32Strict):
(JSC::DFG::SpeculativeJIT::fillSpeculateInt52):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::fillSpeculateBigInt32):
(JSC::DFG::SpeculativeJIT::compileCompareEqPtr):
(JSC::DFG::SpeculativeJIT::compileToBooleanObjectOrOther):
(JSC::DFG::SpeculativeJIT::compileToBoolean):
(JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
(JSC::DFG::SpeculativeJIT::emitUntypedBranch):
(JSC::DFG::SpeculativeJIT::emitBranch):
(JSC::DFG::SpeculativeJIT::compileGetByVal):
(JSC::DFG::SpeculativeJIT::compileNewTypedArrayWithInt52Size):
(JSC::DFG::SpeculativeJIT::compile):
(JSC::DFG::SpeculativeJIT::compileArithRandom):
(JSC::DFG::SpeculativeJIT::compileDateGet):

  • ftl/FTLLowerDFGToB3.cpp:

(JSC::FTL::DFG::LowerDFGToB3::compileUnaryMathIC):
(JSC::FTL::DFG::LowerDFGToB3::compileBinaryMathIC):
(JSC::FTL::DFG::LowerDFGToB3::getPrivateName):
(JSC::FTL::DFG::LowerDFGToB3::compilePrivateBrandAccess):
(JSC::FTL::DFG::LowerDFGToB3::compilePutPrivateName):
(JSC::FTL::DFG::LowerDFGToB3::cachedPutById):
(JSC::FTL::DFG::LowerDFGToB3::compileGetByValImpl):
(JSC::FTL::DFG::LowerDFGToB3::compilePutByVal):
(JSC::FTL::DFG::LowerDFGToB3::compileDelBy):
(JSC::FTL::DFG::LowerDFGToB3::compileCreateActivation):
(JSC::FTL::DFG::LowerDFGToB3::compileNewFunction):
(JSC::FTL::DFG::LowerDFGToB3::compileCreateDirectArguments):
(JSC::FTL::DFG::LowerDFGToB3::compileObjectKeysOrObjectGetOwnPropertyNames):
(JSC::FTL::DFG::LowerDFGToB3::compileNewStringObject):
(JSC::FTL::DFG::LowerDFGToB3::emitNewTypedArrayWithSize):
(JSC::FTL::DFG::LowerDFGToB3::compileMakeRope):
(JSC::FTL::DFG::LowerDFGToB3::compileNotifyWrite):
(JSC::FTL::DFG::LowerDFGToB3::compileCompareStrictEq):

  • ftl/FTLOSRExitCompiler.cpp:

(JSC::FTL::compileStub):

  • jit/CCallHelpers.cpp:
  • jit/CCallHelpers.h:

(JSC::CCallHelpers::std::is_pointer<CURRENT_ARGUMENT_TYPE>::value): Deleted.

  • jit/JIT.cpp:

(JSC::JIT::emitEnterOptimizationCheck):

  • jit/JITArithmetic.cpp:

(JSC::JIT::emitMathICFast):
(JSC::JIT::emitMathICSlow):

  • jit/JITOpcodes.cpp:

(JSC::JIT::emitSlow_op_new_object):
(JSC::JIT::emit_op_catch):
(JSC::JIT::emit_op_switch_imm):
(JSC::JIT::op_enter_handlerGenerator):
(JSC::JIT::emit_op_debug):
(JSC::JIT::emitSlow_op_loop_hint):
(JSC::JIT::emit_op_new_regexp):
(JSC::JIT::emitNewFuncCommon):
(JSC::JIT::emitNewFuncExprCommon):
(JSC::JIT::emit_op_profile_type):

  • jit/JITPropertyAccess.cpp:

(JSC::JIT::emit_op_put_getter_by_id):
(JSC::JIT::emit_op_put_setter_by_id):
(JSC::JIT::emit_op_put_getter_setter_by_id):
(JSC::JIT::emitWriteBarrier):

  • wasm/js/WasmToJS.cpp:

(JSC::Wasm::wasmToJS):

  • Property svn:eol-style set to native
/*
 * Copyright (C) 2008-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "ArithProfile.h"
#include "BytecodeGenerator.h"
#include "CodeBlock.h"
#include "JITBitAndGenerator.h"
#include "JITBitOrGenerator.h"
#include "JITBitXorGenerator.h"
#include "JITDivGenerator.h"
#include "JITInlines.h"
#include "JITLeftShiftGenerator.h"
#include "JITMathIC.h"
#include "JITOperations.h"
#include "ResultType.h"
#include "SlowPathCall.h"

namespace JSC {

void JIT::emit_op_jless(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJless>(currentInstruction, LessThan);
}

void JIT::emit_op_jlesseq(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJlesseq>(currentInstruction, LessThanOrEqual);
}

void JIT::emit_op_jgreater(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJgreater>(currentInstruction, GreaterThan);
}

void JIT::emit_op_jgreatereq(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJgreatereq>(currentInstruction, GreaterThanOrEqual);
}

void JIT::emit_op_jnless(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJnless>(currentInstruction, GreaterThanOrEqual);
}

void JIT::emit_op_jnlesseq(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJnlesseq>(currentInstruction, GreaterThan);
}

void JIT::emit_op_jngreater(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJngreater>(currentInstruction, LessThanOrEqual);
}

void JIT::emit_op_jngreatereq(const JSInstruction* currentInstruction)
{
    emit_compareAndJump<OpJngreatereq>(currentInstruction, LessThan);
}

void JIT::emitSlow_op_jless(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJless>(currentInstruction, DoubleLessThanAndOrdered, operationCompareLess, false, iter);
}

void JIT::emitSlow_op_jlesseq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJlesseq>(currentInstruction, DoubleLessThanOrEqualAndOrdered, operationCompareLessEq, false, iter);
}

void JIT::emitSlow_op_jgreater(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJgreater>(currentInstruction, DoubleGreaterThanAndOrdered, operationCompareGreater, false, iter);
}

void JIT::emitSlow_op_jgreatereq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJgreatereq>(currentInstruction, DoubleGreaterThanOrEqualAndOrdered, operationCompareGreaterEq, false, iter);
}

void JIT::emitSlow_op_jnless(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJnless>(currentInstruction, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter);
}

void JIT::emitSlow_op_jnlesseq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJnlesseq>(currentInstruction, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter);
}

void JIT::emitSlow_op_jngreater(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJngreater>(currentInstruction, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter);
}

void JIT::emitSlow_op_jngreatereq(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emit_compareAndJumpSlow<OpJngreatereq>(currentInstruction, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter);
}

void JIT::emit_op_below(const JSInstruction* currentInstruction)
{
    emit_compareUnsigned<OpBelow>(currentInstruction, Below);
}

void JIT::emit_op_beloweq(const JSInstruction* currentInstruction)
{
    emit_compareUnsigned<OpBeloweq>(currentInstruction, BelowOrEqual);
}

void JIT::emit_op_jbelow(const JSInstruction* currentInstruction)
{
    emit_compareUnsignedAndJump<OpJbelow>(currentInstruction, Below);
}

void JIT::emit_op_jbeloweq(const JSInstruction* currentInstruction)
{
    emit_compareUnsignedAndJump<OpJbeloweq>(currentInstruction, BelowOrEqual);
}

void JIT::emit_op_unsigned(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpUnsigned>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_operand;

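    // op_unsigned re-interprets an int32 as a uint32 and boxes the result.
    // Only values that are still non-negative as an int32 fit the inline
    // int32 box; anything with the sign bit set takes the slow path.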
    emitGetVirtualRegister(op1, jsRegT10);
    emitJumpSlowCaseIfNotInt(jsRegT10);
    addSlowCase(branch32(LessThan, jsRegT10.payloadGPR(), TrustedImm32(0)));
    boxInt32(jsRegT10.payloadGPR(), jsRegT10);
    emitPutVirtualRegister(result, jsRegT10);
}

template<typename Op>
void JIT::emit_compareAndJump(const JSInstruction* instruction, RelationalCondition condition)
{
    auto bytecode = instruction->as<Op>();
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
    emit_compareAndJumpImpl(op1, op2, target, condition);
}

void JIT::emit_compareAndJumpImpl(VirtualRegister op1, VirtualRegister op2, unsigned target, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate
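    // When the constant operand ends up on the left of the comparison, the
    // operands are swapped and the condition is commuted (e.g. "const < x" is
    // emitted as "x > const"), so only one branch shape is needed.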

    bool disallowAllocation = false;
    if (isOperandConstantChar(op1)) {
        emitGetVirtualRegister(op2, jsRegT10);
        addSlowCase(branchIfNotCell(jsRegT10));
        JumpList failures;
        emitLoadCharacterString(jsRegT10.payloadGPR(), regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue(disallowAllocation)[0])), target);
        return;
    }
    if (isOperandConstantChar(op2)) {
        emitGetVirtualRegister(op1, jsRegT10);
        addSlowCase(branchIfNotCell(jsRegT10));
        JumpList failures;
        emitLoadCharacterString(jsRegT10.payloadGPR(), regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue(disallowAllocation)[0])), target);
        return;
    }
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegister(op1, jsRegT10);
        emitJumpSlowCaseIfNotInt(jsRegT10);
        int32_t op2imm = getOperandConstantInt(op2);
        addJump(branch32(condition, jsRegT10.payloadGPR(), Imm32(op2imm)), target);
        return;
    }
    if (isOperandConstantInt(op1)) {
        emitGetVirtualRegister(op2, jsRegT32);
        emitJumpSlowCaseIfNotInt(jsRegT32);
        int32_t op1imm = getOperandConstantInt(op1);
        addJump(branch32(commute(condition), jsRegT32.payloadGPR(), Imm32(op1imm)), target);
        return;
    }

    emitGetVirtualRegister(op1, jsRegT10);
    emitGetVirtualRegister(op2, jsRegT32);
    emitJumpSlowCaseIfNotInt(jsRegT10);
    emitJumpSlowCaseIfNotInt(jsRegT32);

    addJump(branch32(condition, jsRegT10.payloadGPR(), jsRegT32.payloadGPR()), target);
}

template<typename Op>
void JIT::emit_compareUnsignedAndJump(const JSInstruction* instruction, RelationalCondition condition)
{
    auto bytecode = instruction->as<Op>();
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
    emit_compareUnsignedAndJumpImpl(op1, op2, target, condition);
}

void JIT::emit_compareUnsignedAndJumpImpl(VirtualRegister op1, VirtualRegister op2, unsigned target, RelationalCondition condition)
{
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegisterPayload(op1, regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantInt(op1)) {
        emitGetVirtualRegisterPayload(op2, regT1);
        int32_t op1imm = getOperandConstantInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisterPayload(op1, regT0);
        emitGetVirtualRegisterPayload(op2, regT1);
        addJump(branch32(condition, regT0, regT1), target);
    }
}

template<typename Op>
void JIT::emit_compareUnsigned(const JSInstruction* instruction, RelationalCondition condition)
{
    auto bytecode = instruction->as<Op>();
    VirtualRegister dst = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;
    emit_compareUnsignedImpl(dst, op1, op2, condition);
}

void JIT::emit_compareUnsignedImpl(VirtualRegister dst, VirtualRegister op1, VirtualRegister op2, RelationalCondition condition)
{
    if (isOperandConstantInt(op2)) {
        emitGetVirtualRegisterPayload(op1, regT0);
        int32_t op2imm = getOperandConstantInt(op2);
        compare32(condition, regT0, Imm32(op2imm), regT0);
    } else if (isOperandConstantInt(op1)) {
        emitGetVirtualRegisterPayload(op2, regT0);
        int32_t op1imm = getOperandConstantInt(op1);
        compare32(commute(condition), regT0, Imm32(op1imm), regT0);
    } else {
        emitGetVirtualRegisterPayload(op1, regT0);
        emitGetVirtualRegisterPayload(op2, regT1);
        compare32(condition, regT0, regT1, regT0);
    }
    boxBoolean(regT0, jsRegT10);
    emitPutVirtualRegister(dst, jsRegT10);
}

template<typename Op, typename SlowOperation>
void JIT::emit_compareAndJumpSlow(const JSInstruction* instruction, DoubleCondition condition, SlowOperation operation, bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    auto bytecode = instruction->as<Op>();
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;
    unsigned target = jumpTarget(instruction, bytecode.m_targetLabel);
    emit_compareAndJumpSlowImpl(op1, op2, target, instruction->size(), condition, operation, invert, iter);
}

template<typename SlowOperation>
void JIT::emit_compareAndJumpSlowImpl(VirtualRegister op1, VirtualRegister op2, unsigned target, size_t instructionSize, DoubleCondition condition, SlowOperation operation, bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
        linkAllSlowCases(iter);

        constexpr GPRReg globalObjectGPR = preferredArgumentGPR<SlowOperation, 0>();
        constexpr JSValueRegs arg1JSR = preferredArgumentJSR<SlowOperation, 1>();
        constexpr JSValueRegs arg2JSR = preferredArgumentJSR<SlowOperation, 2>();

        emitGetVirtualRegister(op1, arg1JSR);
        emitGetVirtualRegister(op2, arg2JSR);
        loadGlobalObject(globalObjectGPR);
        callOperation(operation, globalObjectGPR, arg1JSR, arg2JSR);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

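    // The two value representations unbox differently: on JSVALUE64 the double
    // is decoded out of the single payload GPR, while on JSVALUE32_64 the
    // tag/payload register pair is moved into the FPR directly.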
    auto unboxDouble = [this](JSValueRegs src, FPRReg dst) {
#if USE(JSVALUE64)
        this->unboxDoubleWithoutAssertions(src.payloadGPR(), src.payloadGPR(), dst);
#elif USE(JSVALUE32_64)
        this->unboxDouble(src, dst);
#endif
    };

    if (isOperandConstantInt(op2)) {
        linkAllSlowCases(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = branchIfNotNumber(jsRegT10, regT4);
            unboxDouble(jsRegT10, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT2);
            convertInt32ToDouble(regT2, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), instructionSize);

            fail1.link(this);
        }

        emitGetVirtualRegister(op2, jsRegT32);
        loadGlobalObject(regT4);
        callOperation(operation, regT4, jsRegT10, jsRegT32);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantInt(op1)) {
        linkAllSlowCases(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = branchIfNotNumber(jsRegT32, regT4);
            unboxDouble(jsRegT32, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), instructionSize);

            fail1.link(this);
        }

        emitGetVirtualRegister(op1, jsRegT10);
        loadGlobalObject(regT4);
        callOperation(operation, regT4, jsRegT10, jsRegT32);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    linkSlowCase(iter); // LHS is not Int.

    if (supportsFloatingPoint()) {
        Jump fail1 = branchIfNotNumber(jsRegT10, regT4);
        Jump fail2 = branchIfNotNumber(jsRegT32, regT4);
        Jump fail3 = branchIfInt32(jsRegT32);
        unboxDouble(jsRegT10, fpRegT0);
        unboxDouble(jsRegT32, fpRegT1);

        emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

        emitJumpSlowToHot(jump(), instructionSize);

        fail1.link(this);
        fail2.link(this);
        fail3.link(this);
    }

    linkSlowCase(iter); // RHS is not Int.
    loadGlobalObject(regT4);
    callOperation(operation, regT4, jsRegT10, jsRegT32);
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}

void JIT::emit_op_inc(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpInc>();
    VirtualRegister srcDst = bytecode.m_srcDst;

    emitGetVirtualRegister(srcDst, jsRegT10);
    emitJumpSlowCaseIfNotInt(jsRegT10);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), jsRegT10.payloadGPR()));
    boxInt32(jsRegT10.payloadGPR(), jsRegT10);
    emitPutVirtualRegister(srcDst, jsRegT10);
}

void JIT::emit_op_dec(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDec>();
    VirtualRegister srcDst = bytecode.m_srcDst;

    emitGetVirtualRegister(srcDst, jsRegT10);
    emitJumpSlowCaseIfNotInt(jsRegT10);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), jsRegT10.payloadGPR()));
    boxInt32(jsRegT10.payloadGPR(), jsRegT10);
    emitPutVirtualRegister(srcDst, jsRegT10);
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86_64)

void JIT::emit_op_mod(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpMod>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    auto edx = X86Registers::edx;
    auto ecx = X86Registers::ecx;
    ASSERT(regT4 != edx);
    ASSERT(regT4 != ecx);

    emitGetVirtualRegister(op1, regT4);
    emitGetVirtualRegister(op2, ecx);
    emitJumpSlowCaseIfNotInt(regT4);
    emitJumpSlowCaseIfNotInt(ecx);

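    // idiv faults on a zero denominator and on INT_MIN / -1, and a negative
    // numerator whose remainder is zero must produce -0, which an int32 cannot
    // represent; all three cases are sent to the slow path.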
    move(regT4, regT0);
    addSlowCase(branchTest32(Zero, ecx));
    Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    x86ConvertToDoubleWord32();
    x86Div32(ecx);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, edx));
    numeratorPositive.link(this);
    boxInt32(edx, jsRegT10);
    emitPutVirtualRegister(result, jsRegT10);
}

void JIT::emitSlow_op_mod(const JSInstruction*, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITSlowPathCall slowPathCall(this, slow_path_mod);
    slowPathCall.call();
}

#else // CPU(X86_64)

void JIT::emit_op_mod(const JSInstruction*)
{
    JITSlowPathCall slowPathCall(this, slow_path_mod);
    slowPathCall.call();
}

void JIT::emitSlow_op_mod(const JSInstruction*, Vector<SlowCaseEntry>::iterator&)
{
    UNREACHABLE_FOR_PLATFORM();
}

#endif // CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

void JIT::emit_op_pow(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpPow>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;

    constexpr JSValueRegs leftRegs = jsRegT10;
    constexpr JSValueRegs rightRegs = jsRegT32;
    constexpr JSValueRegs resultRegs = leftRegs;
    constexpr GPRReg scratchGPR = regT4;

    emitGetVirtualRegister(op1, leftRegs);
    emitGetVirtualRegister(op2, rightRegs);
    emitJumpSlowCaseIfNotInt(rightRegs);

    addSlowCase(branch32(LessThan, rightRegs.payloadGPR(), TrustedImm32(0)));
    addSlowCase(branch32(GreaterThan, rightRegs.payloadGPR(), TrustedImm32(maxExponentForIntegerMathPow)));

    Jump lhsNotInt = branchIfNotInt32(leftRegs);
    convertInt32ToDouble(leftRegs.payloadGPR(), fpRegT0);
    Jump lhsReady = jump();
    lhsNotInt.link(this);
    addSlowCase(branchIfNotNumber(leftRegs, scratchGPR));
#if USE(JSVALUE64)
    unboxDouble(leftRegs.payloadGPR(), scratchGPR, fpRegT0);
#else
    unboxDouble(leftRegs, fpRegT0);
#endif
    lhsReady.link(this);

    move(TrustedImm32(1), scratchGPR);
    convertInt32ToDouble(scratchGPR, fpRegT1);

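    // Exponentiation by squaring: fpRegT1 accumulates the result (seeded with
    // 1.0 above), fpRegT0 holds the successively squared base, and each
    // iteration consumes the low bit of the integer exponent.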
    Label loop = label();
    Jump exponentIsEven = branchTest32(Zero, rightRegs.payloadGPR(), TrustedImm32(1));
    mulDouble(fpRegT0, fpRegT1);
    exponentIsEven.link(this);
    mulDouble(fpRegT0, fpRegT0);
    rshift32(TrustedImm32(1), rightRegs.payloadGPR());
    branchTest32(NonZero, rightRegs.payloadGPR()).linkTo(loop, this);

    boxDouble(fpRegT1, resultRegs);
    emitPutVirtualRegister(result, resultRegs);
}

void JIT::emitSlow_op_pow(const JSInstruction*, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITSlowPathCall slowPathCall(this, slow_path_pow);
    slowPathCall.call();
}

void JIT::emit_op_negate(const JSInstruction* currentInstruction)
{
    UnaryArithProfile* arithProfile = &m_unlinkedCodeBlock->unaryArithProfile(currentInstruction->as<OpNegate>().m_profileIndex);
    JITNegIC* negateIC = m_mathICs.addJITNegIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, negateIC);
    // FIXME: it would be better to call these operationValueNegate, since the operand can be a BigInt.
    emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);
}

void JIT::emitSlow_op_negate(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITNegIC* negIC = bitwise_cast<JITNegIC*>(m_instructionToMathIC.get(currentInstruction));
    // FIXME: it would be better to call these operationValueNegate, since the operand can be a BigInt.
    emitMathICSlow<OpNegate>(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize);
}

template<typename Op, typename SnippetGenerator>
void JIT::emitBitBinaryOpFastPath(const JSInstruction* currentInstruction, ProfilingPolicy profilingPolicy)
{
    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;

    constexpr JSValueRegs leftRegs = jsRegT10;
    constexpr JSValueRegs rightRegs = jsRegT32;
    constexpr JSValueRegs resultRegs = leftRegs;
    constexpr GPRReg scratchGPR = regT4;

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    if (profilingPolicy == ProfilingPolicy::ShouldEmitProfiling)
        emitValueProfilingSiteIfProfiledOpcode(bytecode);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}

void JIT::emit_op_bitnot(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpBitnot>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_operand;

    emitGetVirtualRegister(op1, jsRegT10);

    addSlowCase(branchIfNotInt32(jsRegT10));
    not32(jsRegT10.payloadGPR());
#if USE(JSVALUE64)
    boxInt32(jsRegT10.payloadGPR(), jsRegT10);
#endif

    emitValueProfilingSiteIfProfiledOpcode(bytecode);

    emitPutVirtualRegister(result, jsRegT10);
}

void JIT::emit_op_bitand(const JSInstruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitand, JITBitAndGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_bitor(const JSInstruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitor, JITBitOrGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_bitxor(const JSInstruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpBitxor, JITBitXorGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);
}

void JIT::emit_op_lshift(const JSInstruction* currentInstruction)
{
    emitBitBinaryOpFastPath<OpLshift, JITLeftShiftGenerator>(currentInstruction);
}

void JIT::emitRightShiftFastPath(const JSInstruction* currentInstruction, OpcodeID opcodeID)
{
    ASSERT(opcodeID == op_rshift || opcodeID == op_urshift);
    switch (opcodeID) {
    case op_rshift:
        emitRightShiftFastPath<OpRshift>(currentInstruction, JITRightShiftGenerator::SignedShift);
        break;
    case op_urshift:
        emitRightShiftFastPath<OpUrshift>(currentInstruction, JITRightShiftGenerator::UnsignedShift);
        break;
    default:
        ASSERT_NOT_REACHED();
    }
}

template<typename Op>
void JIT::emitRightShiftFastPath(const JSInstruction* currentInstruction, JITRightShiftGenerator::ShiftType snippetShiftType)
{
    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;

    constexpr JSValueRegs leftRegs = jsRegT10;
    constexpr JSValueRegs rightRegs = jsRegT32;
    constexpr JSValueRegs resultRegs = leftRegs;
    constexpr GPRReg scratchGPR = regT4;

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, fpRegT0, scratchGPR, snippetShiftType);

    gen.generateFastPath(*this);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().link(this);
    emitPutVirtualRegister(result, resultRegs);

    addSlowCase(gen.slowPathJumpList());
}

void JIT::emit_op_rshift(const JSInstruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_rshift);
}

void JIT::emit_op_urshift(const JSInstruction* currentInstruction)
{
    emitRightShiftFastPath(currentInstruction, op_urshift);
}

void JIT::emit_op_add(const JSInstruction* currentInstruction)
{
    BinaryArithProfile* arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpAdd>().m_profileIndex);
    JITAddIC* addIC = m_mathICs.addJITAddIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, addIC);
    emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);
}

void JIT::emitSlow_op_add(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITAddIC* addIC = bitwise_cast<JITAddIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpAdd>(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize);
}

template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, const JSInstruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister operand = bytecode.m_operand;

    constexpr GPRReg globalObjectGPR = preferredArgumentGPR<ProfiledFunction, 0>();
    constexpr JSValueRegs srcRegs = preferredArgumentJSR<ProfiledFunction, 1>();
    // ArithNegate benefits from using the same register as src and dst.
    constexpr JSValueRegs resultRegs = srcRegs;
    constexpr GPRReg scratchGPR = globalObjectGPR;
    static_assert(noOverlap(srcRegs, scratchGPR));

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    mathIC->m_generator = Generator(resultRegs, srcRegs, scratchGPR);

    emitGetVirtualRegister(operand, srcRegs);

    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, makeUniqueRef<MathICGenerationState>()).iterator->value.get();

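    // Ask the IC to emit the arithmetic inline. If it declines (e.g. based on
    // what the arith profile has observed), emit a direct call to the slow
    // operation here instead.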
    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        UnaryArithProfile* arithProfile = mathIC->arithProfile();
        loadGlobalObject(globalObjectGPR);
        if (arithProfile && shouldEmitProfiling())
            callOperationWithResult(profiledFunction, resultRegs, globalObjectGPR, srcRegs, TrustedImmPtr(arithProfile));
        else
            callOperationWithResult(nonProfiledFunction, resultRegs, globalObjectGPR, srcRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}

template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const JSInstruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;

    constexpr GPRReg globalObjectGPR = preferredArgumentGPR<ProfiledFunction, 0>();
    constexpr JSValueRegs leftRegs = preferredArgumentJSR<ProfiledFunction, 1>();
    constexpr JSValueRegs rightRegs = preferredArgumentJSR<ProfiledFunction, 2>();
    constexpr JSValueRegs resultRegs = returnValueJSR;
    constexpr GPRReg scratchGPR = regT5;
    static_assert(noOverlap(leftRegs, rightRegs, scratchGPR));
    static_assert(noOverlap(resultRegs, scratchGPR));

    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
    SnippetOperand rightOperand(bytecode.m_operandTypes.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, fpRegT0, fpRegT1, scratchGPR);

    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    if (!Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    if (!Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = label();
#endif

    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, makeUniqueRef<MathICGenerationState>()).iterator->value.get();

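    // As in the unary case, try the inline IC first. On the call fallback, a
    // constant operand that the snippet would have folded must now be
    // materialized into its argument registers.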
    bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
    if (!generatedInlineCode) {
        if (leftOperand.isConst())
            emitGetVirtualRegister(op1, leftRegs);
        else if (rightOperand.isConst())
            emitGetVirtualRegister(op2, rightRegs);
        BinaryArithProfile* arithProfile = mathIC->arithProfile();
        loadGlobalObject(globalObjectGPR);
        if (arithProfile && shouldEmitProfiling())
            callOperationWithResult(profiledFunction, resultRegs, globalObjectGPR, leftRegs, rightRegs, TrustedImmPtr(arithProfile));
        else
            callOperationWithResult(nonProfiledFunction, resultRegs, globalObjectGPR, leftRegs, rightRegs);
    } else
        addSlowCase(mathICGenerationState.slowPathJumps);

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(inlineEnd).executableAddress<char*>() - linkBuffer.locationOf(inlineStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);
}

template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, const JSInstruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value.get();
    mathICGenerationState.slowPathStart = label();

    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister result = bytecode.m_dst;

    constexpr GPRReg globalObjectGPR = preferredArgumentGPR<ProfiledFunction, 0>();
    constexpr JSValueRegs srcRegs = preferredArgumentJSR<ProfiledFunction, 1>();
    constexpr JSValueRegs resultRegs = returnValueJSR;

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

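    // When profiling, the slow path calls either the repatching operation
    // (which may regenerate the IC's code) or the plain profiled operation;
    // without profiling it always calls the repatching operation.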
    UnaryArithProfile* arithProfile = mathIC->arithProfile();
    loadGlobalObject(globalObjectGPR);
    if (arithProfile && shouldEmitProfiling()) {
        if (mathICGenerationState.shouldSlowPathRepatch)
            mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(profiledRepatchFunction), resultRegs, globalObjectGPR, srcRegs, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, globalObjectGPR, srcRegs, TrustedImmPtr(arithProfile));
    } else
        mathICGenerationState.slowPathCall = callOperationWithResult(reinterpret_cast<J_JITOperation_GJMic>(repatchFunction), resultRegs, globalObjectGPR, srcRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    addLinkTask([=, this] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value.get();
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}

template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const JSInstruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
    MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value.get();
    mathICGenerationState.slowPathStart = label();

    auto bytecode = currentInstruction->as<Op>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;

    constexpr GPRReg globalObjectGPR = preferredArgumentGPR<ProfiledFunction, 0>();
    constexpr JSValueRegs leftRegs = preferredArgumentJSR<ProfiledFunction, 1>();
    constexpr JSValueRegs rightRegs = preferredArgumentJSR<ProfiledFunction, 2>();
    constexpr JSValueRegs resultRegs = returnValueJSR;

    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
    SnippetOperand rightOperand(bytecode.m_operandTypes.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));

    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    if (Generator::isLeftOperandValidConstant(leftOperand))
        emitGetVirtualRegister(op1, leftRegs);
    else if (Generator::isRightOperandValidConstant(rightOperand))
        emitGetVirtualRegister(op2, rightRegs);

#if ENABLE(MATH_IC_STATS)
    auto slowPathStart = label();
#endif

    BinaryArithProfile* arithProfile = mathIC->arithProfile();
    loadGlobalObject(globalObjectGPR);
    if (arithProfile && shouldEmitProfiling()) {
        if (mathICGenerationState.shouldSlowPathRepatch)
            mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(profiledRepatchFunction), resultRegs, globalObjectGPR, leftRegs, rightRegs, TrustedImmPtr(mathIC));
        else
            mathICGenerationState.slowPathCall = callOperationWithResult(profiledFunction, resultRegs, globalObjectGPR, leftRegs, rightRegs, TrustedImmPtr(arithProfile));
    } else
        mathICGenerationState.slowPathCall = callOperationWithResult(bitwise_cast<J_JITOperation_GJJMic>(repatchFunction), resultRegs, globalObjectGPR, leftRegs, rightRegs, TrustedImmPtr(mathIC));

#if ENABLE(MATH_IC_STATS)
    auto slowPathEnd = label();
    addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = linkBuffer.locationOf(slowPathEnd).executableAddress<char*>() - linkBuffer.locationOf(slowPathStart).executableAddress<char*>();
        mathIC->m_generatedCodeSize += size;
    });
#endif

    emitPutVirtualRegister(result, resultRegs);

    addLinkTask([=, this] (LinkBuffer& linkBuffer) {
        MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value.get();
        mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
    });
}

void JIT::emit_op_div(const JSInstruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpDiv>();
    VirtualRegister result = bytecode.m_dst;
    VirtualRegister op1 = bytecode.m_lhs;
    VirtualRegister op2 = bytecode.m_rhs;

    constexpr JSValueRegs leftRegs = jsRegT10;
    constexpr JSValueRegs rightRegs = jsRegT32;
    constexpr JSValueRegs resultRegs = leftRegs;
    constexpr GPRReg scratchGPR = regT4;
    constexpr FPRReg scratchFPR = fpRegT2;

    BinaryArithProfile* arithProfile = nullptr;
    if (shouldEmitProfiling())
        arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpDiv>().m_profileIndex);

    SnippetOperand leftOperand(bytecode.m_operandTypes.first());
    SnippetOperand rightOperand(bytecode.m_operandTypes.second());

    if (isOperandConstantInt(op1))
        leftOperand.setConstInt32(getOperandConstantInt(op1));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op1))
        leftOperand.setConstDouble(getOperandConstantDouble(op1));
#endif
    else if (isOperandConstantInt(op2))
        rightOperand.setConstInt32(getOperandConstantInt(op2));
#if USE(JSVALUE64)
    else if (isOperandConstantDouble(op2))
        rightOperand.setConstDouble(getOperandConstantDouble(op2));
#endif

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst())
        emitGetVirtualRegister(op1, leftRegs);
    if (!rightOperand.isConst())
        emitGetVirtualRegister(op2, rightRegs);

    JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        fpRegT0, fpRegT1, scratchGPR, scratchFPR, arithProfile);

    gen.generateFastPath(*this);

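    // Division has no MathIC; if the snippet generator could not emit a fast
    // path at all, the op is compiled as an unconditional slow-path call.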
    if (gen.didEmitFastPath()) {
        gen.endJumpList().link(this);
        emitPutVirtualRegister(result, resultRegs);

        addSlowCase(gen.slowPathJumpList());
    } else {
        ASSERT(gen.endJumpList().empty());
        ASSERT(gen.slowPathJumpList().empty());
        JITSlowPathCall slowPathCall(this, slow_path_div);
        slowPathCall.call();
    }
}

void JIT::emit_op_mul(const JSInstruction* currentInstruction)
{
    BinaryArithProfile* arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpMul>().m_profileIndex);
    JITMulIC* mulIC = m_mathICs.addJITMulIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, mulIC);
    emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);
}

void JIT::emitSlow_op_mul(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITMulIC* mulIC = bitwise_cast<JITMulIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpMul>(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize);
}

void JIT::emit_op_sub(const JSInstruction* currentInstruction)
{
    BinaryArithProfile* arithProfile = &m_unlinkedCodeBlock->binaryArithProfile(currentInstruction->as<OpSub>().m_profileIndex);
    JITSubIC* subIC = m_mathICs.addJITSubIC(arithProfile);
    m_instructionToMathIC.add(currentInstruction, subIC);
    emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub);
}

void JIT::emitSlow_op_sub(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    JITSubIC* subIC = bitwise_cast<JITSubIC*>(m_instructionToMathIC.get(currentInstruction));
    emitMathICSlow<OpSub>(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize);
}

} // namespace JSC

#endif // ENABLE(JIT)