/*
 * Copyright (C) 2008-2022 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)

#include "JIT.h"

#include "BaselineJITRegisters.h"
#include "BytecodeOperandsForCheckpoint.h"
#include "CacheableIdentifierInlines.h"
#include "CallFrameShuffler.h"
#include "CodeBlock.h"
#include "JITInlines.h"
#include "ScratchRegisterAllocator.h"
#include "SetupVarargsFrame.h"
#include "SlowPathCall.h"
#include "StackAlignment.h"
#include "ThunkGenerators.h"

namespace JSC {

void JIT::emit_op_ret(const JSInstruction* currentInstruction)
{
    static_assert(noOverlap(returnValueJSR, callFrameRegister));

    // Return the result in returnValueGPR (returnValueGPR2/returnValueGPR on 32-bit).
    auto bytecode = currentInstruction->as<OpRet>();
    emitGetVirtualRegister(bytecode.m_value, returnValueJSR);
    emitNakedNearJump(vm().getCTIStub(returnFromBaselineGenerator).code());
}

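// Every baseline op_ret site above ends with a naked jump to this single
// shared thunk: it checks stack alignment, restores the LLInt/Baseline
// callee-save registers, tears down the frame, and returns to the caller.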
MacroAssemblerCodeRef<JITThunkPtrTag> JIT::returnFromBaselineGenerator(VM&)
{
    CCallHelpers jit;

    jit.checkStackPointerAlignment();
    jit.emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
    jit.emitFunctionEpilogue();
    jit.ret();

    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    return FINALIZE_THUNK(patchBuffer, JITThunkPtrTag, "Baseline: op_ret_handler");
}

template<typename Op>
void JIT::emitPutCallResult(const Op& bytecode)
{
    emitValueProfilingSite(bytecode, returnValueJSR);
    emitPutVirtualRegister(destinationFor(bytecode, m_bytecodeIndex.checkpoint()).virtualRegister(), returnValueJSR);
}

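// compileSetupFrame has two overloads selected via std::enable_if on the
// opcode: this one handles calls whose frame shape is known statically from
// the bytecode; the varargs/forwarding overload below must size and populate
// the frame at runtime.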
template<typename Op>
std::enable_if_t<
    Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
    && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
, void>
JIT::compileSetupFrame(const Op& bytecode)
{
    unsigned checkpoint = m_bytecodeIndex.checkpoint();
    int argCountIncludingThis = argumentCountIncludingThisFor(bytecode, checkpoint);
    int registerOffset = -static_cast<int>(stackOffsetInRegistersForCall(bytecode, checkpoint));

    if (Op::opcodeID == op_call && shouldEmitProfiling()) {
        constexpr JSValueRegs tmpJSR = returnValueJSR;
        constexpr GPRReg tmpGPR = tmpJSR.payloadGPR();
        emitGetVirtualRegister(VirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0)), tmpJSR);
        Jump done = branchIfNotCell(tmpJSR);
        load32(Address(tmpJSR.payloadGPR(), JSCell::structureIDOffset()), tmpGPR);
        store32ToMetadata(tmpGPR, bytecode, Op::Metadata::offsetOfArrayProfile() + ArrayProfile::offsetOfLastSeenStructureID());
        done.link(this);
    }

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
    store32(TrustedImm32(argCountIncludingThis), Address(stackPointerRegister, CallFrameSlot::argumentCountIncludingThis * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
}

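// The varargs/forwarding overload builds the callee frame in two runtime
// steps: a size operation first computes how much stack the new frame needs,
// then a setup operation copies the arguments into it. Between the two calls
// the stack pointer is moved below the frame under construction so the second
// C call cannot clobber it.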
template<typename Op>
std::enable_if_t<
    Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs
    || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments
, void>
JIT::compileSetupFrame(const Op& bytecode)
{
    VirtualRegister thisValue = bytecode.m_thisValue;
    VirtualRegister arguments = bytecode.m_arguments;
    int firstFreeRegister = bytecode.m_firstFree.offset(); // FIXME: Why is this a virtual register if we never use it as one...
    int firstVarArgOffset = bytecode.m_firstVarArg;

    {
        constexpr GPRReg globalObjectGPR = preferredArgumentGPR<Z_JITOperation_GJZZ, 0>();
        constexpr JSValueRegs argumentsJSR = preferredArgumentJSR<Z_JITOperation_GJZZ, 1>();

        Z_JITOperation_GJZZ sizeOperation;
        if (Op::opcodeID == op_tail_call_forward_arguments)
            sizeOperation = operationSizeFrameForForwardArguments;
        else
            sizeOperation = operationSizeFrameForVarargs;

        loadGlobalObject(globalObjectGPR);
        emitGetVirtualRegister(arguments, argumentsJSR);
        callOperation(sizeOperation, globalObjectGPR, argumentsJSR, -firstFreeRegister, firstVarArgOffset);
        move(TrustedImm32(-firstFreeRegister), regT1);
        emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    }

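    // Move SP below the frame being built, leaving what appears to be room for
    // the upcoming operation call's stack-passed arguments (5 pointer-wide
    // slots on 64-bit, 6 on 32-bit where a JSValue takes two), rounded up so
    // SP stays stack-aligned.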
#if USE(JSVALUE64)
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
#elif USE(JSVALUE32_64)
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
#endif

    {
        emitGetVirtualRegister(arguments, jsRegT32);
        F_JITOperation_GFJZZ setupOperation;
        if (Op::opcodeID == op_tail_call_forward_arguments)
            setupOperation = operationSetupForwardArgumentsFrame;
        else
            setupOperation = operationSetupVarargsFrame;
        loadGlobalObject(regT4);
        callOperation(setupOperation, regT4, regT1, jsRegT32, firstVarArgOffset, regT0);
        move(returnValueGPR, regT5);
    }

    // Profile the argument count.
    load32(Address(regT5, CallFrameSlot::argumentCountIncludingThis * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    materializePointerIntoMetadata(bytecode, Op::Metadata::offsetOfCallLinkInfo(), regT0);
    load32(Address(regT0, CallLinkInfo::offsetOfMaxArgumentCountIncludingThis()), regT3);
    Jump notBiggest = branch32(Above, regT3, regT2);
    store32(regT2, Address(regT0, CallLinkInfo::offsetOfMaxArgumentCountIncludingThis()));
    notBiggest.link(this);

    // Initialize 'this'.
    constexpr JSValueRegs thisJSR = jsRegT10;
    emitGetVirtualRegister(thisValue, thisJSR);
    storeValue(thisJSR, Address(regT5, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT5, stackPointerRegister);
}

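// compileCallEval is a no-op for every opcode except OpCallEval: the generic
// template returns false so compileOpCall falls through to the normal call
// path, while the OpCallEval specialization emits the eval-specific call and
// reports that it handled the opcode.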
template<typename Op>
bool JIT::compileCallEval(const Op&)
{
    return false;
}

template<>
bool JIT::compileCallEval(const OpCallEval& bytecode)
{
    addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, argumentGPR1);
    storePtr(callFrameRegister, Address(argumentGPR1, CallFrame::callerFrameOffset()));

    resetSP();

    move(TrustedImm32(bytecode.m_ecmaMode.value()), argumentGPR2);
    loadGlobalObject(argumentGPR0);
    callOperation(operationCallEval, argumentGPR0, argumentGPR1, argumentGPR2);
    addSlowCase(branchIfEmpty(returnValueJSR));

    setFastPathResumePoint();
    emitPutCallResult(bytecode);

    return true;
}

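// Slow path for op_call_eval: operationCallEval returns the empty value when
// it declines the call (e.g. the callee is not actually the eval function),
// so we rebuild SP for the callee frame, reload the callee from the frame,
// and dispatch through a regular virtual call instead.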
void JIT::compileCallEvalSlowCase(const JSInstruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkAllSlowCases(iter);

    auto bytecode = instruction->as<OpCallEval>();
    int registerOffset = -bytecode.m_argv;

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

    static_assert(noOverlap(BaselineJITRegisters::Call::calleeJSR, BaselineJITRegisters::Call::callLinkInfoGPR, regT3));
    loadValue(Address(stackPointerRegister, sizeof(Register) * CallFrameSlot::callee - sizeof(CallerFrameAndPC)), BaselineJITRegisters::Call::calleeJSR);
    loadGlobalObject(regT3);
    materializePointerIntoMetadata(bytecode, OpCallEval::Metadata::offsetOfCallLinkInfo(), BaselineJITRegisters::Call::callLinkInfoGPR);
    emitVirtualCallWithoutMovingGlobalObject(*m_vm, BaselineJITRegisters::Call::callLinkInfoGPR, CallMode::Regular);
    resetSP();
}

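// Like compileCallEval above, compileTailCall only does real work for
// OpTailCall. The specialization uses CallFrameShuffler to relocate the new
// arguments over the current frame before jumping to the callee, so the tail
// call reuses the caller's stack space.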
template<typename Op>
bool JIT::compileTailCall(const Op&, UnlinkedCallLinkInfo*, unsigned)
{
    return false;
}

template<>
bool JIT::compileTailCall(const OpTailCall& bytecode, UnlinkedCallLinkInfo*, unsigned callLinkInfoIndex)
{
    materializePointerIntoMetadata(bytecode, OpTailCall::Metadata::offsetOfCallLinkInfo(), BaselineJITRegisters::Call::callLinkInfoGPR);
    JumpList slowPaths = CallLinkInfo::emitTailCallDataICFastPath(*this, BaselineJITRegisters::Call::calleeJSR.payloadGPR(), BaselineJITRegisters::Call::callLinkInfoGPR, scopedLambda<void()>([&] {
        CallFrameShuffleData shuffleData = CallFrameShuffleData::createForBaselineOrLLIntTailCall(bytecode, m_unlinkedCodeBlock->numParameters());
        CallFrameShuffler shuffler { *this, shuffleData };
        shuffler.lockGPR(BaselineJITRegisters::Call::callLinkInfoGPR);
        shuffler.prepareForTailCall();
    }));
    addSlowCase(slowPaths);

    auto doneLocation = label();
    m_callCompilationInfo[callLinkInfoIndex].doneLocation = doneLocation;

    return true;
}

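// compileOpCall is the shared driver for every call-shaped opcode. For
// everything but eval it allocates the unlinked CallLinkInfo, then builds the
// callee frame via compileSetupFrame, records the CallSiteIndex and callee
// slot, and hands off to the eval, tail-call, or regular DataIC fast path as
// appropriate for the opcode.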
template<typename Op>
void JIT::compileOpCall(const JSInstruction* instruction, unsigned callLinkInfoIndex)
{
    OpcodeID opcodeID = Op::opcodeID;
    auto bytecode = instruction->as<Op>();
    VirtualRegister callee = calleeFor(bytecode, m_bytecodeIndex.checkpoint());

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.

       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.

       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    UnlinkedCallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval) {
        info = addUnlinkedCallLinkInfo();
        info->bytecodeIndex = m_bytecodeIndex;
        ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
        m_callCompilationInfo.append(CallCompilationInfo());
        m_callCompilationInfo[callLinkInfoIndex].unlinkedCallLinkInfo = info;
    }
    compileSetupFrame(bytecode);

    // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
    uint32_t locationBits = CallSiteIndex(m_bytecodeIndex).bits();
    store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCountIncludingThis * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, BaselineJITRegisters::Call::calleeJSR);
    storeValue(BaselineJITRegisters::Call::calleeJSR, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (compileCallEval(bytecode))
        return;

#if USE(JSVALUE32_64)
    // We need this on JSVALUE32_64 only, as on JSVALUE64 a pointer comparison
    // in the DataIC fast path catches this.
    addSlowCase(branchIfNotCell(BaselineJITRegisters::Call::calleeJSR));
#endif

    if (compileTailCall(bytecode, info, callLinkInfoIndex))
        return;

    materializePointerIntoMetadata(bytecode, Op::Metadata::offsetOfCallLinkInfo(), BaselineJITRegisters::Call::callLinkInfoGPR);
    if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        auto slowPaths = CallLinkInfo::emitTailCallDataICFastPath(*this, BaselineJITRegisters::Call::calleeJSR.payloadGPR(), BaselineJITRegisters::Call::callLinkInfoGPR, scopedLambda<void()>([&] {
            emitRestoreCalleeSaves();
            prepareForTailCallSlow(BaselineJITRegisters::Call::callLinkInfoGPR);
        }));
        addSlowCase(slowPaths);
        auto doneLocation = label();
        m_callCompilationInfo[callLinkInfoIndex].doneLocation = doneLocation;
        return;
    }

    auto slowPaths = CallLinkInfo::emitDataICFastPath(*this, BaselineJITRegisters::Call::calleeJSR.payloadGPR(), BaselineJITRegisters::Call::callLinkInfoGPR);
    auto doneLocation = label();
    addSlowCase(slowPaths);

    m_callCompilationInfo[callLinkInfoIndex].doneLocation = doneLocation;

    if constexpr (Op::opcodeID != op_iterator_open && Op::opcodeID != op_iterator_next)
        setFastPathResumePoint();
    resetSP();
    emitPutCallResult(bytecode);
}

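// The generic slow path: reload the call's slow-path state and dispatch
// through the CallLinkInfo's out-of-line stub. Tail calls restore the callee
// saves first and must never return here, which the
// abortWithReason(JITDidReturnFromTailCall) guard enforces.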
template<typename Op>
void JIT::compileOpCallSlowCase(const JSInstruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned)
{
    OpcodeID opcodeID = Op::opcodeID;
    auto bytecode = instruction->as<Op>();
    ASSERT(opcodeID != op_call_eval);

    linkAllSlowCases(iter);

    loadGlobalObject(regT3);
    materializePointerIntoMetadata(bytecode, Op::Metadata::offsetOfCallLinkInfo(), regT2);

    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        emitRestoreCalleeSaves();

    CallLinkInfo::emitDataICSlowPath(*m_vm, *this, regT2);

    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        abortWithReason(JITDidReturnFromTailCall);
        return;
    }
}

void JIT::emit_op_call(const JSInstruction* currentInstruction)
{
    compileOpCall<OpCall>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_tail_call(const JSInstruction* currentInstruction)
{
    compileOpCall<OpTailCall>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(const JSInstruction* currentInstruction)
{
    compileOpCall<OpCallEval>(currentInstruction, m_callLinkInfoIndex);
}

void JIT::emit_op_call_varargs(const JSInstruction* currentInstruction)
{
    compileOpCall<OpCallVarargs>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_tail_call_varargs(const JSInstruction* currentInstruction)
{
    compileOpCall<OpTailCallVarargs>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_tail_call_forward_arguments(const JSInstruction* currentInstruction)
{
    compileOpCall<OpTailCallForwardArguments>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct_varargs(const JSInstruction* currentInstruction)
{
    compileOpCall<OpConstructVarargs>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_construct(const JSInstruction* currentInstruction)
{
    compileOpCall<OpConstruct>(currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpCall>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_tail_call(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpTailCall>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_call_eval(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileCallEvalSlowCase(currentInstruction, iter);
}

void JIT::emitSlow_op_call_varargs(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_tail_call_varargs(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpTailCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_tail_call_forward_arguments(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpTailCallForwardArguments>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct_varargs(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpConstructVarargs>(currentInstruction, iter, m_callLinkInfoIndex++);
}

void JIT::emitSlow_op_construct(const JSInstruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase<OpConstruct>(currentInstruction, iter, m_callLinkInfoIndex++);
}

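// op_iterator_open and op_iterator_next first call a try-fast helper
// (selected by bytecode width) that can handle common iterators without the
// generic protocol. The helper reports the IterationMode it took in
// returnValueGPR2; only IterationMode::Generic falls through to the full
// call-plus-GetById sequence emitted below.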
void JIT::emit_op_iterator_open(const JSInstruction* instruction)
{
    auto bytecode = instruction->as<OpIteratorOpen>();
    auto* tryFastFunction = ([&] () {
        switch (instruction->width()) {
        case Narrow: return iterator_open_try_fast_narrow;
        case Wide16: return iterator_open_try_fast_wide16;
        case Wide32: return iterator_open_try_fast_wide32;
        default: RELEASE_ASSERT_NOT_REACHED();
        }
    })();

    JITSlowPathCall slowPathCall(this, tryFastFunction);
    slowPathCall.call();
    Jump fastCase = branch32(NotEqual, GPRInfo::returnValueGPR2, TrustedImm32(static_cast<uint32_t>(IterationMode::Generic)));

    compileOpCall<OpIteratorOpen>(instruction, m_callLinkInfoIndex++);
    advanceToNextCheckpoint();

    // The call result (the iterator) is in returnValueJSR.

    emitJumpSlowCaseIfNotJSCell(returnValueJSR);

    using BaselineJITRegisters::GetById::baseJSR;
    using BaselineJITRegisters::GetById::resultJSR;
    using BaselineJITRegisters::GetById::FastPath::stubInfoGPR;

    static_assert(noOverlap(returnValueJSR, stubInfoGPR));
    static_assert(returnValueJSR == baseJSR); // Otherwise we would need a move(returnValueJSR, baseJSR) here.
    static_assert(baseJSR == resultJSR);

    const Identifier* ident = &vm().propertyNames->next;

    auto [ stubInfo, stubInfoIndex ] = addUnlinkedStructureStubInfo();
    JITGetByIdGenerator gen(
        nullptr, stubInfo, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), RegisterSet::stubUnavailableRegisters(),
        CacheableIdentifier::createFromImmortalIdentifier(ident->impl()), baseJSR, resultJSR, stubInfoGPR, AccessType::GetById);
    gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;

    gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
    resetSP(); // We might OSR exit here, so we need to conservatively reset SP.
    addSlowCase();
    m_getByIds.append(gen);

    emitValueProfilingSite(bytecode, resultJSR);
    emitPutVirtualRegister(bytecode.m_next, resultJSR);

    fastCase.link(this);
}

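// The slow path has two entry groups, linked in the order the fast path
// registered them: first the call slow case (re-entering after the generic
// call), then the slow case for the "next" property load, which also checks
// that the call produced an object and otherwise throws.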
void JIT::emitSlow_op_iterator_open(const JSInstruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    auto bytecode = instruction->as<OpIteratorOpen>();

    linkAllSlowCases(iter);
    compileOpCallSlowCase<OpIteratorOpen>(instruction, iter, m_callLinkInfoIndex++);
    resetSP();
    emitPutCallResult(bytecode);
    emitJumpSlowToHotForCheckpoint(jump());

    linkAllSlowCases(iter);
    JSValueRegs iteratorJSR = BaselineJITRegisters::GetById::baseJSR;
    JumpList notObject;
    notObject.append(branchIfNotCell(iteratorJSR));
    notObject.append(branchIfNotObject(iteratorJSR.payloadGPR()));

    VirtualRegister nextVReg = bytecode.m_next;
    UniquedStringImpl* ident = vm().propertyNames->next.impl();

    JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

    Label coldPathBegin = label();

    using SlowOperation = decltype(operationGetByIdOptimize);
    constexpr GPRReg globalObjectGPR = preferredArgumentGPR<SlowOperation, 0>();
    constexpr GPRReg stubInfoGPR = preferredArgumentGPR<SlowOperation, 1>();
    constexpr JSValueRegs arg2JSR = preferredArgumentJSR<SlowOperation, 2>();

    moveValueRegs(iteratorJSR, arg2JSR);
    loadGlobalObject(globalObjectGPR);
    loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
    callOperationWithProfile<SlowOperation>(
        bytecode,
        Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()),
        nextVReg,
        globalObjectGPR, stubInfoGPR, arg2JSR,
        CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
    gen.reportSlowPathCall(coldPathBegin, Call());

    auto done = jump();

    notObject.link(this);
    loadGlobalObject(argumentGPR0);
    callOperation(operationThrowIteratorResultIsNotObject, argumentGPR0);

    done.link(this);
}

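// op_iterator_next writes three results (the raw call result, then "done",
// then "value"); advanceToNextCheckpoint marks the boundaries so OSR exit can
// resume part-way through the instruction. The empty-value check on m_next
// below detects iterators set up by the fast path, which leave no cached next
// function and so must take the try-fast route again.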
void JIT::emit_op_iterator_next(const JSInstruction* instruction)
{
    auto bytecode = instruction->as<OpIteratorNext>();
    auto* tryFastFunction = ([&] () {
        switch (instruction->width()) {
        case Narrow: return iterator_next_try_fast_narrow;
        case Wide16: return iterator_next_try_fast_wide16;
        case Wide32: return iterator_next_try_fast_wide32;
        default: RELEASE_ASSERT_NOT_REACHED();
        }
    })();

    using BaselineJITRegisters::GetById::baseJSR;
    using BaselineJITRegisters::GetById::resultJSR;
    using BaselineJITRegisters::GetById::FastPath::dontClobberJSR;
    using BaselineJITRegisters::GetById::FastPath::stubInfoGPR;

    constexpr JSValueRegs nextJSR = baseJSR; // Used as a temporary register.
    emitGetVirtualRegister(bytecode.m_next, nextJSR);
    Jump genericCase = branchIfNotEmpty(nextJSR);

    JITSlowPathCall slowPathCall(this, tryFastFunction);
    slowPathCall.call();
    Jump fastCase = branch32(NotEqual, GPRInfo::returnValueGPR2, TrustedImm32(static_cast<uint32_t>(IterationMode::Generic)));

    genericCase.link(this);
    load8FromMetadata(bytecode, OpIteratorNext::Metadata::offsetOfIterationMetadata() + IterationModeMetadata::offsetOfSeenModes(), regT0);
    or32(TrustedImm32(static_cast<uint8_t>(IterationMode::Generic)), regT0);
    store8ToMetadata(regT0, bytecode, OpIteratorNext::Metadata::offsetOfIterationMetadata() + IterationModeMetadata::offsetOfSeenModes());
    compileOpCall<OpIteratorNext>(instruction, m_callLinkInfoIndex++);
    advanceToNextCheckpoint();

    // The call result ({ done, value } JSObject) is in regT0 (regT1/regT0 on 32-bit).
    static_assert(noOverlap(resultJSR, stubInfoGPR));

    constexpr JSValueRegs iterCallResultJSR = dontClobberJSR;
    moveValueRegs(returnValueJSR, iterCallResultJSR);

    constexpr JSValueRegs doneJSR = resultJSR;
    {
        emitJumpSlowCaseIfNotJSCell(returnValueJSR);

        RegisterSet preservedRegs = RegisterSet::stubUnavailableRegisters();
        preservedRegs.set(iterCallResultJSR);
        auto [ stubInfo, stubInfoIndex ] = addUnlinkedStructureStubInfo();
        JITGetByIdGenerator gen(
            nullptr, stubInfo, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), preservedRegs,
            CacheableIdentifier::createFromImmortalIdentifier(vm().propertyNames->done.impl()), returnValueJSR, doneJSR, stubInfoGPR, AccessType::GetById);
        gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;

        gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
        resetSP(); // We might OSR exit here, so we need to conservatively reset SP.
        addSlowCase();
        m_getByIds.append(gen);

        emitValueProfilingSite(bytecode, doneJSR);
        emitPutVirtualRegister(bytecode.m_done, doneJSR);
        advanceToNextCheckpoint();
    }

    {
        RegisterSet usedRegisters(doneJSR, iterCallResultJSR);
        ScratchRegisterAllocator scratchAllocator(usedRegisters);
        GPRReg scratch1 = scratchAllocator.allocateScratchGPR();
        GPRReg scratch2 = scratchAllocator.allocateScratchGPR();
        GPRReg globalGPR = scratchAllocator.allocateScratchGPR();
        const bool shouldCheckMasqueradesAsUndefined = false;
        loadGlobalObject(globalGPR);
        JumpList iterationDone = branchIfTruthy(vm(), doneJSR, scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, globalGPR);

        moveValueRegs(iterCallResultJSR, baseJSR);

        auto [ stubInfo, stubInfoIndex ] = addUnlinkedStructureStubInfo();
        JITGetByIdGenerator gen(
            nullptr, stubInfo, JITType::BaselineJIT, CodeOrigin(m_bytecodeIndex), CallSiteIndex(BytecodeIndex(m_bytecodeIndex.offset())), RegisterSet::stubUnavailableRegisters(),
            CacheableIdentifier::createFromImmortalIdentifier(vm().propertyNames->value.impl()), baseJSR, resultJSR, stubInfoGPR, AccessType::GetById);
        gen.m_unlinkedStubInfoConstantIndex = stubInfoIndex;

        gen.generateBaselineDataICFastPath(*this, stubInfoIndex, stubInfoGPR);
        resetSP(); // We might OSR exit here, so we need to conservatively reset SP.
        addSlowCase();
        m_getByIds.append(gen);

        emitValueProfilingSite(bytecode, resultJSR);
        emitPutVirtualRegister(bytecode.m_value, resultJSR);

        iterationDone.link(this);
    }

    fastCase.link(this);
}

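// Slow paths for op_iterator_next, linked in the order the fast path
// registered them: the generic call itself, then the "done" load (which also
// throws if the call result is not an object), then the "value" load.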
void JIT::emitSlow_op_iterator_next(const JSInstruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    auto bytecode = instruction->as<OpIteratorNext>();

    linkAllSlowCases(iter);
    compileOpCallSlowCase<OpIteratorNext>(instruction, iter, m_callLinkInfoIndex++);
    resetSP();
    emitPutCallResult(bytecode);
    emitJumpSlowToHotForCheckpoint(jump());

    using BaselineJITRegisters::GetById::resultJSR;
    using BaselineJITRegisters::GetById::FastPath::dontClobberJSR;

    constexpr JSValueRegs iterCallResultJSR = dontClobberJSR;

    {
        VirtualRegister doneVReg = bytecode.m_done;

        linkAllSlowCases(iter);
        JumpList notObject;
        notObject.append(branchIfNotCell(iterCallResultJSR));

        UniquedStringImpl* ident = vm().propertyNames->done.impl();
        JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

        Label coldPathBegin = label();

        notObject.append(branchIfNotObject(iterCallResultJSR.payloadGPR()));

        using SlowOperation = decltype(operationGetByIdOptimize);
        constexpr GPRReg globalObjectGPR = preferredArgumentGPR<SlowOperation, 0>();
        constexpr GPRReg stubInfoGPR = preferredArgumentGPR<SlowOperation, 1>();
        constexpr JSValueRegs arg2JSR = preferredArgumentJSR<SlowOperation, 2>();

        moveValueRegs(iterCallResultJSR, arg2JSR);
        loadGlobalObject(globalObjectGPR);
        loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
        callOperationWithProfile<SlowOperation>(
            bytecode,
            Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()),
            doneVReg,
            globalObjectGPR, stubInfoGPR, arg2JSR,
            CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
        gen.reportSlowPathCall(coldPathBegin, Call());

        constexpr JSValueRegs doneJSR = resultJSR;
        emitGetVirtualRegister(doneVReg, doneJSR);
        emitGetVirtualRegister(bytecode.m_value, iterCallResultJSR);
        emitJumpSlowToHotForCheckpoint(jump());

        notObject.link(this);
        loadGlobalObject(argumentGPR0);
        callOperation(operationThrowIteratorResultIsNotObject, argumentGPR0);
    }

    {
        linkAllSlowCases(iter);
        VirtualRegister valueVReg = bytecode.m_value;

        UniquedStringImpl* ident = vm().propertyNames->value.impl();
        JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];

        Label coldPathBegin = label();

        using SlowOperation = decltype(operationGetByIdOptimize);
        constexpr GPRReg globalObjectGPR = preferredArgumentGPR<SlowOperation, 0>();
        constexpr GPRReg stubInfoGPR = preferredArgumentGPR<SlowOperation, 1>();
        constexpr JSValueRegs arg2JSR = preferredArgumentJSR<SlowOperation, 2>();

        moveValueRegs(iterCallResultJSR, arg2JSR);
        loadGlobalObject(globalObjectGPR);
        loadConstant(gen.m_unlinkedStubInfoConstantIndex, stubInfoGPR);
        callOperationWithProfile<SlowOperation>(
            bytecode,
            Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()),
            valueVReg,
            globalObjectGPR, stubInfoGPR, arg2JSR,
            CacheableIdentifier::createFromImmortalIdentifier(ident).rawBits());
        gen.reportSlowPathCall(coldPathBegin, Call());
    }
}

} // namespace JSC

#endif // ENABLE(JIT)