source: webkit/trunk/Source/JavaScriptCore/dfg/DFGOSREntry.cpp

Last change on this file was 294794, checked in by [email protected], 3 years ago

[JSC][ARMv7] Support callee save FPRs

Patch by Geza Lore <Geza Lore> on 2022-05-25
https://bugs.webkit.org/show_bug.cgi?id=240376

Reviewed by Yusuke Suzuki.

ARMv7 FPRs d8-d15 (also referenced as s16-s31 and q4-q7) are callee save
in the host ABI, but JSC is currently unaware of this. That causes no
problems today, because these registers are unused, but the upcoming Wasm
JITs will use them.

In preparation for the 32-bit ports of the Wasm JITs, this patch:

  • Teaches JSC about callee save FPRs on ARMv7. d8-d15 are host ABI callee save, but only d8-d14 are VM callee save, i.e., we treat d15 as a volatile register in JIT code. This is so we can use d15 as a macro assembler scratch register.
  • Changes offlineasm and MacroAssemblerARMv7 to use d15 as the FP scratch register. We do this so we can use the full range of d0-d7 as temporaries, and in particular as Wasm argument/return registers.
  • To achieve the above, modifies RegisterAtOffsetList, as GPRs and FPRs have different sizes on JSVALUE32_64 platforms (see the sketch after the file list below).
  • Adds the ARMv7-specific registers to RegisterSet::macroScratchRegisters().
  • assembler/ARMv7Registers.h:
  • assembler/MacroAssemblerARMv7.h:
  • b3/air/AirCode.cpp:

(JSC::B3::Air::Code::calleeSaveRegisterAtOffsetList const):
(JSC::B3::Air::Code::dump const):

  • b3/testb3_7.cpp:

(testInfiniteLoopDoesntCauseBadHoisting):

  • bytecode/CodeBlock.cpp:

(JSC::CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters):
(JSC::CodeBlock::calleeSaveSpaceAsVirtualRegisters):
(JSC::roundCalleeSaveSpaceAsVirtualRegisters): Deleted.

  • bytecode/ValueRecovery.h:

(JSC::ValueRecovery::calleeSaveGPRDisplacedInJSStack):
(JSC::ValueRecovery::calleeSaveRegDisplacedInJSStack): Deleted.

  • dfg/DFGOSREntry.cpp:

(JSC::DFG::prepareOSREntry):

  • dfg/DFGOSRExitCompilerCommon.cpp:

(JSC::DFG::calleeSaveSlot):

  • ftl/FTLOSRExitCompiler.cpp:

(JSC::FTL::compileStub):

  • interpreter/Interpreter.cpp:

(JSC::UnwindFunctor::copyCalleeSavesToEntryFrameCalleeSavesBuffer const):

  • jit/AssemblyHelpers.cpp:

(JSC::AssemblyHelpers::restoreCalleeSavesFromEntryFrameCalleeSavesBuffer):
(JSC::AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBufferImpl):
(JSC::AssemblyHelpers::copyCalleeSavesToEntryFrameCalleeSavesBufferImpl):
(JSC::AssemblyHelpers::emitSave):
(JSC::AssemblyHelpers::emitRestore):
(JSC::AssemblyHelpers::emitSaveCalleeSavesFor):
(JSC::AssemblyHelpers::emitRestoreCalleeSavesFor):
(JSC::AssemblyHelpers::copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer):
(JSC::AssemblyHelpers::emitSaveOrCopyLLIntBaselineCalleeSavesFor):

  • jit/CallFrameShuffleData.cpp:

(JSC::CallFrameShuffleData::setupCalleeSaveRegisters):

  • jit/CallFrameShuffler.h:

(JSC::CallFrameShuffler::snapshot const):

  • jit/CallFrameShuffler32_64.cpp:

(JSC::CallFrameShuffler::emitDisplace):

  • jit/FPRInfo.h:
  • jit/GPRInfo.h:
  • jit/RegisterAtOffsetList.cpp:

(JSC::RegisterAtOffsetList::RegisterAtOffsetList):

  • jit/RegisterAtOffsetList.h:

(JSC::RegisterAtOffsetList::registerCount const):
(JSC::RegisterAtOffsetList::sizeOfAreaInBytes const):
(JSC::RegisterAtOffsetList::adjustOffsets):
(JSC::RegisterAtOffsetList::size const): Deleted.
(JSC::RegisterAtOffsetList::at): Deleted.

  • jit/RegisterSet.cpp:

(JSC::RegisterSet::macroScratchRegisters):
(JSC::RegisterSet::vmCalleeSaveRegisters):

  • llint/LowLevelInterpreter.asm:
  • offlineasm/arm.rb:
  • wasm/js/JSToWasm.cpp:

(JSC::Wasm::createJSToWasmWrapper):

  • wasm/js/WasmToJS.cpp:

(JSC::Wasm::wasmToJS):

  • wasm/js/WebAssemblyFunction.cpp:

(JSC::WebAssemblyFunction::jsCallEntrypointSlow):

Canonical link: https://commits.webkit.org/250952@main
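
The RegisterAtOffsetList change is the subtle part of the patch: on JSVALUE32_64 platforms a GPR save slot is 4 bytes while an FPR save slot is 8 bytes, so the byte size of the callee-save area can no longer be derived from the register count alone. The standalone sketch below illustrates the idea; it is not JSC's actual class, and the simplified Reg type and constructor shape are illustrative only.

#include <cstddef>
#include <vector>

// Hypothetical stand-in for JSC's Reg: just enough to distinguish GPRs from FPRs.
struct Reg {
    bool isGPR;
};

struct RegisterAtOffset {
    Reg reg;
    std::ptrdiff_t offset; // byte offset into the callee-save area
};

class RegisterAtOffsetList {
public:
    // Assign each register a byte offset. On a 32-bit port a GPR takes
    // sizeof(void*) == 4 bytes while an FPR takes sizeof(double) == 8 bytes.
    // (A real implementation would also round FPR offsets up to 8-byte alignment.)
    explicit RegisterAtOffsetList(const std::vector<Reg>& regs)
    {
        std::ptrdiff_t offset = 0;
        for (Reg reg : regs) {
            m_registers.push_back({ reg, offset });
            offset += reg.isGPR ? sizeof(void*) : sizeof(double);
        }
        m_sizeOfAreaInBytes = static_cast<std::size_t>(offset);
    }

    std::size_t registerCount() const { return m_registers.size(); }

    // With mixed slot widths this is no longer registerCount() times a constant,
    // which is why the patch replaces the old size() with distinct
    // registerCount() and sizeOfAreaInBytes() queries.
    std::size_t sizeOfAreaInBytes() const { return m_sizeOfAreaInBytes; }

private:
    std::vector<RegisterAtOffset> m_registers;
    std::size_t m_sizeOfAreaInBytes { 0 };
};

On a 64-bit port both slot widths are 8 bytes, so registerCount() * 8 and sizeOfAreaInBytes() agree; on ARMv7 they diverge as soon as an FPR joins the set, which is exactly what the callee-save FPR support above introduces.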

/*
 * Copyright (C) 2011-2019 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGOSREntry.h"

#if ENABLE(DFG_JIT)

#include "BytecodeStructs.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "DFGJITCode.h"
#include "DFGNode.h"
#include "JSCJSValueInlines.h"
#include "RegisterAtOffsetList.h"
#include "VMInlines.h"
#include <wtf/CommaPrinter.h>

namespace JSC { namespace DFG {

void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print(m_bytecodeIndex, ", machine code = ", RawPointer(m_machineCode.executableAddress()));
    out.print(", stack rules = [");

    auto printOperand = [&] (VirtualRegister reg) {
        out.print(inContext(m_expectedValues.operand(reg), context), " (");
        VirtualRegister toReg;
        bool overwritten = false;
        for (OSREntryReshuffling reshuffling : m_reshufflings) {
            if (reg == VirtualRegister(reshuffling.fromOffset)) {
                toReg = VirtualRegister(reshuffling.toOffset);
                break;
            }
            if (reg == VirtualRegister(reshuffling.toOffset))
                overwritten = true;
        }
        if (!overwritten && !toReg.isValid())
            toReg = reg;
        if (toReg.isValid()) {
            if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
                out.print("ignored");
            else
                out.print("maps to ", toReg);
        } else
            out.print("overwritten");
        if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
            out.print(", forced double");
        if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
            out.print(", forced machine int");
        out.print(")");
    };

    CommaPrinter comma;
    for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
        out.print(comma, "arg", argumentIndex, ":");
        printOperand(virtualRegisterForArgumentIncludingThis(argumentIndex));
    }
    for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
        out.print(comma, "loc", localIndex, ":");
        printOperand(virtualRegisterForLocal(localIndex));
    }

    out.print("], machine stack used = ", m_machineStackUsed);
}

void OSREntryData::dump(PrintStream& out) const
{
    dumpInContext(out, nullptr);
}

SUPPRESS_ASAN
void* prepareOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* codeBlock, BytecodeIndex bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITType::BaselineJIT);
    ASSERT(codeBlock->jitCode()->dfgCommon()->isStillValid());
    ASSERT(!codeBlock->isJettisoned());

    if (!Options::useOSREntryToDFG())
        return nullptr;

    dataLogLnIf(Options::verboseOSR(),
        "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
        " from ", bytecodeIndex);

    sanitizeStackForVM(vm);

    if (bytecodeIndex)
        codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);

    if (codeBlock->jitType() != JITType::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITType::FTLJIT);

        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foos on the stack; let's call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.

        dataLogLnIf(Options::verboseOSR(), "    OSR failed because the target code block is not DFG.");
        return nullptr;
    }

    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);

    if (!entry) {
        dataLogLnIf(Options::verboseOSR(), "    OSR failed because the entrypoint was optimized out.");
        return nullptr;
    }

    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);

    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().

    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.

    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        JSValue value;
        if (!argument)
            value = callFrame->thisValue();
        else
            value = callFrame->argument(argument - 1);

        if (!entry->m_expectedValues.argument(argument).validateOSREntryValue(value, FlushedJSValue)) {
            dataLogLnIf(Options::verboseOSR(),
                "    OSR failed because argument ", argument, " is ", value,
                ", expected ", entry->m_expectedValues.argument(argument));
            return nullptr;
        }
    }

    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        JSValue value = callFrame->registers()[localOffset].asanUnsafeJSValue();
        FlushFormat format = FlushedJSValue;

        if (entry->m_localsForcedAnyInt.get(local)) {
            if (!value.isAnyInt()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected ",
                    "machine int.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asAnyInt());
            format = FlushedInt52;
        }

        if (entry->m_localsForcedDouble.get(local)) {
            if (!value.isNumber()) {
                dataLogLnIf(Options::verboseOSR(),
                    "    OSR failed because variable ", localOffset, " is ",
                    value, ", expected number.");
                return nullptr;
            }
            value = jsDoubleNumber(value.asNumber());
            format = FlushedDouble;
        }

        if (!entry->m_expectedValues.local(local).validateOSREntryValue(value, format)) {
            dataLogLnIf(Options::verboseOSR(),
                "    OSR failed because variable ", VirtualRegister(localOffset), " is ",
                value, ", expected ",
                entry->m_expectedValues.local(local), ".");
            return nullptr;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running albeit less quickly.

    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm.ensureStackCapacityFor(&callFrame->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()]))) {
        dataLogLnIf(Options::verboseOSR(), "    OSR failed because stack growth failed.");
        return nullptr;
    }

    dataLogLnIf(Options::verboseOSR(), "    OSR should succeed.");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.

    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

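    // Scratch buffer layout: scratch[0] holds the frame size, scratch[1] the tagged
    // target PC, and scratch[2..] the reconstructed call frame (header plus locals).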
    Register* scratch = bitwise_cast<Register*>(vm.scratchBufferForSize(sizeof(Register) * (2 + CallFrame::headerSizeInRegisters + maxFrameSize))->dataBuffer());

    *bitwise_cast<size_t*>(scratch + 0) = frameSize;

    void* targetPC = entry->m_machineCode.executableAddress();
    RELEASE_ASSERT(codeBlock->jitCode()->contains(entry->m_machineCode.untaggedExecutableAddress()));
    dataLogLnIf(Options::verboseOSR(), "    OSR using target PC ", RawPointer(targetPC));
    RELEASE_ASSERT(targetPC);
    *bitwise_cast<void**>(scratch + 1) = tagCodePtrWithStackPointerForJITCall(untagCodePtr<OSREntryPtrTag>(targetPC), callFrame);

    Register* pivot = scratch + 2 + CallFrame::headerSizeInRegisters;

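    // pivot[index] holds the value for VirtualRegister(-1 - index): negative indices
    // cover the call frame header, non-negative indices cover the locals.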
    for (int index = -CallFrame::headerSizeInRegisters; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);

        if (reg.isLocal()) {
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = callFrame->registers()[reg.offset()].asanUnsafeJSValue().asNumber();
                continue;
            }

            if (entry->m_localsForcedAnyInt.get(reg.toLocal())) {
                *bitwise_cast<int64_t*>(pivot + index) = callFrame->registers()[reg.offset()].asanUnsafeJSValue().asAnyInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }

        pivot[index] = callFrame->registers()[reg.offset()].asanUnsafeJSValue();
    }

    // 4) Reshuffle those registers that need reshuffling.
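    // Two passes: read every source into a temporary first, so overlapping
    // from/to offsets cannot clobber each other mid-copy.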
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].asanUnsafeJSValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];

    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }

    // 6) Copy our callee saves to buffer.
#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
    const RegisterAtOffsetList* registerSaveLocations = codeBlock->jitCode()->calleeSaveRegisters();
    RegisterAtOffsetList* allCalleeSaves = RegisterSet::vmCalleeSaveRegisterOffsets();
    RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters());

    unsigned registerCount = registerSaveLocations->registerCount();
    VMEntryRecord* record = vmEntryRecord(vm.topEntryFrame);
    for (unsigned i = 0; i < registerCount; i++) {
        RegisterAtOffset currentEntry = registerSaveLocations->at(i);
        if (dontSaveRegisters.get(currentEntry.reg()))
            continue;
        RELEASE_ASSERT(currentEntry.reg().isGPR());
        RegisterAtOffset* calleeSavesEntry = allCalleeSaves->find(currentEntry.reg());

        if constexpr (CallerFrameAndPC::sizeInRegisters == 2)
            *(bitwise_cast<intptr_t*>(pivot - 1) - currentEntry.offsetAsIndex()) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
        else {
            // We need to adjust by 4 bytes on 32-bit platforms, otherwise we will clobber parts of
            // pivot[-1] when currentEntry.offsetAsIndex() returns -1. This region contains
            // CallerFrameAndPC and if it is clobbered, we will have a corrupted stack.
            // Also, we need to store callee-save registers swapped in pairs in the scratch buffer,
            // otherwise they will be swapped when copied to the call frame during OSR entry.
            // Here is how we would like to have the buffer configured:
            //
            // pivot[-4] = ArgumentCountIncludingThis
            // pivot[-3] = Callee
            // pivot[-2] = CodeBlock
            // pivot[-1] = CallerFrameAndReturnPC
            // pivot[0]  = csr1/csr0
            // pivot[1]  = csr3/csr2
            // ...
            ASSERT(sizeof(intptr_t) == 4);
            ASSERT(CallerFrameAndPC::sizeInRegisters == 1);
            ASSERT(currentEntry.offsetAsIndex() < 0);

            int offsetAsIndex = currentEntry.offsetAsIndex();
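            // Swap within each aligned 8-byte pair: indices -1 and -2 exchange,
            // -3 and -4 exchange, and so on.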
            int properIndex = offsetAsIndex % 2 ? offsetAsIndex - 1 : offsetAsIndex + 1;
            *(bitwise_cast<intptr_t*>(pivot - 1) + 1 - properIndex) = record->calleeSaveRegistersBuffer[calleeSavesEntry->offsetAsIndex()];
        }
    }
#endif

    // 7) Fix the call frame to have the right code block.

    *bitwise_cast<CodeBlock**>(pivot - (CallFrameSlot::codeBlock + 1)) = codeBlock;

    dataLogLnIf(Options::verboseOSR(), "    OSR returning data buffer ", RawPointer(scratch));
    return scratch;
}

MacroAssemblerCodePtr<ExceptionHandlerPtrTag> prepareCatchOSREntry(VM& vm, CallFrame* callFrame, CodeBlock* baselineCodeBlock, CodeBlock* optimizedCodeBlock, BytecodeIndex bytecodeIndex)
{
    ASSERT(optimizedCodeBlock->jitType() == JITType::DFGJIT || optimizedCodeBlock->jitType() == JITType::FTLJIT);
    ASSERT(optimizedCodeBlock->jitCode()->dfgCommon()->isStillValid());
    ASSERT(!optimizedCodeBlock->isJettisoned());

    if (!Options::useOSREntryToDFG() && optimizedCodeBlock->jitCode()->jitType() == JITType::DFGJIT)
        return nullptr;
    if (!Options::useOSREntryToFTL() && optimizedCodeBlock->jitCode()->jitType() == JITType::FTLJIT)
        return nullptr;

    CommonData* dfgCommon = optimizedCodeBlock->jitCode()->dfgCommon();
    RELEASE_ASSERT(dfgCommon);
    DFG::CatchEntrypointData* catchEntrypoint = dfgCommon->catchOSREntryDataForBytecodeIndex(bytecodeIndex);
    if (!catchEntrypoint) {
        // This can be null under some circumstances. The most common is that we didn't
        // compile this op_catch as an entrypoint since it had never executed when starting
        // the compilation.
        return nullptr;
    }

    // We're only allowed to OSR enter if we've proven we have compatible argument types.
    for (unsigned argument = 0; argument < catchEntrypoint->argumentFormats.size(); ++argument) {
        JSValue value = callFrame->uncheckedR(virtualRegisterForArgumentIncludingThis(argument)).jsValue();
        switch (catchEntrypoint->argumentFormats[argument]) {
        case DFG::FlushedInt32:
            if (!value.isInt32())
                return nullptr;
            break;
        case DFG::FlushedCell:
            if (!value.isCell())
                return nullptr;
            break;
        case DFG::FlushedBoolean:
            if (!value.isBoolean())
                return nullptr;
            break;
        case DFG::DeadFlush:
            // This means the argument is not alive. Therefore, it's allowed to be any type.
            break;
        case DFG::FlushedJSValue:
            // An argument is trivially a JSValue.
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    unsigned frameSizeForCheck = dfgCommon->requiredRegisterCountForExecutionAndExit();
    if (UNLIKELY(!vm.ensureStackCapacityFor(&callFrame->registers()[virtualRegisterForLocal(frameSizeForCheck).offset()])))
        return nullptr;

    auto instruction = baselineCodeBlock->instructions().at(callFrame->bytecodeIndex());
    ASSERT(instruction->is<OpCatch>());
    ValueProfileAndVirtualRegisterBuffer* buffer = instruction->as<OpCatch>().metadata(baselineCodeBlock).m_buffer;
    JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer());
    unsigned index = 0;
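    // Copy the profiled local values into the catch OSR entry buffer; the DFG
    // entrypoint will load them from there.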
    buffer->forEach([&] (ValueProfileAndVirtualRegister& profile) {
        if (!VirtualRegister(profile.m_operand).isLocal())
            return;
        dataBuffer[index] = callFrame->uncheckedR(profile.m_operand).jsValue();
        ++index;
    });

    // The active length of catchOSREntryBuffer will be zeroed by the ClearCatchLocals node.
    dfgCommon->catchOSREntryBuffer->setActiveLength(sizeof(JSValue) * index);

    // At this point, we're committed to triggering an OSR entry immediately after we return. Hence, it is safe to modify the stack here.
    callFrame->setCodeBlock(optimizedCodeBlock);

    return catchEntrypoint->machineCode;
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)