/*
 * Copyright (C) 2011-2022 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGFailedFinalizer.h"
#include "DFGInlineCacheWrapperInlines.h"
#include "DFGJITCode.h"
#include "DFGJITFinalizer.h"
#include "DFGOSRExit.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include "MaxFrameExtentForSlowPathCall.h"
#include "ProbeContext.h"
#include "ThunkGenerators.h"
#include "VM.h"

namespace JSC { namespace DFG {

JITCompiler::JITCompiler(Graph& dfg)
    : CCallHelpers(dfg.m_codeBlock)
    , m_graph(dfg)
    , m_jitCode(adoptRef(new JITCode(m_graph.m_plan.isUnlinked())))
    , m_blockHeads(dfg.numBlocks())
    , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
    if (UNLIKELY(shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler))
        m_disassembler = makeUnique<Disassembler>(dfg);
#if ENABLE(FTL_JIT)
    m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy());
    for (BytecodeIndex tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes())
        m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
#endif
}

JITCompiler::~JITCompiler()
{
}

void JITCompiler::linkOSRExits()
{
    ASSERT(m_osrExit.size() == m_exitCompilationInfo.size());
    if (UNLIKELY(m_graph.compilation())) {
        for (unsigned i = 0; i < m_osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            Vector<Label> labels;

            auto appendLabel = [&] (Label label) {
                RELEASE_ASSERT(label.isSet());
                labels.append(label);
            };

            if (!info.m_failureJumps.empty()) {
                for (unsigned j = 0; j < info.m_failureJumps.jumps().size(); ++j)
                    appendLabel(info.m_failureJumps.jumps()[j].label());
            } else if (info.m_replacementSource.isSet())
                appendLabel(info.m_replacementSource);
            m_exitSiteLabels.append(labels);
        }
    }

    JumpList dispatchCases;
    JumpList dispatchCasesWithoutLinkedFailures;
    for (unsigned i = 0; i < m_osrExit.size(); ++i) {
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        JumpList& failureJumps = info.m_failureJumps;
        if (!failureJumps.empty())
            failureJumps.link(this);
        else
            info.m_replacementDestination = label();

        jitAssertHasValidCallFrame();
#if USE(JSVALUE64)
        if (m_graph.m_plan.isUnlinked()) {
            move(TrustedImm32(i), GPRInfo::numberTagRegister);
            if (info.m_replacementDestination.isSet())
                dispatchCasesWithoutLinkedFailures.append(jump());
            else
                dispatchCases.append(jump());
            continue;
        }
#endif
        UNUSED_VARIABLE(dispatchCases);
        UNUSED_VARIABLE(dispatchCasesWithoutLinkedFailures);
        store32(TrustedImm32(i), &vm().osrExitIndex);
        info.m_patchableJump = patchableJump();
    }

#if USE(JSVALUE64)
    if (m_graph.m_plan.isUnlinked()) {
        // When jumping to the OSR exit handler via an exception, we do not have a proper
        // callFrameRegister or constantsRegister. We reload the call frame from
        // VM::callFrameForCatch and use it to rematerialize the constants buffer register.
        // FIXME: The following code can be a DFG Thunk.
        if (!dispatchCasesWithoutLinkedFailures.empty()) {
            dispatchCasesWithoutLinkedFailures.link(this);
            loadPtr(vm().addressOfCallFrameForCatch(), GPRInfo::notCellMaskRegister);
            MacroAssembler::Jump didNotHaveException = branchTestPtr(MacroAssembler::Zero, GPRInfo::notCellMaskRegister);
            move(GPRInfo::notCellMaskRegister, GPRInfo::constantsRegister);
            emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, GPRInfo::constantsRegister, GPRInfo::constantsRegister);
            loadPtr(Address(GPRInfo::constantsRegister, CodeBlock::offsetOfJITData()), GPRInfo::constantsRegister);
            didNotHaveException.link(this);
        }
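        // Shared dispatch for unlinked OSR exits: numberTagRegister carries the exit
        // index; each entry in JITData's exit vector is 16 bytes with the code pointer at
        // offset 0 (both asserted below), so the target is exits + (index << 4). The tag
        // registers are rematerialized before the final farJump because numberTagRegister
        // was clobbered to carry the index.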
        dispatchCases.link(this);
        store32(GPRInfo::numberTagRegister, &vm().osrExitIndex);
        loadPtr(Address(GPRInfo::constantsRegister, JITData::offsetOfExits()), GPRInfo::constantsRegister);
        static_assert(sizeof(JITData::ExitVector::value_type) == 16);
        ASSERT(!JITData::ExitVector::value_type::offsetOfCodePtr());
        lshiftPtr(TrustedImm32(4), GPRInfo::numberTagRegister);
        addPtr(GPRInfo::numberTagRegister, GPRInfo::constantsRegister);
        emitMaterializeTagCheckRegisters();
        farJump(Address(GPRInfo::constantsRegister, JITData::ExitVector::Storage::offsetOfData()), OSRExitPtrTag);
    }
#endif
}

void JITCompiler::compileEntry()
{
    // This code currently matches the old JIT. In the function header we need to
    // save the return address and call frame via the prologue and perform a fast stack check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the stack
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler.)
    emitFunctionPrologue();
    jitAssertCodeBlockOnCallFrameWithType(GPRInfo::regT2, JITType::DFGJIT);
}

void JITCompiler::compileSetupRegistersForEntry()
{
    emitSaveCalleeSaves();
    emitMaterializeTagCheckRegisters();
#if USE(JSVALUE64)
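    // In unlinked DFG code, GPRInfo::constantsRegister holds a pointer to the CodeBlock's
    // JITData; constants, stub infos, and OSR exits are all reached through it (see
    // loadConstant() and linkOSRExits()).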
    if (m_graph.m_plan.isUnlinked()) {
        emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, GPRInfo::constantsRegister);
        loadPtr(Address(GPRInfo::constantsRegister, CodeBlock::offsetOfJITData()), GPRInfo::constantsRegister);
    }
#endif
}

void JITCompiler::compileEntryExecutionFlag()
{
#if ENABLE(FTL_JIT)
    if (m_graph.m_plan.canTierUpAndOSREnter())
        store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
#endif // ENABLE(FTL_JIT)
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by OSR exit code to return
    // to the old JIT code if speculations fail.

    bool compiledSpeculative = m_speculative->compile();
    ASSERT_UNUSED(compiledSpeculative, compiledSpeculative);
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames()->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames();

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

    ASSERT(m_jitCode->m_stringSwitchJumpTables.isEmpty());
    ASSERT(m_jitCode->m_switchJumpTables.isEmpty());
    if (!m_graph.m_stringSwitchJumpTables.isEmpty())
        m_jitCode->m_stringSwitchJumpTables = WTFMove(m_graph.m_stringSwitchJumpTables);
    if (!m_graph.m_switchJumpTables.isEmpty())
        m_jitCode->m_switchJumpTables = WTFMove(m_graph.m_switchJumpTables);
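
    // Populate the jump tables for Switch nodes that actually compiled to a table: every
    // slot starts out pointing at the default (fall-through) block, and each known case
    // is then patched to its target block's head.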
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        switch (data.kind) {
        case SwitchChar:
        case SwitchImm: {
            if (!data.didUseJumpTable) {
                ASSERT(m_jitCode->m_switchJumpTables[data.switchTableIndex].isEmpty());
                continue;
            }

            const UnlinkedSimpleJumpTable& unlinkedTable = m_graph.unlinkedSwitchJumpTable(data.switchTableIndex);
            SimpleJumpTable& linkedTable = m_jitCode->m_switchJumpTables[data.switchTableIndex];
            linkedTable.m_ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
            RELEASE_ASSERT(linkedTable.m_ctiOffsets.size() == unlinkedTable.m_branchOffsets.size());
            for (unsigned j = linkedTable.m_ctiOffsets.size(); j--;)
                linkedTable.m_ctiOffsets[j] = linkedTable.m_ctiDefault;
            for (unsigned j = data.cases.size(); j--;) {
                SwitchCase& myCase = data.cases[j];
                linkedTable.m_ctiOffsets[myCase.value.switchLookupValue(data.kind) - unlinkedTable.m_min] =
                    linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
            }
            break;
        }

        case SwitchString: {
            if (!data.didUseJumpTable) {
                ASSERT(m_jitCode->m_stringSwitchJumpTables[data.switchTableIndex].isEmpty());
                continue;
            }

            const UnlinkedStringJumpTable& unlinkedTable = m_graph.unlinkedStringSwitchJumpTable(data.switchTableIndex);
            StringJumpTable& linkedTable = m_jitCode->m_stringSwitchJumpTables[data.switchTableIndex];
            auto ctiDefault = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[data.fallThrough.block->index]);
            RELEASE_ASSERT(linkedTable.m_ctiOffsets.size() == unlinkedTable.m_offsetTable.size() + 1);
            for (auto& entry : linkedTable.m_ctiOffsets)
                entry = ctiDefault;
            for (unsigned j = data.cases.size(); j--;) {
                SwitchCase& myCase = data.cases[j];
                auto iter = unlinkedTable.m_offsetTable.find(myCase.value.stringImpl());
                RELEASE_ASSERT(iter != unlinkedTable.m_offsetTable.end());
                linkedTable.m_ctiOffsets[iter->value.m_indexInTable] = linkBuffer.locationOf<JSSwitchPtrTag>(m_blockHeads[myCase.target.block->index]);
            }
            break;
        }

        case SwitchCell:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    finalizeInlineCaches(m_getByIds, linkBuffer);
    finalizeInlineCaches(m_getByIdsWithThis, linkBuffer);
    finalizeInlineCaches(m_getByVals, linkBuffer);
    finalizeInlineCaches(m_putByIds, linkBuffer);
    finalizeInlineCaches(m_putByVals, linkBuffer);
    finalizeInlineCaches(m_delByIds, linkBuffer);
    finalizeInlineCaches(m_delByVals, linkBuffer);
    finalizeInlineCaches(m_inByIds, linkBuffer);
    finalizeInlineCaches(m_inByVals, linkBuffer);
    finalizeInlineCaches(m_instanceOfs, linkBuffer);
    finalizeInlineCaches(m_privateBrandAccesses, linkBuffer);
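
    // For unlinked compilations, hand the accumulated UnlinkedStructureStubInfos over to
    // the JITCode, moving them element-wise into a FixedVector sized up front; an
    // unlinked compilation should never have allocated linked stub infos (asserted below).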
    if (m_graph.m_plan.isUnlinked()) {
        m_jitCode->m_unlinkedStubInfos = FixedVector<UnlinkedStructureStubInfo>(m_unlinkedStubInfos.size());
        if (m_jitCode->m_unlinkedStubInfos.size())
            std::move(m_unlinkedStubInfos.begin(), m_unlinkedStubInfos.end(), m_jitCode->m_unlinkedStubInfos.begin());
        ASSERT(m_jitCode->common.m_stubInfos.isEmpty());
    }

    for (auto& record : m_jsCalls) {
        auto& info = *record.info;
        info.setCodeLocations(
            linkBuffer.locationOf<JSInternalPtrTag>(record.slowPathStart),
            linkBuffer.locationOf<JSInternalPtrTag>(record.doneLocation));
    }

    for (auto& record : m_jsDirectCalls) {
        auto& info = *record.info;
        info.setCodeLocations(
            linkBuffer.locationOf<JSInternalPtrTag>(record.slowPath),
            CodeLocationLabel<JSInternalPtrTag>());
    }

    if (!m_exceptionChecks.empty())
        linkBuffer.link(m_exceptionChecks, CodeLocationLabel(vm().getCTIStub(handleExceptionGenerator).retaggedCode<NoPtrTag>()));
    if (!m_exceptionChecksWithCallFrameRollback.empty())
        linkBuffer.link(m_exceptionChecksWithCallFrameRollback, CodeLocationLabel(vm().getCTIStub(handleExceptionWithCallFrameRollbackGenerator).retaggedCode<NoPtrTag>()));

    if (!m_graph.m_plan.isUnlinked()) {
        MacroAssemblerCodeRef<JITThunkPtrTag> osrExitThunk = vm().getCTIStub(osrExitGenerationThunkGenerator);
        auto target = CodeLocationLabel<JITThunkPtrTag>(osrExitThunk.code());
        Vector<JumpReplacement> jumpReplacements;
        for (unsigned i = 0; i < m_osrExit.size(); ++i) {
            OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
            linkBuffer.link(info.m_patchableJump.m_jump, target);
            OSRExit& exit = m_osrExit[i];
            exit.m_patchableJumpLocation = linkBuffer.locationOf<JSInternalPtrTag>(info.m_patchableJump);
            if (info.m_replacementSource.isSet()) {
                jumpReplacements.append(JumpReplacement(
                    linkBuffer.locationOf<JSInternalPtrTag>(info.m_replacementSource),
                    linkBuffer.locationOf<OSRExitPtrTag>(info.m_replacementDestination)));
            }
        }
        m_jitCode->common.m_jumpReplacements = WTFMove(jumpReplacements);
    }

#if ASSERT_ENABLED
    for (auto& info : m_exitCompilationInfo) {
        if (info.m_replacementSource.isSet())
            ASSERT(!m_graph.m_plan.isUnlinked());
    }
    if (m_graph.m_plan.isUnlinked())
        ASSERT(m_jitCode->common.m_jumpReplacements.isEmpty());
#endif

    if (UNLIKELY(m_graph.compilation())) {
        ASSERT(m_exitSiteLabels.size() == m_osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<MacroAssemblerCodePtr<JSInternalPtrTag>> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf<JSInternalPtrTag>(labels[j]));
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
    m_jitCode->m_osrExit = WTFMove(m_osrExit);
    m_jitCode->m_speculationRecovery = WTFMove(m_speculationRecovery);

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow.
            // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is* set, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel<ExceptionHandlerPtrTag> catchLabel = linkBuffer.locationOf<ExceptionHandlerPtrTag>(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_jitCode->common.m_pcToCodeOriginMap = makeUnique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer);

    m_jitCode->m_linkerIR = LinkerIR(WTFMove(m_constantPool));
}
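
// Computes the lowest address the new frame will reach (frameTopOffset is negative, so
// maxFrameSize = -frameTopOffset is the frame size in bytes) and branches to stackOverflow
// if that address is past the VM's soft stack limit. The extra comparison against the
// call frame register presumably guards against pointer wraparound when the frame is
// larger than the reserved zone.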
static void emitStackOverflowCheck(JITCompiler& jit, MacroAssembler::JumpList& stackOverflow)
{
    int frameTopOffset = virtualRegisterForLocal(jit.graph().requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register);
    unsigned maxFrameSize = -frameTopOffset;

    jit.addPtr(MacroAssembler::TrustedImm32(frameTopOffset), GPRInfo::callFrameRegister, GPRInfo::regT1);
    if (UNLIKELY(maxFrameSize > Options::reservedZoneSize()))
        stackOverflow.append(jit.branchPtr(MacroAssembler::Above, GPRInfo::regT1, GPRInfo::callFrameRegister));
    stackOverflow.append(jit.branchPtr(MacroAssembler::Above, MacroAssembler::AbsoluteAddress(jit.vm().addressOfSoftStackLimit()), GPRInfo::regT1));
}

void JITCompiler::compile()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    compileEntry();
    m_speculative = makeUnique<SpeculativeJIT>(*this);

    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();
    compileBody();
    setEndOfMainPath();

    // === Footer code generation ===
    //
    // Generate the stack overflow handling; if the stack check in the entry head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);

    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, GPRInfo::argumentGPR0);
    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, GPRInfo::argumentGPR0);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    auto linkBuffer = makeUnique<LinkBuffer>(*this, m_codeBlock, LinkBuffer::Profile::DFG, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.setFinalizer(makeUnique<FailedFinalizer>(m_graph.m_plan));
        return;
    }

    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    disassemble(*linkBuffer);

    auto codeRef = FINALIZE_DFG_CODE(*linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT)).data());
    m_jitCode->initializeCodeRefForDFG(codeRef, codeRef.code());
    m_jitCode->variableEventStream = m_speculative->finalizeEventStream();

    auto finalizer = makeUnique<JITFinalizer>(m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer));
    m_graph.m_plan.setFinalizer(WTFMove(finalizer));
}
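
// Like compile(), but for function code blocks: in addition to the normal entry point,
// this emits an arity-checked entry point that falls back to the arity fixup thunk when
// the caller passed too few arguments.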
void JITCompiler::compileFunction()
{
    makeCatchOSREntryBuffer();

    setStartOfCode();
    Label entryLabel(this);
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    JumpList stackOverflow;
    emitStackOverflowCheck(*this, stackOverflow);

    // Move the stack pointer down to accommodate locals.
    addPtr(TrustedImm32(-(m_graph.frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = makeUnique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);

    emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, GPRInfo::argumentGPR0);
    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, GPRInfo::argumentGPR0);

    // The fast entry point into a function does not check that the correct number of
    // arguments have been passed to the call (we only use the fast entry point where we
    // can statically determine the correct number of arguments have been passed, or have
    // already checked). In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Call callArityFixup;
    Label arityCheck;
    bool requiresArityFixup = m_codeBlock->numParameters() != 1;
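    // numParameters() == 1 means the only parameter is |this|, which every call site
    // provides, so no arity fixup can ever be needed and the normal entry point can
    // double as the arity-checked one.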
    if (requiresArityFixup) {
        arityCheck = label();
        compileEntry();

        load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCountIncludingThis), GPRInfo::regT1);
        branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
        emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
        emitGetFromCallFrameHeaderPtr(CallFrameSlot::codeBlock, GPRInfo::argumentGPR0);
        loadPtr(Address(GPRInfo::argumentGPR0, CodeBlock::offsetOfGlobalObject()), GPRInfo::argumentGPR0);
        m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->isConstructor() ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
        if (maxFrameExtentForSlowPathCall)
            addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
        branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
        emitStoreCodeOrigin(CodeOrigin(BytecodeIndex(0)));
        move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
        callArityFixup = nearCall();
        jump(fromArityCheck);
    } else
        arityCheck = entryLabel;

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = makeUnique<LinkBuffer>(*this, m_codeBlock, LinkBuffer::Profile::DFG, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.setFinalizer(makeUnique<FailedFinalizer>(m_graph.m_plan));
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    if (requiresArityFixup)
        linkBuffer->link(callArityFixup, FunctionPtr<JITThunkPtrTag>(vm().getCTIStub(arityFixupGenerator).code()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr<JSEntryPtrTag> withArityCheck = linkBuffer->locationOf<JSEntryPtrTag>(arityCheck);

    m_jitCode->initializeCodeRefForDFG(
        FINALIZE_DFG_CODE(*linkBuffer, JSEntryPtrTag, "DFG JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITType::DFGJIT)).data()),
        withArityCheck);
    m_jitCode->variableEventStream = m_speculative->finalizeEventStream();

    auto finalizer = makeUnique<JITFinalizer>(m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
    m_graph.m_plan.setFinalizer(WTFMove(finalizer));
}

void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
    if (shouldDumpDisassembly()) {
        m_disassembler->dump(linkBuffer);
        linkBuffer.didAlreadyDisassemble();
    }

    if (UNLIKELY(m_graph.m_plan.compilation()))
        m_disassembler->reportToProfiler(m_graph.m_plan.compilation(), linkBuffer);
}
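
// Keying the map by the double's bit pattern sidesteps double equality quirks (NaN never
// equals itself, and 0.0 == -0.0 despite different encodings): equal bit patterns share
// one out-of-line slot, while distinct encodings get distinct pool entries.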
#if USE(JSVALUE32_64)
void* JITCompiler::addressOfDoubleConstant(Node* node)
{
    double value = node->asNumber();
    int64_t valueBits = bitwise_cast<int64_t>(value);
    return m_graph.m_doubleConstantsMap.ensure(valueBits, [&]{
        double* addressInConstantPool = m_graph.m_doubleConstants.add();
        *addressInConstantPool = value;
        return addressInConstantPool;
    }).iterator->value;
}
#endif

void JITCompiler::noticeCatchEntrypoint(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer, Vector<FlushFormat>&& argumentFormats)
{
    RELEASE_ASSERT(basicBlock.isCatchEntrypoint);
    RELEASE_ASSERT(basicBlock.intersectionOfCFAHasVisited); // An entrypoint is reachable by definition.
    m_graph.appendCatchEntrypoint(basicBlock.bytecodeBegin, linkBuffer.locationOf<ExceptionHandlerPtrTag>(blockHead), WTFMove(argumentFormats));
}

void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
{
    RELEASE_ASSERT(!basicBlock.isCatchEntrypoint);

    // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
    if (!basicBlock.intersectionOfCFAHasVisited)
        return;

    OSREntryData entry;
    entry.m_bytecodeIndex = basicBlock.bytecodeBegin;
    entry.m_machineCode = linkBuffer.locationOf<OSREntryPtrTag>(blockHead);

    FixedOperands<AbstractValue> expectedValues(basicBlock.intersectionOfPastValuesAtHead);
    Vector<OSREntryReshuffling> reshufflings;

    // Fix the expected values: in our protocol, a dead variable will have an expected
    // value of (None, []). But the old JIT may stash some values there. So we really
    // need (Top, TOP).
    for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
        Node* node = basicBlock.variablesAtHead.argument(argument);
        if (!node || !node->shouldGenerate())
            expectedValues.argument(argument).makeBytecodeTop();
    }
    for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
        Node* node = basicBlock.variablesAtHead.local(local);
        if (!node || !node->shouldGenerate())
            expectedValues.local(local).makeBytecodeTop();
        else {
            VariableAccessData* variable = node->variableAccessData();
            entry.m_machineStackUsed.set(variable->machineLocal().toLocal());

            switch (variable->flushFormat()) {
            case FlushedDouble:
                entry.m_localsForcedDouble.set(local);
                break;
            case FlushedInt52:
                entry.m_localsForcedAnyInt.set(local);
                break;
            default:
                break;
            }

            ASSERT(!variable->operand().isTmp());
            if (variable->operand().virtualRegister() != variable->machineLocal()) {
                reshufflings.append(
                    OSREntryReshuffling(
                        variable->operand().virtualRegister().offset(), variable->machineLocal().offset()));
            }
        }
    }

    entry.m_expectedValues = WTFMove(expectedValues);
    entry.m_reshufflings = WTFMove(reshufflings);
    m_osrEntry.append(WTFMove(entry));
}

void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
{
    OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
    exit.m_codeOrigin = opCatchOrigin;
    exit.m_exceptionHandlerCallSiteIndex = callSite;
    OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
    m_osrExit.append(WTFMove(exit));
    m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
}

void JITCompiler::exceptionCheck()
{
    // It's important that we use origin.forExit here. Consider the case where we hoist a
    // string addition out of a loop, and then exit at the point of that concatenation
    // because of an out-of-memory exception.
    // If the original loop had a try/catch around the string concatenation, and we "caught"
    // that exception inside the loop, then the loop's induction variable would be undefined
    // in the OSR exit value recovery. It's more defensible for the string concatenation,
    // then, to not be caught by the for loop's try/catch.
    // Here is the program I'm speaking about:
    //
    // >>>> let's presume "c = a + b" gets hoisted here.
    // for (var i = 0; i < length; i++) {
    //     try {
    //         c = a + b
    //     } catch(e) {
    //         If we threw an out-of-memory error, and we caught the exception
    //         right here, then "i" would almost certainly be undefined, which
    //         would make no sense.
    //         ...
    //     }
    // }
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
    if (willCatchException) {
        unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream.size();
        MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck(vm());
        // We assume here that this is called after callOperation()/appendCall() is called.
        appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.codeOrigins->lastCallSite(), hadException);
    } else
        m_exceptionChecks.append(emitExceptionCheck(vm()));
}

CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
{
    CodeOrigin opCatchOrigin;
    HandlerInfo* exceptionHandler;
    bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
    CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
    if (willCatchException)
        appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
    return callSite;
}

void JITCompiler::setEndOfMainPath()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
}

void JITCompiler::setEndOfCode()
{
    m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
    if (LIKELY(!m_disassembler))
        return;
    m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
}
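
// Scratch space for OSR entry at catch handlers. The buffer is sized in JSValue slots,
// one per live bytecode local, presumably so entry code can stage the locals' values
// before the DFG frame is set up. Clamping to at least one slot keeps the buffer non-null.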
void JITCompiler::makeCatchOSREntryBuffer()
{
    if (m_graph.m_maxLocalsForCatchOSREntry) {
        uint32_t numberOfLiveLocals = std::max(*m_graph.m_maxLocalsForCatchOSREntry, 1u); // Make sure we always allocate a non-null catchOSREntryBuffer.
        m_jitCode->common.catchOSREntryBuffer = vm().scratchBufferForSize(sizeof(JSValue) * numberOfLiveLocals);
    }
}
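
// Loads slot |index| of the unlinked constant pool: JITData stores the pool inline
// starting at offsetOfData(), so the slot lives at constantsRegister + offsetOfData()
// + index * sizeof(void*). The 32-bit path is never reached, since unlinked DFG appears
// to be 64-bit only.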
void JITCompiler::loadConstant(LinkerIR::Constant index, GPRReg dest)
{
#if USE(JSVALUE64)
    loadPtr(Address(GPRInfo::constantsRegister, JITData::offsetOfData() + sizeof(void*) * index), dest);
#else
    UNUSED_PARAM(index);
    UNUSED_PARAM(dest);
    RELEASE_ASSERT_NOT_REACHED();
#endif
}

void JITCompiler::loadLinkableConstant(LinkableConstant constant, GPRReg dest)
{
    constant.materialize(*this, dest);
}

void JITCompiler::storeLinkableConstant(LinkableConstant constant, Address dest)
{
    constant.store(*this, dest);
}
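
// A LinkableConstant is either a raw pointer baked into the instruction stream (linked
// compilations) or an index into the LinkerIR constant pool that is resolved at link
// time (unlinked compilations). Cells are additionally recorded in the plan's weak
// references.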
JITCompiler::LinkableConstant::LinkableConstant(JITCompiler& jit, JSCell* cell)
{
    jit.m_graph.m_plan.weakReferences().addLazily(cell);
    if (jit.m_graph.m_plan.isUnlinked()) {
        m_index = jit.addToConstantPool(LinkerIR::Type::CellPointer, cell);
        return;
    }
    m_pointer = cell;
}

JITCompiler::LinkableConstant::LinkableConstant(JITCompiler& jit, void* pointer, NonCellTag)
{
    if (jit.m_graph.m_plan.isUnlinked()) {
        m_index = jit.addToConstantPool(LinkerIR::Type::NonCellPointer, pointer);
        return;
    }
    m_pointer = pointer;
}

void JITCompiler::LinkableConstant::materialize(CCallHelpers& jit, GPRReg resultGPR)
{
#if USE(JSVALUE64)
    if (isUnlinked()) {
        jit.loadPtr(unlinkedAddress(), resultGPR);
        return;
    }
#endif
    jit.move(TrustedImmPtr(m_pointer), resultGPR);
}

void JITCompiler::LinkableConstant::store(CCallHelpers& jit, CCallHelpers::Address address)
{
#if USE(JSVALUE64)
    if (isUnlinked()) {
        jit.transferPtr(unlinkedAddress(), address);
        return;
    }
#endif
    jit.storePtr(TrustedImmPtr(m_pointer), address);
}
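
// Constants are deduplicated: m_constantPoolMap maps each (payload, type) pair to the
// pool index of its first occurrence, so repeated requests for the same constant share
// one slot.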
LinkerIR::Constant JITCompiler::addToConstantPool(LinkerIR::Type type, void* payload)
{
    LinkerIR::Value value { payload, type };
    auto result = m_constantPoolMap.add(value, m_constantPoolMap.size());
    if (result.isNewEntry)
        m_constantPool.append(value);
    return result.iterator->value;
}
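
// For unlinked compilations, the stub info lives in a side vector and is referenced
// indirectly through the constant pool; the pool payload is the vector index smuggled
// through a void*. Linked compilations allocate a StructureStubInfo directly and need
// no LinkableConstant.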
std::tuple<CompileTimeStructureStubInfo, JITCompiler::LinkableConstant> JITCompiler::addStructureStubInfo()
{
    if (m_graph.m_plan.isUnlinked()) {
        void* unlinkedStubInfoIndex = bitwise_cast<void*>(static_cast<uintptr_t>(m_unlinkedStubInfos.size()));
        UnlinkedStructureStubInfo* stubInfo = &m_unlinkedStubInfos.alloc();
        LinkerIR::Constant stubInfoIndex = addToConstantPool(LinkerIR::Type::StructureStubInfo, unlinkedStubInfoIndex);
        return std::tuple { stubInfo, LinkableConstant(stubInfoIndex) };
    }
    StructureStubInfo* stubInfo = jitCode()->common.m_stubInfos.add();
    return std::tuple { stubInfo, LinkableConstant() };
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)