/*
 * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCode.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGThunks.h"
#include "FTLForOSREntryJITCode.h"
#include "JumpTable.h"

namespace JSC { namespace DFG {

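// JITData backs an unlinked DFG code block once it is linked into a specific
// CodeBlock: it walks the linker IR constant pool and materializes each entry,
// initializing a real StructureStubInfo from every unlinked stub info and
// storing the raw pointer payload of every other entry as-is.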
JITData::JITData(const JITCode& jitCode, ExitVector&& exits)
    : Base(jitCode.m_linkerIR.size())
    , m_stubInfos(jitCode.m_unlinkedStubInfos.size())
    , m_exits(WTFMove(exits))
{
    for (unsigned i = 0; i < jitCode.m_linkerIR.size(); ++i) {
        auto entry = jitCode.m_linkerIR.at(i);
        switch (entry.type()) {
        case LinkerIR::Type::StructureStubInfo: {
            unsigned index = bitwise_cast<uintptr_t>(entry.pointer());
            const UnlinkedStructureStubInfo& unlinkedStubInfo = jitCode.m_unlinkedStubInfos[index];
            StructureStubInfo& stubInfo = m_stubInfos[index];
            stubInfo.initializeFromDFGUnlinkedStructureStubInfo(unlinkedStubInfo);
            at(i) = &stubInfo;
            break;
        }
        default:
            at(i) = entry.pointer();
            break;
        }
    }
}

JITCode::JITCode(bool isUnlinked)
    : DirectJITCode(JITType::DFGJIT)
    , common(isUnlinked)
{
}

JITCode::~JITCode()
{
}

CommonData* JITCode::dfgCommon()
{
    return &common;
}

JITCode* JITCode::dfg()
{
    return this;
}

void JITCode::shrinkToFit(const ConcurrentJSLocker&)
{
    common.shrinkToFit();
    minifiedDFG.prepareAndShrink();
}

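// Reconstructs the ValueRecovery for every operand live at the given code
// origin by replaying the variable event stream (up to streamIndex) against
// the minified DFG graph; the overload below additionally runs each recovery
// against a call frame to produce concrete JSValues.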
void JITCode::reconstruct(
    CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex,
    Operands<ValueRecovery>& result)
{
    variableEventStream.reconstruct(codeBlock, codeOrigin, minifiedDFG, streamIndex, result);
}

void JITCode::reconstruct(CallFrame* callFrame, CodeBlock* codeBlock, CodeOrigin codeOrigin, unsigned streamIndex, Operands<std::optional<JSValue>>& result)
{
    Operands<ValueRecovery> recoveries;
    reconstruct(codeBlock, codeOrigin, streamIndex, recoveries);

    result = Operands<std::optional<JSValue>>(OperandsLike, recoveries);
    for (size_t i = result.size(); i--;)
        result[i] = recoveries[i].recover(callFrame);
}

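// If the call site is an exception-handling OSR exit, the registers that must
// be preserved are exactly those named by that exit's value recoveries.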
RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
{
    for (OSRExit& exit : m_osrExit) {
        if (exit.isExceptionHandler() && exit.m_exceptionHandlerCallSiteIndex.bits() == callSiteIndex.bits()) {
            Operands<ValueRecovery> valueRecoveries;
            reconstruct(codeBlock, exit.m_codeOrigin, exit.m_streamIndex, valueRecoveries);
            RegisterSet liveAtOSRExit;
            for (size_t index = 0; index < valueRecoveries.size(); ++index) {
                const ValueRecovery& recovery = valueRecoveries[index];
                if (recovery.isInRegisters()) {
                    if (recovery.isInGPR())
                        liveAtOSRExit.set(recovery.gpr());
                    else if (recovery.isInFPR())
                        liveAtOSRExit.set(recovery.fpr());
#if USE(JSVALUE32_64)
                    else if (recovery.isInJSValueRegs()) {
                        liveAtOSRExit.set(recovery.payloadGPR());
                        liveAtOSRExit.set(recovery.tagGPR());
                    }
#endif
                    else
                        RELEASE_ASSERT_NOT_REACHED();
                }
            }

            return liveAtOSRExit;
        }
    }

    return { };
}

#if ENABLE(FTL_JIT)
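// Everything from here to the matching #endif manages tier-up from this DFG
// code block to the FTL. tierUpCounter counts executions of the DFG code;
// when it crosses its threshold, the DFG's tier-up slow path considers
// starting an FTL compile.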
bool JITCode::checkIfOptimizationThresholdReached(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    return tierUpCounter.checkIfThresholdCrossedAndSet(codeBlock);
}

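// The helpers below bias when that tier-up happens: a threshold of zero fires
// on the next invocation, deferIndefinitely() switches tier-up off, and the
// warm-up variants derive a threshold from the raw option value by letting
// the baseline CodeBlock scale it through adjustedCounterValue().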
void JITCode::optimizeNextInvocation(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    dataLogLnIf(Options::verboseOSR(), *codeBlock, ": FTL-optimizing next invocation.");
    tierUpCounter.setNewThreshold(0, codeBlock);
}

void JITCode::dontOptimizeAnytimeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Not FTL-optimizing anytime soon.");
    tierUpCounter.deferIndefinitely();
}

void JITCode::optimizeAfterWarmUp(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    dataLogLnIf(Options::verboseOSR(), *codeBlock, ": FTL-optimizing after warm-up.");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeAfterWarmUp()),
        baseline);
}

void JITCode::optimizeSoon(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    dataLogLnIf(Options::verboseOSR(), *codeBlock, ": FTL-optimizing soon.");
    CodeBlock* baseline = codeBlock->baselineVersion();
    tierUpCounter.setNewThreshold(
        baseline->adjustedCounterValue(Options::thresholdForFTLOptimizeSoon()),
        baseline);
}

void JITCode::forceOptimizationSlowPathConcurrently(CodeBlock* codeBlock)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    dataLogLnIf(Options::verboseOSR(), *codeBlock, ": Forcing slow path concurrently for FTL entry.");
    tierUpCounter.forceSlowPathConcurrently();
}

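// Called once an FTL compile finishes: on success, take the new code at the
// next invocation; on outright failure, never try again; on deferral or
// invalidation, wait for another warm-up period.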
void JITCode::setOptimizationThresholdBasedOnCompilationResult(
    CodeBlock* codeBlock, CompilationResult result)
{
    ASSERT(codeBlock->jitType() == JITType::DFGJIT);
    switch (result) {
    case CompilationSuccessful:
        optimizeNextInvocation(codeBlock);
        codeBlock->baselineVersion()->m_hasBeenCompiledWithFTL = true;
        return;
    case CompilationFailed:
        dontOptimizeAnytimeSoon(codeBlock);
        codeBlock->baselineVersion()->m_didFailFTLCompilation = true;
        return;
    case CompilationDeferred:
        optimizeAfterWarmUp(codeBlock);
        return;
    case CompilationInvalidated:
        // This is weird - it will only happen in cases when the DFG code block (i.e.
        // the code block that this JITCode belongs to) is also invalidated. So it
        // doesn't really matter what we do, but we do the right thing anyway. Note
        // that counting the reoptimization here means we might count it twice; that
        // is generally OK, since it is better to overcount reoptimizations than to
        // undercount them.
        codeBlock->baselineVersion()->countReoptimization();
        optimizeAfterWarmUp(codeBlock);
        return;
    }
    RELEASE_ASSERT_NOT_REACHED();
}

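// An OSR entry block is a separate FTL code block compiled so that execution
// can jump into FTL code from the middle of this DFG code (at a loop header)
// rather than waiting for a fresh call.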
void JITCode::setOSREntryBlock(VM& vm, const JSCell* owner, CodeBlock* osrEntryBlock)
{
    if (Options::verboseOSR()) {
        dataLogLn(RawPointer(this), ": Setting OSR entry block to ", RawPointer(osrEntryBlock));
        dataLogLn("OSR entries will go to ", osrEntryBlock->jitCode()->ftlForOSREntry()->addressForCall(ArityCheckNotRequired));
    }
    m_osrEntryBlock.set(vm, owner, osrEntryBlock);
}

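// Discards a stale OSR entry block: disarm the trigger for its entry
// bytecode, reset the retry counter, and re-arm the tier-up threshold via the
// CompilationDeferred path.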
void JITCode::clearOSREntryBlockAndResetThresholds(CodeBlock* dfgCodeBlock)
{
    ASSERT(m_osrEntryBlock);

    BytecodeIndex osrEntryBytecode = m_osrEntryBlock->jitCode()->ftlForOSREntry()->bytecodeIndex();
    m_osrEntryBlock.clear();
    osrEntryRetry = 0;
    tierUpEntryTriggers.set(osrEntryBytecode, JITCode::TriggerReason::DontTrigger);
    setOptimizationThresholdBasedOnCompilationResult(dfgCodeBlock, CompilationDeferred);
}
#endif // ENABLE(FTL_JIT)

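// Validates that every heap cell this code refers to - in the common data,
// the OSR entry expected values, and the minified graph - is present in the
// set of tracked references.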
void JITCode::validateReferences(const TrackedReferences& trackedReferences)
{
    common.validateReferences(trackedReferences);

    for (OSREntryData& entry : m_osrEntry) {
        for (unsigned i = entry.m_expectedValues.size(); i--;)
            entry.m_expectedValues[i].validateReferences(trackedReferences);
    }

    minifiedDFG.validateReferences(trackedReferences);
}

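// Maps a PC back to the CodeOrigin of the OSR exit whose compiled exit ramp
// contains it. The shared OSR exit generation thunk is skipped because many
// exits point at it, so it cannot identify a single exit.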
std::optional<CodeOrigin> JITCode::findPC(CodeBlock* codeBlock, void* pc)
{
    const auto* jitData = codeBlock->dfgJITData();
    auto osrExitThunk = codeBlock->vm().getCTIStub(osrExitGenerationThunkGenerator).retagged<OSRExitPtrTag>();
    for (unsigned exitIndex = 0; exitIndex < m_osrExit.size(); ++exitIndex) {
        const auto& codeRef = jitData->exitCode(exitIndex);
        if (ExecutableMemoryHandle* handle = codeRef.executableMemory()) {
            if (handle != osrExitThunk.executableMemory()) {
                if (handle->start().untaggedPtr() <= pc && pc < handle->end().untaggedPtr()) {
                    OSRExit& exit = m_osrExit[exitIndex];
                    return std::optional<CodeOrigin>(exit.m_codeOriginForExitProfile);
                }
            }
        }
    }

    return std::nullopt;
}

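// m_osrEntry is kept sorted by bytecode index so that entry-point lookups can
// rely on the ordering (the OSR entry machinery searches this vector by
// bytecode index). The ASSERT-only pass re-checks what std::sort just
// established.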
void JITCode::finalizeOSREntrypoints(Vector<OSREntryData>&& osrEntry)
{
    auto comparator = [] (const auto& a, const auto& b) {
        return a.m_bytecodeIndex < b.m_bytecodeIndex;
    };
    std::sort(osrEntry.begin(), osrEntry.end(), comparator);

#if ASSERT_ENABLED
    auto verifyIsSorted = [&] (auto& osrVector) {
        for (unsigned i = 0; i + 1 < osrVector.size(); ++i)
            ASSERT(osrVector[i].m_bytecodeIndex <= osrVector[i + 1].m_bytecodeIndex);
    };
    verifyIsSorted(osrEntry);
#endif
    m_osrEntry = WTFMove(osrEntry);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)