/*
 * Copyright (C) 2011-2021 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#pragma once

#if ENABLE(DFG_JIT)

#include "AssemblyHelpers.h"
#include "BytecodeLivenessAnalysisInlines.h"
#include "CodeBlock.h"
#include "DFGArgumentPosition.h"
#include "DFGBasicBlock.h"
#include "DFGFrozenValue.h"
#include "DFGNode.h"
#include "DFGPlan.h"
#include "DFGPropertyTypeKey.h"
#include "FullBytecodeLiveness.h"
#include "JITScannable.h"
#include "MethodOfGettingAValueProfile.h"
#include <wtf/BitVector.h>
#include <wtf/GenericHashKey.h>
#include <wtf/HashMap.h>
#include <wtf/StackCheck.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

namespace WTF {
template <typename T> class SingleRootGraph;
}

namespace JSC {

class CodeBlock;
class CallFrame;

namespace DFG {

class BackwardsCFG;
class BackwardsDominators;
class CFG;
class CPSCFG;
class ControlEquivalenceAnalysis;
template <typename T> class Dominators;
template <typename T> class NaturalLoops;
class FlowIndexing;
template<typename> class FlowMap;

using ArgumentsVector = Vector<Node*, 8>;

using SSACFG = CFG;
using CPSDominators = Dominators<CPSCFG>;
using SSADominators = Dominators<SSACFG>;
using CPSNaturalLoops = NaturalLoops<CPSCFG>;
using SSANaturalLoops = NaturalLoops<SSACFG>;

#define DFG_NODE_DO_TO_CHILDREN(graph, node, thingToDo) do {                \
        Node* _node = (node);                                               \
        if (_node->flags() & NodeHasVarArgs) {                              \
            for (unsigned _childIdx = _node->firstChild();                  \
                _childIdx < _node->firstChild() + _node->numChildren();     \
                _childIdx++) {                                              \
                if (!!(graph).m_varArgChildren[_childIdx])                  \
                    thingToDo(_node, (graph).m_varArgChildren[_childIdx]);  \
            }                                                               \
        } else {                                                            \
            for (unsigned _edgeIndex = 0; _edgeIndex < AdjacencyList::Size; _edgeIndex++) { \
                Edge& _edge = _node->children.child(_edgeIndex);            \
                if (!_edge)                                                 \
                    break;                                                  \
                thingToDo(_node, _edge);                                    \
            }                                                               \
        }                                                                   \
    } while (false)

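// Example (illustrative sketch, not part of the original header): visiting every
// live child edge of a node, var-args or not. "edgeCount" and the functor are
// hypothetical; the functor is invoked as thingToDo(node, edge).
//
//     unsigned edgeCount = 0;
//     auto countEdge = [&] (Node*, Edge&) { ++edgeCount; };
//     DFG_NODE_DO_TO_CHILDREN(graph, someNode, countEdge);
//
// Most call sites go through Graph::doToChildren() / doToChildrenWithNode(),
// declared below, which wrap this macro.
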
#define DFG_ASSERT(graph, node, assertion, ...) do {                       \
        if (!!(assertion))                                                  \
            break;                                                          \
        (graph).logAssertionFailure(                                        \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, #assertion);   \
        CRASH_WITH_SECURITY_IMPLICATION_AND_INFO(__VA_ARGS__);              \
    } while (false)

#define DFG_CRASH(graph, node, reason, ...) do {                            \
        (graph).logAssertionFailure(                                        \
            (node), __FILE__, __LINE__, WTF_PRETTY_FUNCTION, (reason));     \
        CRASH_WITH_SECURITY_IMPLICATION_AND_INFO(__VA_ARGS__);              \
    } while (false)

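// Example (illustrative sketch): both macros route through
// Graph::logAssertionFailure() before crashing, so passing the node (or block,
// or nullptr) controls how much IR gets dumped. The trailing arguments become
// the CRASH_WITH_SECURITY_IMPLICATION_AND_INFO payload. Hypothetical usage from
// inside a Graph member function:
//
//     DFG_ASSERT(*this, node, node->child1().useKind() == Int32Use, node->op());
//     DFG_CRASH(*this, node, "Unhandled node type");
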
struct InlineVariableData {
    InlineCallFrame* inlineCallFrame;
    unsigned argumentPositionStart;
    VariableAccessData* calleeVariable;
};

enum AddSpeculationMode {
    DontSpeculateInt32,
    SpeculateInt32AndTruncateConstants,
    SpeculateInt32
};

struct Prefix {
    enum NoHeaderTag { NoHeader };

    Prefix() { }

    Prefix(const char* prefixStr, NoHeaderTag tag = NoHeader)
        : prefixStr(prefixStr)
        , noHeader(tag == NoHeader)
    { }

    Prefix(NoHeaderTag)
        : noHeader(true)
    { }

    void dump(PrintStream& out) const;

    void clearBlockIndex() { blockIndex = -1; }
    void clearNodeIndex() { nodeIndex = -1; }

    void enable() { m_enabled = true; }
    void disable() { m_enabled = false; }

    int32_t phaseNumber { -1 };
    int32_t blockIndex { -1 };
    int32_t nodeIndex { -1 };
    const char* prefixStr { nullptr };
    bool noHeader { false };

    static constexpr const char* noString = nullptr;

private:
    bool m_enabled { true };
};

//
// === Graph ===
//
// The order of the nodes may be significant for nodes with side-effects (property accesses, value conversions).
// Nodes that are 'dead' remain in the vector with refCount 0.
class Graph final : public virtual Scannable {
public:
    Graph(VM&, Plan&);
    ~Graph() final;

    void changeChild(Edge& edge, Node* newNode)
    {
        edge.setNode(newNode);
    }

    void changeEdge(Edge& edge, Edge newEdge)
    {
        edge = newEdge;
    }

    void compareAndSwap(Edge& edge, Node* oldNode, Node* newNode)
    {
        if (edge.node() != oldNode)
            return;
        changeChild(edge, newNode);
    }

    void compareAndSwap(Edge& edge, Edge oldEdge, Edge newEdge)
    {
        if (edge != oldEdge)
            return;
        changeEdge(edge, newEdge);
    }

    void performSubstitution(Node* node)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++)
                performSubstitutionForEdge(m_varArgChildren[childIdx]);
        } else {
            performSubstitutionForEdge(node->child1());
            performSubstitutionForEdge(node->child2());
            performSubstitutionForEdge(node->child3());
        }
    }

    void performSubstitutionForEdge(Edge& child)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;

        // Check if there is any replacement.
        Node* replacement = child->replacement();
        if (!replacement)
            return;

        child.setNode(replacement);

        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(!child->replacement());
    }

    template<typename... Params>
    Node* addNode(Params... params)
    {
        return m_nodes.addNew(params...);
    }

    template<typename... Params>
    Node* addNode(SpeculatedType type, Params... params)
    {
        Node* node = m_nodes.addNew(params...);
        node->predict(type);
        return node;
    }

    void deleteNode(Node*);
    unsigned maxNodeCount() const { return m_nodes.size(); }
    Node* nodeAt(unsigned index) const { return m_nodes[index]; }
    void packNodeIndices();

    void dethread();

    FrozenValue* freeze(JSValue); // We use weak freezing by default.
    FrozenValue* freezeStrong(JSValue); // Shorthand for freeze(value)->strengthenTo(StrongValue).

    void convertToConstant(Node* node, FrozenValue* value);
    void convertToConstant(Node* node, JSValue value);
    void convertToStrongConstant(Node* node, JSValue value);

    // Use this to produce a value you know won't be accessed but the compiler
    // might think is live. For example, in our op_iterator_next parsing, the
    // value VirtualRegister is only read if we are not "done". Because the
    // "done" control flow is not in the op_iterator_next bytecode, this is not
    // obvious to the compiler.
    // FIXME: This isn't quite a true bottom value. For example, any object
    // speculation will now be Object|Other as this returns null. We should
    // fix this when we can allocate on the Compiler thread.
    // https://bugs.webkit.org/show_bug.cgi?id=210627
    FrozenValue* bottomValueMatchingSpeculation(SpeculatedType);

    RegisteredStructure registerStructure(Structure* structure)
    {
        StructureRegistrationResult ignored;
        return registerStructure(structure, ignored);
    }
    RegisteredStructure registerStructure(Structure*, StructureRegistrationResult&);
    void registerAndWatchStructureTransition(Structure*);
    void assertIsRegistered(Structure* structure);

    // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
    void dump(PrintStream& = WTF::dataFile(), DumpContext* = nullptr);

    bool terminalsAreValid();

    enum PhiNodeDumpMode { DumpLivePhisOnly, DumpAllPhis };
    void dumpBlockHeader(PrintStream&, const char* prefix, BasicBlock*, PhiNodeDumpMode, DumpContext*);
    void dump(PrintStream&, Edge);
    void dump(PrintStream&, const char* prefix, Node*, DumpContext* = nullptr);
    static int amountOfNodeWhiteSpace(Node*);
    static void printNodeWhiteSpace(PrintStream&, Node*);

    // Dump the code origin of the given node as a diff from the code origin of the
    // preceding node. Returns true if anything was printed.
    bool dumpCodeOrigin(PrintStream&, const char* prefix, Node*& previousNode, Node* currentNode, DumpContext*);

    AddSpeculationMode addSpeculationMode(Node* add, bool leftShouldSpeculateInt32, bool rightShouldSpeculateInt32, PredictionPass pass)
    {
        ASSERT(add->op() == ValueAdd || add->op() == ValueSub || add->op() == ArithAdd || add->op() == ArithSub);

        RareCaseProfilingSource source = add->sourceFor(pass);

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (left->hasConstant())
            return addImmediateShouldSpeculateInt32(add, rightShouldSpeculateInt32, right, left, source);
        if (right->hasConstant())
            return addImmediateShouldSpeculateInt32(add, leftShouldSpeculateInt32, left, right, source);

        return (leftShouldSpeculateInt32 && rightShouldSpeculateInt32 && add->canSpeculateInt32(source)) ? SpeculateInt32 : DontSpeculateInt32;
    }

    AddSpeculationMode valueAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            add->child2()->shouldSpeculateInt32OrBooleanExpectingDefined(),
            pass);
    }

    AddSpeculationMode arithAddSpeculationMode(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(
            add,
            add->child1()->shouldSpeculateInt32OrBooleanForArithmetic(),
            add->child2()->shouldSpeculateInt32OrBooleanForArithmetic(),
            pass);
    }

    AddSpeculationMode addSpeculationMode(Node* add, PredictionPass pass)
    {
        if (add->op() == ValueAdd)
            return valueAddSpeculationMode(add, pass);

        return arithAddSpeculationMode(add, pass);
    }

    bool addShouldSpeculateInt32(Node* add, PredictionPass pass)
    {
        return addSpeculationMode(add, pass) != DontSpeculateInt32;
    }

    bool addShouldSpeculateInt52(Node* add)
    {
        if (!enableInt52())
            return false;

        Node* left = add->child1().node();
        Node* right = add->child2().node();

        if (hasExitSite(add, Int52Overflow))
            return false;

        if (Node::shouldSpeculateInt52(left, right))
            return true;

        auto shouldSpeculateInt52ForAdd = [] (Node* node) {
            // When a DoubleConstant node appears, it means that the user explicitly wrote the constant
            // in double form (1.0 instead of 1). In that case, we honor that decision: using it as an
            // integer is not appropriate.
            if (node->op() == DoubleConstant)
                return false;
            return isIntAnyFormat(node->prediction());
        };

        // Allow Int52 ArithAdd only when one side of the binary operation should be speculated Int52.
        // This is a somewhat conservative decision, because Double to Int52 conversion is not cheap,
        // and frequent back-and-forth conversions between Double and Int52 hurt performance. If one
        // side of the operation is already Int52, constructing the ArithAdd is cheap, since at most
        // one Double to Int52 conversion is required.
        // This recovers some regression in assorted tests while keeping kraken crypto improvements.
        if (!left->shouldSpeculateInt52() && !right->shouldSpeculateInt52())
            return false;

        auto usesAsNumbers = [](Node* node) {
            NodeFlags flags = node->flags() & NodeBytecodeBackPropMask;
            if (!flags)
                return false;
            return (flags & (NodeBytecodeUsesAsNumber | NodeBytecodeNeedsNegZero | NodeBytecodeUsesAsInt | NodeBytecodeUsesAsArrayIndex)) == flags;
        };

        // Wrapping Int52 to Value is also not cheap. Thus, we allow Int52 addition only when the node is used as a number.
        if (!usesAsNumbers(add))
            return false;

        return shouldSpeculateInt52ForAdd(left) && shouldSpeculateInt52ForAdd(right);
    }

    bool binaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        Node* left = node->child1().node();
        Node* right = node->child2().node();

        return Node::shouldSpeculateInt32OrBooleanForArithmetic(left, right)
            && node->canSpeculateInt32(node->sourceFor(pass));
    }

    bool binaryArithShouldSpeculateInt52(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;

        Node* left = node->child1().node();
        Node* right = node->child2().node();

        return Node::shouldSpeculateInt52(left, right)
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }

    bool unaryArithShouldSpeculateInt32(Node* node, PredictionPass pass)
    {
        return node->child1()->shouldSpeculateInt32OrBooleanForArithmetic()
            && node->canSpeculateInt32(pass);
    }

    bool unaryArithShouldSpeculateInt52(Node* node, PredictionPass pass)
    {
        if (!enableInt52())
            return false;
        return node->child1()->shouldSpeculateInt52()
            && node->canSpeculateInt52(pass)
            && !hasExitSite(node, Int52Overflow);
    }

#if USE(BIGINT32)
    bool binaryArithShouldSpeculateBigInt32(Node* node, PredictionPass pass)
    {
        if (!node->canSpeculateBigInt32(pass))
            return false;
        if (hasExitSite(node, BigInt32Overflow))
            return false;
        return Node::shouldSpeculateBigInt32(node->child1().node(), node->child2().node());
    }

    bool unaryArithShouldSpeculateBigInt32(Node* node, PredictionPass pass)
    {
        if (!node->canSpeculateBigInt32(pass))
            return false;
        if (hasExitSite(node, BigInt32Overflow))
            return false;
        return node->child1()->shouldSpeculateBigInt32();
    }
#endif

    bool canOptimizeStringObjectAccess(const CodeOrigin&);

    bool getRegExpPrototypeProperty(JSObject* regExpPrototype, Structure* regExpPrototypeStructure, UniquedStringImpl* uid, JSValue& returnJSValue);

    bool roundShouldSpeculateInt32(Node* arithRound, PredictionPass pass)
    {
        ASSERT(arithRound->op() == ArithRound || arithRound->op() == ArithFloor || arithRound->op() == ArithCeil || arithRound->op() == ArithTrunc);
        return arithRound->canSpeculateInt32(pass) && !hasExitSite(arithRound->origin.semantic, Overflow) && !hasExitSite(arithRound->origin.semantic, NegativeZero);
    }

    static const char* opName(NodeType);

    RegisteredStructureSet* addStructureSet(const StructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();

        for (Structure* structure : structureSet)
            result->add(registerStructure(structure));

        return result;
    }

    RegisteredStructureSet* addStructureSet(const RegisteredStructureSet& structureSet)
    {
        m_structureSets.append();
        RegisteredStructureSet* result = &m_structureSets.last();

        for (RegisteredStructure structure : structureSet)
            result->add(structure);

        return result;
    }

    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return m_codeBlock->globalObjectFor(codeOrigin);
    }

    JSObject* globalThisObjectFor(CodeOrigin codeOrigin)
    {
        JSGlobalObject* object = globalObjectFor(codeOrigin);
        return jsCast<JSObject*>(object->methodTable()->toThis(object, object, ECMAMode::sloppy()));
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return m_profiledBlock;
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
    }

    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        if (m_plan.isUnlinked())
            return false;
        return globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid();
    }

    bool hasGlobalExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->unlinkedCodeBlock()->hasExitSite(FrequentExitSite(exitKind));
    }

    bool hasExitSite(const CodeOrigin& codeOrigin, ExitKind exitKind)
    {
        return baselineCodeBlockFor(codeOrigin)->unlinkedCodeBlock()->hasExitSite(FrequentExitSite(codeOrigin.bytecodeIndex(), exitKind));
    }

    bool hasExitSite(Node* node, ExitKind exitKind)
    {
        return hasExitSite(node->origin.semantic, exitKind);
    }

    MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(Node* currentNode, Node* operandNode);

    BlockIndex numBlocks() const { return m_blocks.size(); }
    BasicBlock* block(BlockIndex blockIndex) const { return m_blocks[blockIndex].get(); }
    BasicBlock* lastBlock() const { return block(numBlocks() - 1); }

    void appendBlock(Ref<BasicBlock>&& basicBlock)
    {
        basicBlock->index = m_blocks.size();
        m_blocks.append(WTFMove(basicBlock));
    }

    void killBlock(BlockIndex blockIndex)
    {
        m_blocks[blockIndex] = nullptr;
    }

    void killBlock(BasicBlock* basicBlock)
    {
        killBlock(basicBlock->index);
    }

    void killBlockAndItsContents(BasicBlock*);

    void killUnreachableBlocks();

    void determineReachability();
    void resetReachability();

    void computeRefCounts();

    unsigned varArgNumChildren(Node* node)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return node->numChildren();
    }

    unsigned numChildren(Node* node)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgNumChildren(node);
        return AdjacencyList::Size;
    }

    template <typename Function = bool(*)(Edge)>
    AdjacencyList copyVarargChildren(Node* node, Function filter = [] (Edge) { return true; })
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        unsigned firstChild = m_varArgChildren.size();
        unsigned numChildren = 0;
        doToChildren(node, [&] (Edge edge) {
            if (filter(edge)) {
                ++numChildren;
                m_varArgChildren.append(edge);
            }
        });

        return AdjacencyList(AdjacencyList::Variable, firstChild, numChildren);
    }

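    // Example (illustrative sketch): rebuilding a var-args node's children while
    // filtering some of them out. "deadNode" is a hypothetical node whose edges we
    // want to drop; the kept edges are appended to m_varArgChildren and the node is
    // repointed at the new range.
    //
    //     node->children = copyVarargChildren(node, [&] (Edge edge) {
    //         return edge.node() != deadNode;
    //     });
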
    Edge& varArgChild(Node* node, unsigned index)
    {
        ASSERT(node->flags() & NodeHasVarArgs);
        return m_varArgChildren[node->firstChild() + index];
    }

    Edge& child(Node* node, unsigned index)
    {
        if (node->flags() & NodeHasVarArgs)
            return varArgChild(node, index);
        return node->children.child(index);
    }

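    // Example (illustrative sketch): index-based iteration over a node's children
    // that works for both var-args and fixed-arity nodes. Fixed-arity nodes report
    // AdjacencyList::Size children, so empty edges must be skipped.
    //
    //     for (unsigned i = 0; i < numChildren(node); ++i) {
    //         Edge edge = child(node, i);
    //         if (!edge)
    //             continue;
    //         // ... use edge ...
    //     }
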
    void voteNode(Node* node, unsigned ballot, float weight = 1)
    {
        switch (node->op()) {
        case ValueToInt32:
        case UInt32ToNumber:
            node = node->child1().node();
            break;
        default:
            break;
        }

        if (node->op() == GetLocal)
            node->variableAccessData()->vote(ballot, weight);
    }

    void voteNode(Edge edge, unsigned ballot, float weight = 1)
    {
        voteNode(edge.node(), ballot, weight);
    }

    void voteChildren(Node* node, unsigned ballot, float weight = 1)
    {
        if (node->flags() & NodeHasVarArgs) {
            for (unsigned childIdx = node->firstChild();
                childIdx < node->firstChild() + node->numChildren();
                childIdx++) {
                if (!!m_varArgChildren[childIdx])
                    voteNode(m_varArgChildren[childIdx], ballot, weight);
            }
            return;
        }

        if (!node->child1())
            return;
        voteNode(node->child1(), ballot, weight);
        if (!node->child2())
            return;
        voteNode(node->child2(), ballot, weight);
        if (!node->child3())
            return;
        voteNode(node->child3(), ballot, weight);
    }

    template<typename T> // T = Node* or Edge
    void substitute(BasicBlock& block, unsigned startIndexInBlock, T oldThing, T newThing)
    {
        for (unsigned indexInBlock = startIndexInBlock; indexInBlock < block.size(); ++indexInBlock) {
            Node* node = block[indexInBlock];
            if (node->flags() & NodeHasVarArgs) {
                for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); ++childIdx) {
                    if (!!m_varArgChildren[childIdx])
                        compareAndSwap(m_varArgChildren[childIdx], oldThing, newThing);
                }
                continue;
            }
            if (!node->child1())
                continue;
            compareAndSwap(node->children.child1(), oldThing, newThing);
            if (!node->child2())
                continue;
            compareAndSwap(node->children.child2(), oldThing, newThing);
            if (!node->child3())
                continue;
            compareAndSwap(node->children.child3(), oldThing, newThing);
        }
    }

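    // Example (illustrative sketch, hypothetical names): after replacing the node
    // at "indexInBlock" with "newNode", rewrite any later uses of the old node
    // within the same block.
    //
    //     substitute(*block, indexInBlock + 1, oldNode, newNode);
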
    // Use this if you introduce a new GetLocal and you know that you introduced it *before*
    // any GetLocals in the basic block.
    // FIXME: it may be appropriate, in the future, to generalize this to handle GetLocals
    // introduced anywhere in the basic block.
    void substituteGetLocal(BasicBlock& block, unsigned startIndexInBlock, VariableAccessData* variableAccessData, Node* newGetLocal);

    void invalidateCFG();
    void invalidateNodeLiveness();

    void clearFlagsOnAllNodes(NodeFlags);

    void clearReplacements();
    void clearEpochs();
    void initializeNodeOwners();

    BlockList blocksInPreOrder();
    BlockList blocksInPostOrder(bool isSafeToValidate = true);

    class NaturalBlockIterable {
    public:
        NaturalBlockIterable()
            : m_graph(nullptr)
        {
        }

        NaturalBlockIterable(const Graph& graph)
            : m_graph(&graph)
        {
        }

        class iterator {
        public:
            iterator()
                : m_graph(nullptr)
                , m_index(0)
            {
            }

            iterator(const Graph& graph, BlockIndex index)
                : m_graph(&graph)
                , m_index(findNext(index))
            {
            }

            BasicBlock* operator*()
            {
                return m_graph->block(m_index);
            }

            iterator& operator++()
            {
                m_index = findNext(m_index + 1);
                return *this;
            }

            bool operator==(const iterator& other) const
            {
                return m_index == other.m_index;
            }

            bool operator!=(const iterator& other) const
            {
                return !(*this == other);
            }

        private:
            BlockIndex findNext(BlockIndex index)
            {
                while (index < m_graph->numBlocks() && !m_graph->block(index))
                    index++;
                return index;
            }

            const Graph* m_graph;
            BlockIndex m_index;
        };

        iterator begin()
        {
            return iterator(*m_graph, 0);
        }

        iterator end()
        {
            return iterator(*m_graph, m_graph->numBlocks());
        }

    private:
        const Graph* m_graph;
    };

    NaturalBlockIterable blocksInNaturalOrder() const
    {
        return NaturalBlockIterable(*this);
    }

    template<typename ChildFunctor>
    ALWAYS_INLINE void doToChildrenWithNode(Node* node, const ChildFunctor& functor)
    {
        DFG_NODE_DO_TO_CHILDREN(*this, node, functor);
    }

    template<typename ChildFunctor>
    ALWAYS_INLINE void doToChildren(Node* node, const ChildFunctor& functor)
    {
        class ForwardingFunc {
        public:
            ForwardingFunc(const ChildFunctor& functor)
                : m_functor(functor)
            {
            }

            // This is a manually written functor, rather than a lambda, because we want the call operator to be ALWAYS_INLINE.
            ALWAYS_INLINE void operator()(Node*, Edge& edge) const
            {
                m_functor(edge);
            }

        private:
            const ChildFunctor& m_functor;
        };

        doToChildrenWithNode(node, ForwardingFunc(functor));
    }

    bool uses(Node* node, Node* child)
    {
        bool result = false;
        doToChildren(node, [&] (Edge edge) { result |= edge == child; });
        return result;
    }

    bool isWatchingHavingABadTimeWatchpoint(Node* node)
    {
        if (m_plan.isUnlinked())
            return false;
        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        return watchpoints().isWatched(globalObject->havingABadTimeWatchpoint());
    }

    bool isWatchingGlobalObjectWatchpoint(JSGlobalObject* globalObject, InlineWatchpointSet& set)
    {
        if (m_plan.isUnlinked())
            return false;

        if (watchpoints().isWatched(set))
            return true;

        if (set.isStillValid()) {
            // Since the global object owns this watchpoint, we make ourselves have a weak pointer to it.
            // If the global object got deallocated, it wouldn't fire the watchpoint. It's unlikely the
            // global object would get deallocated without this code ever getting thrown away, however,
            // it's more sound logically to depend on the global object lifetime weakly.
            freeze(globalObject);
            watchpoints().addLazily(set);
            return true;
        }

        return false;
    }

    bool isWatchingArrayIteratorProtocolWatchpoint(Node* node)
    {
        if (m_plan.isUnlinked())
            return false;

        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        InlineWatchpointSet& set = globalObject->arrayIteratorProtocolWatchpointSet();
        return isWatchingGlobalObjectWatchpoint(globalObject, set);
    }

    bool isWatchingNumberToStringWatchpoint(Node* node)
    {
        if (m_plan.isUnlinked())
            return false;

        JSGlobalObject* globalObject = globalObjectFor(node->origin.semantic);
        InlineWatchpointSet& set = globalObject->numberToStringWatchpointSet();
        return isWatchingGlobalObjectWatchpoint(globalObject, set);
    }

    bool isWatchingStructureCacheClearedWatchpoint(JSGlobalObject* globalObject)
    {
        if (m_plan.isUnlinked())
            return false;

        InlineWatchpointSet& set = globalObject->structureCacheClearedWatchpoint();
        return isWatchingGlobalObjectWatchpoint(globalObject, set);
    }

    Profiler::Compilation* compilation() { return m_plan.compilation(); }

    DesiredIdentifiers& identifiers() { return m_plan.identifiers(); }
    DesiredWatchpoints& watchpoints() { return m_plan.watchpoints(); }

    // Returns false if the key is already invalid or unwatchable. If this is a Presence condition,
    // this also makes it cheap to query if the condition holds. Also makes sure that the GC knows
    // what's going on.
    bool watchCondition(const ObjectPropertyCondition&);
    bool watchConditions(const ObjectPropertyConditionSet&);

    bool watchGlobalProperty(JSGlobalObject*, unsigned identifierNumber);

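    // Example (illustrative sketch): a phase that wants to constant-fold a load
    // guarded by an ObjectPropertyCondition might do something like:
    //
    //     if (watchCondition(condition)) {
    //         // The condition is now watched, so compiled code may assume it holds.
    //     } else {
    //         // Already invalid or unwatchable: emit explicit checks instead.
    //     }
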
    // Checks if it's known that loading from the given object at the given offset is fine. This is
    // computed by tracking which conditions we watch via watchCondition().
    bool isSafeToLoad(JSObject* base, PropertyOffset);

    // This uses either constant property inference or property type inference to derive a good abstract
    // value for some property accessed with the given abstract value base.
    AbstractValue inferredValueForProperty(
        const AbstractValue& base, PropertyOffset, StructureClobberState);

    FullBytecodeLiveness& livenessFor(CodeBlock*);
    FullBytecodeLiveness& livenessFor(InlineCallFrame*);

    // Quickly query if a single local is live at the given point. This is faster than calling
    // forAllLiveInBytecode() if you will only query one local. But, if you want to know all of the
    // locals live, then calling this for each local is much slower than forAllLiveInBytecode().
    bool isLiveInBytecode(Operand, CodeOrigin);

    // Quickly get all of the non-argument locals and tmps live at the given point. This doesn't give you
    // any arguments because those are all presumed live. You can call forAllLiveInBytecode() to
    // also get the arguments. This is much faster than calling isLiveInBytecode() for each local.
    template<typename Functor>
    void forAllLocalsAndTmpsLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        // Support for not redundantly reporting arguments. Necessary because in the case of a varargs
        // call, only the callee knows that the arguments are live, while in the case of a non-varargs
        // call, both callee and caller see the variables as live.
        VirtualRegister exclusionStart;
        VirtualRegister exclusionEnd;

        CodeOrigin* codeOriginPtr = &codeOrigin;

        bool isCallerOrigin = false;
        for (;;) {
            InlineCallFrame* inlineCallFrame = codeOriginPtr->inlineCallFrame();
            VirtualRegister stackOffset(inlineCallFrame ? inlineCallFrame->stackOffset : 0);

            if (inlineCallFrame) {
                if (inlineCallFrame->isClosureCall)
                    functor(stackOffset + CallFrameSlot::callee);
                if (inlineCallFrame->isVarargs())
                    functor(stackOffset + CallFrameSlot::argumentCountIncludingThis);
            }

            CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
            FullBytecodeLiveness& fullLiveness = livenessFor(codeBlock);
            const auto& livenessAtBytecode = fullLiveness.getLiveness(codeOriginPtr->bytecodeIndex(), appropriateLivenessCalculationPoint(*codeOriginPtr, isCallerOrigin));
            for (unsigned relativeLocal = codeBlock->numCalleeLocals(); relativeLocal--;) {
                VirtualRegister reg = stackOffset + virtualRegisterForLocal(relativeLocal);

                // Don't report if our callee already reported.
                if (reg >= exclusionStart && reg < exclusionEnd)
                    continue;

                if (livenessAtBytecode[relativeLocal])
                    functor(reg);
            }

            if (codeOriginPtr->bytecodeIndex().checkpoint()) {
                ASSERT(codeBlock->numTmps());
                auto liveTmps = tmpLivenessForCheckpoint(*codeBlock, codeOriginPtr->bytecodeIndex());
                liveTmps.forEachSetBit([&] (size_t tmp) {
                    functor(remapOperand(inlineCallFrame, Operand::tmp(tmp)));
                });
            }

            if (!inlineCallFrame)
                break;

            // Arguments are always live. This would be redundant if it wasn't for our
            // op_call_varargs inlining. See the comment above.
            exclusionStart = stackOffset + CallFrame::argumentOffsetIncludingThis(0);
            exclusionEnd = stackOffset + CallFrame::argumentOffsetIncludingThis(inlineCallFrame->m_argumentsWithFixup.size());

            // We will always have a "this" argument and exclusionStart should be a smaller stack
            // offset than exclusionEnd.
            ASSERT(exclusionStart < exclusionEnd);

            for (VirtualRegister reg = exclusionStart; reg < exclusionEnd; reg += 1)
                functor(reg);

            // We need to handle tail callers because we may decide to exit to the
            // return bytecode following the tail call.
            codeOriginPtr = &inlineCallFrame->directCaller;
            isCallerOrigin = true;
        }
    }

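    // Example (illustrative sketch): collecting the live non-argument operands at
    // a CodeOrigin into a vector. The functor receives values convertible to
    // Operand (locals as VirtualRegisters, plus tmps).
    //
    //     Vector<Operand> live;
    //     forAllLocalsAndTmpsLiveInBytecode(codeOrigin, [&] (Operand operand) {
    //         live.append(operand);
    //     });
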
    // Get a BitVector of all of the locals and tmps live right now. This is mostly useful if
    // you want to compare two sets of live locals from two different CodeOrigins.
    BitVector localsAndTmpsLiveInBytecode(CodeOrigin);

    LivenessCalculationPoint appropriateLivenessCalculationPoint(CodeOrigin origin, bool isCallerOrigin)
    {
        if (isCallerOrigin) {
            // We do not need to keep the used registers of call bytecodes live when terminating in an
            // inlined function, except for inlining invoked by non-call bytecodes, such as getter/setter calls.
            BytecodeIndex bytecodeIndex = origin.bytecodeIndex();
            InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
            CodeBlock* codeBlock = baselineCodeBlockFor(inlineCallFrame);
            auto instruction = codeBlock->instructions().at(bytecodeIndex.offset());
            switch (instruction->opcodeID()) {
            case op_call_varargs:
            case op_tail_call_varargs:
            case op_construct_varargs:
                // When inlining a varargs call, its uses include the array used for varargs. But once we
                // are in the inlined function, the contents of that array have already been read and
                // flushed to the stack, so we no longer need to keep those use registers live. We can use
                // the liveness at the LivenessCalculationPoint::AfterUse point. This is important for
                // killing arguments allocations in the DFG (not in the FTL) when calling a function in a
                // `func.apply(undefined, arguments)` manner.
                return LivenessCalculationPoint::AfterUse;
            default:
                // We could list the other bytecodes here, like `op_call` or `op_get_by_id` (getter inlining).
                // But to list a bytecode here, we must ensure that it never uses its `uses` registers after
                // inlining, so we cannot return LivenessCalculationPoint::AfterUse blindly when
                // isCallerOrigin = true. And since excluding liveness in the other bytecodes does not offer
                // a practical benefit, we do not try it.
                break;
            }
        }
        return LivenessCalculationPoint::BeforeUse;
    }

    // Tells you all of the operands live at the given CodeOrigin. This is a small
    // extension to forAllLocalsAndTmpsLiveInBytecode(), since all arguments are always presumed live.
    template<typename Functor>
    void forAllLiveInBytecode(CodeOrigin codeOrigin, const Functor& functor)
    {
        forAllLocalsAndTmpsLiveInBytecode(codeOrigin, functor);

        // Report all arguments as being live.
        for (unsigned argument = block(0)->variablesAtHead.numberOfArguments(); argument--;)
            functor(virtualRegisterForArgumentIncludingThis(argument));
    }

    static unsigned parameterSlotsForArgCount(unsigned);

    unsigned frameRegisterCount();
    unsigned stackPointerOffset();
    unsigned requiredRegisterCountForExit();
    unsigned requiredRegisterCountForExecutionAndExit();

    JSValue tryGetConstantProperty(JSValue base, const RegisteredStructureSet&, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset);
    JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset);
    JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset);

    JSValue tryGetConstantClosureVar(JSValue base, ScopeOffset);
    JSValue tryGetConstantClosureVar(const AbstractValue&, ScopeOffset);
    JSValue tryGetConstantClosureVar(Node*, ScopeOffset);

    JSArrayBufferView* tryGetFoldableView(JSValue);
    JSArrayBufferView* tryGetFoldableView(JSValue, ArrayMode arrayMode);

    bool canDoFastSpread(Node*, const AbstractValue&);

    void registerFrozenValues();

    void visitChildren(AbstractSlotVisitor&) final;
    void visitChildren(SlotVisitor&) final;

    void logAssertionFailure(
        std::nullptr_t, const char* file, int line, const char* function,
        const char* assertion);
    void logAssertionFailure(
        Node*, const char* file, int line, const char* function,
        const char* assertion);
    void logAssertionFailure(
        BasicBlock*, const char* file, int line, const char* function,
        const char* assertion);

    bool hasDebuggerEnabled() const { return m_hasDebuggerEnabled; }

    CPSDominators& ensureCPSDominators();
    SSADominators& ensureSSADominators();
    CPSNaturalLoops& ensureCPSNaturalLoops();
    SSANaturalLoops& ensureSSANaturalLoops();
    BackwardsCFG& ensureBackwardsCFG();
    BackwardsDominators& ensureBackwardsDominators();
    ControlEquivalenceAnalysis& ensureControlEquivalenceAnalysis();
    CPSCFG& ensureCPSCFG();

    // These functions only make sense to call after bytecode parsing, because they query
    // the m_hasExceptionHandlers boolean, whose value is only fully determined after
    // bytecode parsing.
    bool willCatchExceptionInMachineFrame(CodeOrigin codeOrigin)
    {
        CodeOrigin ignored;
        HandlerInfo* ignored2;
        return willCatchExceptionInMachineFrame(codeOrigin, ignored, ignored2);
    }
    bool willCatchExceptionInMachineFrame(CodeOrigin, CodeOrigin& opCatchOriginOut, HandlerInfo*& catchHandlerOut);

    bool needsScopeRegister() const { return m_hasDebuggerEnabled || m_codeBlock->usesCallEval(); }
    bool needsFlushedThis() const { return m_codeBlock->usesCallEval(); }

    void clearCPSCFGData();

    bool isRoot(BasicBlock* block) const
    {
        ASSERT_WITH_MESSAGE(!m_isInSSAConversion, "This is not written to work during SSA conversion.");

        if (m_form == SSA) {
            ASSERT(m_roots.size() == 1);
            ASSERT(m_roots.contains(this->block(0)));
            return block == this->block(0);
        }

        if (m_roots.size() <= 4) {
            bool result = m_roots.contains(block);
            ASSERT(result == m_rootToArguments.contains(block));
            return result;
        }
        bool result = m_rootToArguments.contains(block);
        ASSERT(result == m_roots.contains(block));
        return result;
    }

    Prefix& prefix() { return m_prefix; }
    void nextPhase() { m_prefix.phaseNumber++; }

    const UnlinkedSimpleJumpTable& unlinkedSwitchJumpTable(unsigned index) const { return *m_unlinkedSwitchJumpTables[index]; }
    SimpleJumpTable& switchJumpTable(unsigned index) { return m_switchJumpTables[index]; }

    const UnlinkedStringJumpTable& unlinkedStringSwitchJumpTable(unsigned index) const { return *m_unlinkedStringSwitchJumpTables[index]; }
    StringJumpTable& stringSwitchJumpTable(unsigned index) { return m_stringSwitchJumpTables[index]; }

    void appendCatchEntrypoint(BytecodeIndex bytecodeIndex, MacroAssemblerCodePtr<ExceptionHandlerPtrTag> machineCode, Vector<FlushFormat>&& argumentFormats)
    {
        m_catchEntrypoints.append(CatchEntrypointData { machineCode, FixedVector<FlushFormat>(WTFMove(argumentFormats)), bytecodeIndex });
    }

    void freeDFGIRAfterLowering();

    StackCheck m_stackChecker;
    VM& m_vm;
    Plan& m_plan;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;

    Vector<RefPtr<BasicBlock>, 8> m_blocks;
    Vector<BasicBlock*, 1> m_roots;
    Vector<Edge, 16> m_varArgChildren;

    // UnlinkedSimpleJumpTable/UnlinkedStringJumpTable are kept by UnlinkedCodeBlocks retained by baseline CodeBlocks handled by DFG / FTL.
    Vector<const UnlinkedSimpleJumpTable*> m_unlinkedSwitchJumpTables;
    Vector<SimpleJumpTable> m_switchJumpTables;
    Vector<const UnlinkedStringJumpTable*> m_unlinkedStringSwitchJumpTables;
    Vector<StringJumpTable> m_stringSwitchJumpTables;

    HashMap<EncodedJSValue, FrozenValue*, EncodedJSValueHash, EncodedJSValueHashTraits> m_frozenValueMap;
    Bag<FrozenValue> m_frozenValues;

    Vector<uint32_t> m_uint32ValuesInUse;

    Bag<StorageAccessData> m_storageAccessData;

    // In CPS, this is all of the SetArgumentDefinitely nodes for the arguments in the machine code block
    // that survived DCE. All of them except maybe "this" will survive DCE, because of the Flush
    // nodes. In SSA, this has no meaning. It's empty.
    HashMap<BasicBlock*, ArgumentsVector> m_rootToArguments;

    // In SSA, this is the argument speculation that we've locked in for an entrypoint block.
    //
    // We must speculate on the argument types at each entrypoint even if operations involving
    // arguments get killed. For example:
    //
    // function foo(x) {
    //     var tmp = x + 1;
    // }
    //
    // Assume that x is always int during profiling. The ArithAdd for "x + 1" will be dead and will
    // have a proven check for the edge to "x". So, we will not insert a Check node and we will
    // kill the GetStack for "x". But, we must do the int check in the prologue, because that's the
    // thing we used to allow DCE of ArithAdd. Otherwise the add could be impure:
    //
    // var o = {
    //     valueOf: function() { do side effects; }
    // };
    // foo(o);
    //
    // If we DCE the ArithAdd and we remove the int check on x, then this won't do the side
    // effects.
    //
    // By convention, entrypoint index 0 is used for the CodeBlock's op_enter entrypoint.
    // So m_argumentFormats[0] holds the argument formats for the normal call entrypoint.
    Vector<Vector<FlushFormat>> m_argumentFormats;

    SegmentedVector<VariableAccessData, 16> m_variableAccessData;
    SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
    Bag<Transition> m_transitions;
    Bag<BranchData> m_branchData;
    Bag<SwitchData> m_switchData;
    Bag<MultiGetByOffsetData> m_multiGetByOffsetData;
    Bag<MultiPutByOffsetData> m_multiPutByOffsetData;
    Bag<MultiDeleteByOffsetData> m_multiDeleteByOffsetData;
    Bag<MatchStructureData> m_matchStructureData;
    Bag<ObjectMaterializationData> m_objectMaterializationData;
    Bag<CallVarargsData> m_callVarargsData;
    Bag<LoadVarargsData> m_loadVarargsData;
    Bag<StackAccessData> m_stackAccessData;
    Bag<LazyJSValue> m_lazyJSValues;
    Bag<CallDOMGetterData> m_callDOMGetterData;
    Bag<BitVector> m_bitVectors;
    Vector<InlineVariableData, 4> m_inlineVariableData;
    HashMap<CodeBlock*, std::unique_ptr<FullBytecodeLiveness>> m_bytecodeLiveness;
    HashSet<std::pair<JSObject*, PropertyOffset>> m_safeToLoad;
    Vector<Ref<Snippet>> m_domJITSnippets;
    std::unique_ptr<CPSDominators> m_cpsDominators;
    std::unique_ptr<SSADominators> m_ssaDominators;
    std::unique_ptr<CPSNaturalLoops> m_cpsNaturalLoops;
    std::unique_ptr<SSANaturalLoops> m_ssaNaturalLoops;
    std::unique_ptr<SSACFG> m_ssaCFG;
    std::unique_ptr<CPSCFG> m_cpsCFG;
    std::unique_ptr<BackwardsCFG> m_backwardsCFG;
    std::unique_ptr<BackwardsDominators> m_backwardsDominators;
    std::unique_ptr<ControlEquivalenceAnalysis> m_controlEquivalenceAnalysis;
    unsigned m_tmps;
    unsigned m_localVars;
    unsigned m_nextMachineLocal;
    unsigned m_parameterSlots;

    // This is the number of logical entrypoints that we're compiling. This is only used
    // in SSA. Each EntrySwitch node must have m_numberOfEntrypoints cases. Note, this is
    // not the same as m_roots.size(). m_roots.size() represents the number of roots in
    // the CFG. In SSA, m_roots.size() == 1 even if we're compiling more than one entrypoint.
    unsigned m_numberOfEntrypoints { UINT_MAX };

    // This maps an entrypoint index to a particular op_catch bytecode offset. By convention,
    // it'll never have zero as a key because we use zero to mean the op_enter entrypoint.
    HashMap<unsigned, BytecodeIndex> m_entrypointIndexToCatchBytecodeIndex;
    Vector<CatchEntrypointData> m_catchEntrypoints;

    HashSet<String> m_localStrings;
    HashSet<String> m_copiedStrings;

#if USE(JSVALUE32_64)
    HashMap<GenericHashKey<int64_t>, double*> m_doubleConstantsMap;
    Bag<double> m_doubleConstants;
#endif

    OptimizationFixpointState m_fixpointState;
    StructureRegistrationState m_structureRegistrationState;
    GraphForm m_form;
    UnificationState m_unificationState;
    PlanStage m_planStage { PlanStage::Initial };
    RefCountState m_refCountState;
    bool m_hasDebuggerEnabled;
    bool m_hasExceptionHandlers { false };
    bool m_isInSSAConversion { false };
    bool m_isValidating { false };
    std::optional<uint32_t> m_maxLocalsForCatchOSREntry;
    std::unique_ptr<FlowIndexing> m_indexingCache;
    std::unique_ptr<FlowMap<AbstractValue>> m_abstractValuesCache;
    Bag<EntrySwitchData> m_entrySwitchData;

    RegisteredStructure stringStructure;
    RegisteredStructure symbolStructure;

    HashSet<Node*> m_slowGetByVal;
    HashSet<Node*> m_slowPutByVal;

private:
    template<typename Visitor> void visitChildrenImpl(Visitor&);

    bool isStringPrototypeMethodSane(JSGlobalObject*, UniquedStringImpl*);

    void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor);

    AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* operand, Node* immediate, RareCaseProfilingSource source)
    {
        ASSERT(immediate->hasConstant());

        JSValue immediateValue = immediate->asJSValue();
        if (!immediateValue.isNumber() && !immediateValue.isBoolean())
            return DontSpeculateInt32;

        if (!variableShouldSpeculateInt32)
            return DontSpeculateInt32;

        // Integer constants can be typed Double if they are written like a double in the source code (e.g. 42.0).
        // In that case, we stay conservative unless the other operand was explicitly typed as integer.
        NodeFlags operandResultType = operand->result();
        if (operandResultType != NodeResultInt32 && immediateValue.isDouble())
            return DontSpeculateInt32;

        if (immediateValue.isBoolean() || jsNumber(immediateValue.asNumber()).isInt32())
            return add->canSpeculateInt32(source) ? SpeculateInt32 : DontSpeculateInt32;

        double doubleImmediate = immediateValue.asDouble();
        const double twoToThe48 = 281474976710656.0;
        if (doubleImmediate < -twoToThe48 || doubleImmediate > twoToThe48)
            return DontSpeculateInt32;

        return bytecodeCanTruncateInteger(add->arithNodeFlags()) ? SpeculateInt32AndTruncateConstants : DontSpeculateInt32;
    }

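    // Worked example (illustrative): for "x + 1" where x profiles as int32, the
    // immediate is an int32, so the result is SpeculateInt32 as long as the add
    // has not exited with Overflow for the relevant profiling source. For
    // "x + 1e60" with x typed NodeResultInt32, the immediate is a double that is
    // neither int32-representable nor within +/-2^48, so the answer is
    // DontSpeculateInt32. A double immediate like 1e10 within the 2^48 window
    // yields SpeculateInt32AndTruncateConstants only if bytecode says the result
    // may be truncated.
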
    B3::SparseCollection<Node> m_nodes;
    SegmentedVector<RegisteredStructureSet, 16> m_structureSets;
    Prefix m_prefix;
};

} } // namespace JSC::DFG

#endif