/*
 * Copyright (C) 2015-2020 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGObjectAllocationSinkingPhase.h"

#if ENABLE(DFG_JIT)

#include "DFGBlockMapInlines.h"
#include "DFGClobbersExitState.h"
#include "DFGCombinedLiveness.h"
#include "DFGGraph.h"
#include "DFGInsertionSet.h"
#include "DFGLazyNode.h"
#include "DFGLivenessAnalysisPhase.h"
#include "DFGOSRAvailabilityAnalysisPhase.h"
#include "DFGPhase.h"
#include "DFGPromotedHeapLocation.h"
#include "DFGSSACalculator.h"
#include "DFGValidate.h"
#include "JSArrayIterator.h"
#include "JSInternalPromise.h"
#include "JSMapIterator.h"
#include "JSSetIterator.h"
#include "StructureInlines.h"
#include <wtf/StdList.h>

namespace JSC { namespace DFG {

namespace {

namespace DFGObjectAllocationSinkingPhaseInternal {
static constexpr bool verbose = false;
}

// In order to sink object cycles, we use a points-to analysis coupled
// with an escape analysis. This analysis is actually similar to an
// abstract interpreter focused on local allocations and ignoring
// everything else.
//
// We represent the local heap using two mappings:
//
// - A set of the local allocations present in the function, where
//   each of those has a further mapping from
//   PromotedLocationDescriptor to local allocations they must point
//   to.
//
// - A "pointer" mapping from nodes to local allocations, if they must
//   be equal to said local allocation and are currently live. This
//   can be because the node is the actual node that created the
//   allocation, or any other node that must currently point to it -
//   we don't make a distinction.
//
// The following graph is a motivation for why we separate allocations
// from pointers:
//
// Block #0
//  0: NewObject({})
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  2: GetByOffset(@0, x)
//  -: Jump(#1)
//
// Block #1
//  -: Return(@2)
//
// Here, we need to remember in block #1 that @2 points to a local
// allocation with appropriate fields and structures information
// (because we should be able to place a materialization on top of
// block #1 here), even though @1 is dead. We *could* just keep @1
// artificially alive here, but there is no real reason to do it:
// after all, by the end of block #0, @1 and @2 should be completely
// interchangeable, and there is no reason for us to artificially make
// @1 more important.
//
// An important point to consider to understand this separation is
// that we should think of the local heap as follows: we have a
// bunch of nodes that are pointers to "allocations" that live
// someplace on the heap, and those allocations can have pointers in
// between themselves as well. We shouldn't care about whatever
// names we give to the allocations; what matters when
// comparing/merging two heaps is the isomorphism/comparison between
// the allocation graphs as seen by the nodes.
//
// For instance, in the following graph:
//
// Block #0
//  0: NewObject({})
//  -: Branch(#1, #2)
//
// Block #1
//  1: NewObject({})
//  -: PutByOffset(@0, @1, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #2
//  2: NewObject({})
//  -: PutByOffset(@2, undefined, x)
//  -: PutStructure(@2, {x:0})
//  -: PutByOffset(@0, @2, x)
//  -: PutStructure(@0, {x:0})
//  -: Jump(#3)
//
// Block #3
//  -: Return(@0)
//
// we should think of the heaps at the tail of blocks #1 and #2 as being
// exactly the same, even though one has @0.x pointing to @1 and the
// other has @0.x pointing to @2, because in essence this should not
// be different from the graph where we hoisted @1 and @2 into a
// single allocation in block #0. We currently will not handle this
// case, because we merge allocations based on the node they are
// coming from, but this is only a technicality for the sake of
// simplicity that shouldn't hide the deeper idea outlined here.
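//
// In code terms (see the LocalHeap class below), the analysis state is
// roughly:
//   HashMap<Node*, Allocation> m_allocations; // allocation identifier => abstract object
//   HashMap<Node*, Node*> m_pointers;         // live node => identifier it must point to
// where a nullptr pointer value means the node may point to different
// allocations (printed as TOP in dumps).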

class Allocation {
public:
    // We use Escaped as a special allocation kind because when we
    // decide to sink an allocation, we still need to keep track of it
    // once it has escaped, as long as there are still pointers to it,
    // in order to replace any use of those pointers with the
    // corresponding materialization.
    enum class Kind { Escaped, Object, Activation, Function, GeneratorFunction, AsyncFunction, AsyncGeneratorFunction, InternalFieldObject, RegExpObject };

    using Fields = HashMap<PromotedLocationDescriptor, Node*>;

    explicit Allocation(Node* identifier = nullptr, Kind kind = Kind::Escaped)
        : m_identifier(identifier)
        , m_kind(kind)
    {
    }

    const Fields& fields() const
    {
        return m_fields;
    }

    Fields& fields()
    {
        return m_fields;
    }

    Node* get(PromotedLocationDescriptor descriptor)
    {
        return m_fields.get(descriptor);
    }

    Allocation& set(PromotedLocationDescriptor descriptor, Node* value)
    {
        // Pointing to anything other than an unescaped local
        // allocation is represented by simply not having the
        // field.
        if (value)
            m_fields.set(descriptor, value);
        else
            m_fields.remove(descriptor);
        return *this;
    }

    void remove(PromotedLocationDescriptor descriptor)
    {
        set(descriptor, nullptr);
    }

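    // Only Object allocations can change structure after creation (via
    // PutStructure), so they are the only kind for which we track
    // structure sets.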
    bool hasStructures() const
    {
        switch (kind()) {
        case Kind::Object:
            return true;

        default:
            return false;
        }
    }

    Allocation& setStructures(const RegisteredStructureSet& structures)
    {
        ASSERT(hasStructures() && !structures.isEmpty());
        m_structures = structures;
        m_structuresForMaterialization = structures;
        return *this;
    }

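    // Merges the structure sets coming from another heap's copy of this
    // allocation. For instance, if this allocation has structures {S1}
    // here and {S1, S2} in the other heap, merging yields m_structures
    // == {S1} (the intersection, used for checks) and
    // m_structuresForMaterialization == {S1, S2} (the union, used when
    // emitting materializations).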
    Allocation& mergeStructures(const Allocation& other)
    {
        ASSERT(hasStructures() || (other.structuresForMaterialization().isEmpty() && other.structures().isEmpty()));
        m_structures.filter(other.structures());
        m_structuresForMaterialization.merge(other.structuresForMaterialization());
        ASSERT(m_structures.isSubsetOf(m_structuresForMaterialization));
        return *this;
    }

    Allocation& filterStructures(const RegisteredStructureSet& structures)
    {
        ASSERT(hasStructures());
        m_structures.filter(structures);
        m_structuresForMaterialization.filter(structures);
        RELEASE_ASSERT(!m_structures.isEmpty());
        return *this;
    }

    const RegisteredStructureSet& structures() const
    {
        return m_structures;
    }

    const RegisteredStructureSet& structuresForMaterialization() const
    {
        return m_structuresForMaterialization;
    }

    Node* identifier() const { return m_identifier; }

    Kind kind() const { return m_kind; }

    bool isEscapedAllocation() const
    {
        return kind() == Kind::Escaped;
    }

    bool isObjectAllocation() const
    {
        return m_kind == Kind::Object;
    }

    bool isActivationAllocation() const
    {
        return m_kind == Kind::Activation;
    }

    bool isFunctionAllocation() const
    {
        return m_kind == Kind::Function || m_kind == Kind::GeneratorFunction || m_kind == Kind::AsyncFunction;
    }

    bool isInternalFieldObjectAllocation() const
    {
        return m_kind == Kind::InternalFieldObject;
    }

    bool isRegExpObjectAllocation() const
    {
        return m_kind == Kind::RegExpObject;
    }

    bool operator==(const Allocation& other) const
    {
        return m_identifier == other.m_identifier
            && m_kind == other.m_kind
            && m_fields == other.m_fields
            && m_structures == other.m_structures
            && m_structuresForMaterialization == other.m_structuresForMaterialization;
    }

    bool operator!=(const Allocation& other) const
    {
        return !(*this == other);
    }

    void dump(PrintStream& out) const
    {
        dumpInContext(out, nullptr);
    }

    void dumpInContext(PrintStream& out, DumpContext* context) const
    {
        switch (m_kind) {
        case Kind::Escaped:
            out.print("Escaped");
            break;

        case Kind::Object:
            out.print("Object");
            break;

        case Kind::Function:
            out.print("Function");
            break;

        case Kind::GeneratorFunction:
            out.print("GeneratorFunction");
            break;

        case Kind::AsyncFunction:
            out.print("AsyncFunction");
            break;

        case Kind::InternalFieldObject:
            out.print("InternalFieldObject");
            break;

        case Kind::AsyncGeneratorFunction:
            out.print("AsyncGeneratorFunction");
            break;

        case Kind::Activation:
            out.print("Activation");
            break;

        case Kind::RegExpObject:
            out.print("RegExpObject");
            break;
        }
        out.print("Allocation(");
        if (!m_structuresForMaterialization.isEmpty())
            out.print(inContext(m_structuresForMaterialization.toStructureSet(), context));
        if (!m_fields.isEmpty()) {
            if (!m_structuresForMaterialization.isEmpty())
                out.print(", ");
            out.print(mapDump(m_fields, " => #", ", "));
        }
        out.print(")");
    }

private:
    Node* m_identifier; // This is the actual node that created the allocation
    Kind m_kind;
    Fields m_fields;

    // This set of structures is the intersection of structures seen at control flow edges. It's used
    // for checks and speculation since it can't be widened.
    RegisteredStructureSet m_structures;

    // The second set of structures is the union of the structures at control flow edges. It's used
    // for materializations, where we need to generate code for all possible incoming structures.
    RegisteredStructureSet m_structuresForMaterialization;
};

class LocalHeap {
public:
    Allocation& newAllocation(Node* node, Allocation::Kind kind)
    {
        ASSERT(!m_pointers.contains(node) && !isAllocation(node));
        m_pointers.add(node, node);
        return m_allocations.set(node, Allocation(node, kind)).iterator->value;
    }

    bool isAllocation(Node* identifier) const
    {
        return m_allocations.contains(identifier);
    }

    // Note that this is fundamentally different from
    // onlyLocalAllocation() below. getAllocation() takes as argument
    // a node-as-identifier, that is, an allocation node. This
    // allocation node doesn't have to be alive; it may only be
    // pointed to by other nodes or allocation fields.
    // For instance, in the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  1: NewObject({})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  2: GetByOffset(@0, x)
    //  -: Jump(#1)
    //
    // Block #1
    //  -: Return(@2)
    //
    // At head of block #1, the only reachable allocation is #@1,
    // which can be reached through node @2. Thus, getAllocation(#@1)
    // contains the appropriate metadata for this allocation, but
    // onlyLocalAllocation(@1) is null, as @1 is no longer a pointer
    // to #@1 (since it is dead). Conversely, onlyLocalAllocation(@2)
    // is the same as getAllocation(#@1), while getAllocation(#@2)
    // does not make sense since @2 is not an allocation node.
    //
    // This is meant to be used when the node is already known to be
    // an identifier (i.e. an allocation) - probably because it was
    // found as value of a field or pointer in the current heap, or
    // was the result of a call to follow(). In any other case (such
    // as when doing anything while traversing the graph), the
    // appropriate function to call is probably onlyLocalAllocation.
    Allocation& getAllocation(Node* identifier)
    {
        auto iter = m_allocations.find(identifier);
        ASSERT(iter != m_allocations.end());
        return iter->value;
    }

    void newPointer(Node* node, Node* identifier)
    {
        ASSERT(!m_allocations.contains(node) && !m_pointers.contains(node));
        ASSERT(isAllocation(identifier));
        m_pointers.add(node, identifier);
    }

    // follow solves the points-to problem. Given a live node, which
    // may be either an allocation itself or a heap read (e.g. a
    // GetByOffset node), it returns the corresponding allocation
    // node, if there is one. If the argument node is neither an
    // allocation nor a heap read, or may point to different nodes,
    // nullptr will be returned. Note that a node that points to
    // different nodes can never point to an unescaped local
    // allocation.
    Node* follow(Node* node) const
    {
        auto iter = m_pointers.find(node);
        ASSERT(iter == m_pointers.end() || (!iter->value || m_allocations.contains(iter->value)));
        return iter == m_pointers.end() ? nullptr : iter->value;
    }

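    // The base of the location must be a tracked allocation identifier;
    // the returned node is the allocation the field points to, or
    // nullptr if the field holds something other than an unescaped
    // local allocation.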
    Node* follow(PromotedHeapLocation location) const
    {
        const Allocation& base = m_allocations.find(location.base())->value;
        auto iter = base.fields().find(location.descriptor());
        if (iter == base.fields().end())
            return nullptr;

        return iter->value;
    }

    // onlyLocalAllocation finds the corresponding allocation metadata
    // for any live node. onlyLocalAllocation(node) is essentially
    // getAllocation(follow(node)), with appropriate null handling.
    Allocation* onlyLocalAllocation(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    Allocation* onlyLocalAllocation(PromotedHeapLocation location)
    {
        Node* identifier = follow(location);
        if (!identifier)
            return nullptr;

        return &getAllocation(identifier);
    }

    bool isUnescapedAllocation(Node* identifier) const
    {
        auto iter = m_allocations.find(identifier);
        return iter != m_allocations.end() && !iter->value.isEscapedAllocation();
    }

    // This allows us to store the escapees only when necessary. If
    // set, the current escapees can be retrieved at any time using
    // takeEscapees(), which will clear the cached set of escapees;
    // otherwise the heap won't remember escaping allocations.
    void setWantEscapees()
    {
        m_wantEscapees = true;
    }

    HashMap<Node*, Allocation> takeEscapees()
    {
        return WTFMove(m_escapees);
    }

    void escape(Node* node)
    {
        Node* identifier = follow(node);
        if (!identifier)
            return;

        escapeAllocation(identifier);
    }

    void merge(const LocalHeap& other)
    {
        assertIsValid();
        other.assertIsValid();
        ASSERT(!m_wantEscapees);

        if (!reached()) {
            ASSERT(other.reached());
            *this = other;
            return;
        }

        NodeSet toEscape;

        for (auto& allocationEntry : other.m_allocations)
            m_allocations.add(allocationEntry.key, allocationEntry.value);
        for (auto& allocationEntry : m_allocations) {
            auto allocationIter = other.m_allocations.find(allocationEntry.key);

            // If we have it and they don't, it died for them but we
            // are keeping it alive from another field somewhere.
            // There is nothing to do - it will be escaped
            // automatically when we handle that other field.
            // This will also happen for allocations that we have and
            // they don't, and all of those will get pruned.
            if (allocationIter == other.m_allocations.end())
                continue;

            if (allocationEntry.value.kind() != allocationIter->value.kind()) {
                toEscape.addVoid(allocationEntry.key);
                for (const auto& fieldEntry : allocationIter->value.fields())
                    toEscape.addVoid(fieldEntry.value);
            } else {
                mergePointerSets(allocationEntry.value.fields(), allocationIter->value.fields(), toEscape);
                allocationEntry.value.mergeStructures(allocationIter->value);
            }
        }

        {
            // This works because we won't collect all pointers until all of our predecessors
            // merge their pointer sets with ours. That allows us to see the full state of the
            // world during our fixpoint analysis. Once we have the full set of pointers, we
            // only mark pointers to TOP, so we will eventually converge.
            for (auto entry : other.m_pointers) {
                auto addResult = m_pointers.add(entry.key, entry.value);
                if (addResult.iterator->value != entry.value) {
                    if (addResult.iterator->value) {
                        toEscape.addVoid(addResult.iterator->value);
                        addResult.iterator->value = nullptr;
                    }
                    if (entry.value)
                        toEscape.addVoid(entry.value);
                }
            }
            // This allows us to rule out pointers for graphs like this:
            // bb#0
            // branch #1, #2
            // #1:
            // x = pointer A
            // jump #3
            // #2:
            // y = pointer B
            // jump #3
            // #3:
            // ...
            //
            // When we merge state at #3, we'll very likely prune away the x and y pointer,
            // since they're not live. But if they do happen to make it to this merge function, when
            // #3 merges with #2 and #1, it'll eventually rule out x and y as not existing
            // in the other, and therefore not existing in #3, which is the desired behavior.
            //
            // This also is necessary for a graph like this:
            // #0
            // o = {}
            // o2 = {}
            // jump #1
            //
            // #1
            // o.f = o2
            // effects()
            // x = o.f
            // escape(o)
            // branch #2, #1
            //
            // #2
            // x cannot be o2 here, it has to be TOP
            // ...
            //
            // On the first fixpoint iteration, we might think that x is o2 at the head
            // of #2. However, when we fixpoint our analysis, we determine that o gets
            // escaped. This means that when we fixpoint, x will eventually not be a pointer.
            // When we merge again here, we'll notice and mark o2 as escaped.
            for (auto& entry : m_pointers) {
                if (!other.m_pointers.contains(entry.key)) {
                    if (entry.value) {
                        toEscape.addVoid(entry.value);
                        entry.value = nullptr;
                        ASSERT(!m_pointers.find(entry.key)->value);
                    }
                }
            }
        }

        for (Node* identifier : toEscape)
            escapeAllocation(identifier);

        if (ASSERT_ENABLED) {
            for (const auto& entry : m_allocations)
                ASSERT_UNUSED(entry, entry.value.isEscapedAllocation() || other.m_allocations.contains(entry.key));
        }

        // If there is no remaining pointer to an allocation, we can
        // remove it. This should only happen for escaped allocations,
        // because we only merge liveness-pruned heaps in the first
        // place.
        prune();

        assertIsValid();
    }

    void pruneByLiveness(const NodeSet& live)
    {
        m_pointers.removeIf(
            [&] (const auto& entry) {
                return !live.contains(entry.key);
            });
        prune();
    }

    void assertIsValid() const
    {
        if (!ASSERT_ENABLED)
            return;

        // Pointers should point to an actual allocation
        for (const auto& entry : m_pointers) {
            if (entry.value)
                ASSERT(m_allocations.contains(entry.value));
        }

        for (const auto& allocationEntry : m_allocations) {
            // Fields should point to an actual allocation
            for (const auto& fieldEntry : allocationEntry.value.fields()) {
                ASSERT_UNUSED(fieldEntry, fieldEntry.value);
                ASSERT(m_allocations.contains(fieldEntry.value));
            }
        }
    }

    bool operator==(const LocalHeap& other) const
    {
        assertIsValid();
        other.assertIsValid();
        return m_allocations == other.m_allocations
            && m_pointers == other.m_pointers;
    }

    bool operator!=(const LocalHeap& other) const
    {
        return !(*this == other);
    }

    const HashMap<Node*, Allocation>& allocations() const
    {
        return m_allocations;
    }

    void dump(PrintStream& out) const
    {
        out.print("  Allocations:\n");
        for (const auto& entry : m_allocations)
            out.print("    #", entry.key, ": ", entry.value, "\n");
        out.print("  Pointers:\n");
        for (const auto& entry : m_pointers) {
            out.print("    ", entry.key, " => #");
            if (entry.value)
                out.print(entry.value, "\n");
            else
                out.print("TOP\n");
        }
    }

    bool reached() const
    {
        return m_reached;
    }

    void setReached()
    {
        m_reached = true;
    }

private:
    // When we merge two heaps, we escape all fields of allocations,
    // unless they point to the same thing in both heaps.
    // The reason for this is that it allows us not to do extra work
    // for diamond graphs where we would otherwise have to check
    // whether we have a single definition or not, which would be
    // cumbersome.
    //
    // Note that we should try to unify nodes even when they are not
    // from the same allocation; for instance we should be able to
    // completely eliminate all allocations from the following graph:
    //
    // Block #0
    //  0: NewObject({})
    //  -: Branch(#1, #2)
    //
    // Block #1
    //  1: NewObject({})
    //  -: PutByOffset(@1, "left", val)
    //  -: PutStructure(@1, {val:0})
    //  -: PutByOffset(@0, @1, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #2
    //  2: NewObject({})
    //  -: PutByOffset(@2, "right", val)
    //  -: PutStructure(@2, {val:0})
    //  -: PutByOffset(@0, @2, x)
    //  -: PutStructure(@0, {x:0})
    //  -: Jump(#3)
    //
    // Block #3:
    //  3: GetByOffset(@0, x)
    //  4: GetByOffset(@3, val)
    //  -: Return(@4)
    template<typename Key>
    static void mergePointerSets(HashMap<Key, Node*>& my, const HashMap<Key, Node*>& their, NodeSet& toEscape)
    {
        auto escape = [&] (Node* identifier) {
            toEscape.addVoid(identifier);
        };

        for (const auto& entry : their) {
            if (!my.contains(entry.key))
                escape(entry.value);
        }
        my.removeIf([&] (const auto& entry) {
            auto iter = their.find(entry.key);
            if (iter == their.end()) {
                escape(entry.value);
                return true;
            }
            if (iter->value != entry.value) {
                escape(entry.value);
                escape(iter->value);
                return true;
            }
            return false;
        });
    }

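    // Escaping an allocation is transitive: everything its fields point
    // to escapes as well, since those pointers are now reachable through
    // an object we no longer track.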
    void escapeAllocation(Node* identifier)
    {
        Allocation& allocation = getAllocation(identifier);
        if (allocation.isEscapedAllocation())
            return;

        Allocation unescaped = WTFMove(allocation);
        allocation = Allocation(unescaped.identifier(), Allocation::Kind::Escaped);

        for (const auto& entry : unescaped.fields())
            escapeAllocation(entry.value);

        if (m_wantEscapees)
            m_escapees.add(unescaped.identifier(), WTFMove(unescaped));
    }

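    // Mark-and-sweep over the local heap: mark every allocation
    // reachable from a live pointer, directly or through the fields of
    // other reachable allocations, then drop the rest.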
    void prune()
    {
        NodeSet reachable;
        for (const auto& entry : m_pointers) {
            if (entry.value)
                reachable.addVoid(entry.value);
        }

        // Repeatedly mark as reachable allocations in fields of other
        // reachable allocations
        {
            Vector<Node*> worklist;
            worklist.appendRange(reachable.begin(), reachable.end());

            while (!worklist.isEmpty()) {
                Node* identifier = worklist.takeLast();
                Allocation& allocation = m_allocations.find(identifier)->value;
                for (const auto& entry : allocation.fields()) {
                    if (reachable.add(entry.value).isNewEntry)
                        worklist.append(entry.value);
                }
            }
        }

        // Remove unreachable allocations
        m_allocations.removeIf(
            [&] (const auto& entry) {
                return !reachable.contains(entry.key);
            });
    }

    bool m_reached = false;
    HashMap<Node*, Node*> m_pointers;
    HashMap<Node*, Allocation> m_allocations;

    bool m_wantEscapees = false;
    HashMap<Node*, Allocation> m_escapees;
};

class ObjectAllocationSinkingPhase : public Phase {
public:
    ObjectAllocationSinkingPhase(Graph& graph)
        : Phase(graph, "object allocation elimination")
        , m_pointerSSA(graph)
        , m_allocationSSA(graph)
        , m_insertionSet(graph)
    {
    }

    bool run()
    {
        ASSERT(m_graph.m_form == SSA);
        ASSERT(m_graph.m_fixpointState == FixpointNotConverged);

        if (!performSinking())
            return false;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Graph after elimination:\n");
            m_graph.dump();
        }

        return true;
    }

private:
    bool performSinking()
    {
        m_graph.computeRefCounts();
        m_graph.initializeNodeOwners();
        m_graph.ensureSSADominators();
        performLivenessAnalysis(m_graph);
        performOSRAvailabilityAnalysis(m_graph);
        m_combinedLiveness = CombinedLiveness(m_graph);

        CString graphBeforeSinking;
        if (UNLIKELY(Options::verboseValidationFailure() && Options::validateGraphAtEachPhase())) {
            StringPrintStream out;
            m_graph.dump(out);
            graphBeforeSinking = out.toCString();
        }

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Graph before elimination:\n");
            m_graph.dump();
        }

        performAnalysis();

        if (!determineSinkCandidates())
            return false;

        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
                dataLog("Heap at head of ", *block, ": \n", m_heapAtHead[block]);
                dataLog("Heap at tail of ", *block, ": \n", m_heapAtTail[block]);
            }
        }

        promoteLocalHeap();
        removeICStatusFilters();

        if (Options::validateGraphAtEachPhase())
            DFG::validate(m_graph, DumpGraph, graphBeforeSinking);
        return true;
    }

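    // The analysis is a forward dataflow fixpoint: walk the blocks in
    // pre-order, interpret every node against the heap at block head,
    // prune the heap at tail by liveness, and merge it into each
    // successor's head, until no head or tail heap changes.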
    void performAnalysis()
    {
        m_heapAtHead = BlockMap<LocalHeap>(m_graph);
        m_heapAtTail = BlockMap<LocalHeap>(m_graph);

        bool changed;
        do {
            if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                dataLog("Doing iteration of escape analysis.\n");
            changed = false;

            for (BasicBlock* block : m_graph.blocksInPreOrder()) {
                m_heapAtHead[block].setReached();
                m_heap = m_heapAtHead[block];

                for (Node* node : *block) {
                    handleNode(
                        node,
                        [] (PromotedHeapLocation, LazyNode) { },
                        [&] (PromotedHeapLocation) -> Node* {
                            return nullptr;
                        });
                }

                if (m_heap == m_heapAtTail[block])
                    continue;

                m_heapAtTail[block] = m_heap;
                changed = true;

                m_heap.assertIsValid();

                // We keep only pointers that are live, and only
                // allocations that are either live, pointed to by a
                // live pointer, or (recursively) stored in a field of
                // a live allocation.
                //
                // This means we can accidentally leak non-dominating
                // nodes into the successor. However, due to the
                // non-dominance property, we are guaranteed that the
                // successor has at least one predecessor that is not
                // dominated either: this means any reference to a
                // non-dominating allocation in the successor will
                // trigger an escape and get pruned during the merge.
                m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);

                for (BasicBlock* successorBlock : block->successors()) {
                    // FIXME: Maybe we should:
                    // 1. Store the liveness pruned heap as part of m_heapAtTail
                    // 2. Move this code above where we make block merge with
                    // its predecessors before walking the block forward.
                    // https://bugs.webkit.org/show_bug.cgi?id=206041
                    LocalHeap heap = m_heapAtHead[successorBlock];
                    m_heapAtHead[successorBlock].merge(m_heap);
                    if (heap != m_heapAtHead[successorBlock])
                        changed = true;
                }
            }
        } while (changed);
    }

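    // Models the allocation of an object with internal fields (array,
    // map, and set iterators, promises): the structure is fixed at
    // allocation time, and the class's initial field values become the
    // initial heap writes.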
    template<typename InternalFieldClass>
    Allocation* handleInternalFieldClass(Node* node, HashMap<PromotedLocationDescriptor, LazyNode>& writes)
    {
        Allocation* result = &m_heap.newAllocation(node, Allocation::Kind::InternalFieldObject);
        writes.add(StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
        auto initialValues = InternalFieldClass::initialValues();
        static_assert(initialValues.size() == InternalFieldClass::numberOfInternalFields);
        for (unsigned index = 0; index < initialValues.size(); ++index)
            writes.add(PromotedLocationDescriptor(InternalFieldObjectPLoc, index), LazyNode(m_graph.freeze(initialValues[index])));

        return result;
    }

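    // Interprets a single node against m_heap. The two functors let
    // callers observe what the node does to promoted heap locations:
    // heapWrite is invoked for every write of a promoted location (with
    // the written value as a LazyNode), and heapResolve maps a promoted
    // location to the node currently holding its value, or nullptr if
    // none is available. The analysis passes run this with no-op
    // functors and only care about the resulting heap updates and
    // escapes.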
    template<typename WriteFunctor, typename ResolveFunctor>
    void handleNode(
        Node* node,
        const WriteFunctor& heapWrite,
        const ResolveFunctor& heapResolve)
    {
        m_heap.assertIsValid();
        ASSERT(m_heap.takeEscapees().isEmpty());

        Allocation* target = nullptr;
        HashMap<PromotedLocationDescriptor, LazyNode> writes;
        PromotedLocationDescriptor exactRead;

        switch (node->op()) {
        case NewObject:
            target = &m_heap.newAllocation(node, Allocation::Kind::Object);
            target->setStructures(node->structure());
            writes.add(
                StructurePLoc, LazyNode(m_graph.freeze(node->structure().get())));
            break;

        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncGeneratorFunction:
        case NewAsyncFunction: {
            if (isStillValid(node->castOperand<FunctionExecutable*>())) {
                m_heap.escape(node->child1().node());
                break;
            }

            if (node->op() == NewGeneratorFunction)
                target = &m_heap.newAllocation(node, Allocation::Kind::GeneratorFunction);
            else if (node->op() == NewAsyncFunction)
                target = &m_heap.newAllocation(node, Allocation::Kind::AsyncFunction);
            else if (node->op() == NewAsyncGeneratorFunction)
                target = &m_heap.newAllocation(node, Allocation::Kind::AsyncGeneratorFunction);
            else
                target = &m_heap.newAllocation(node, Allocation::Kind::Function);

            writes.add(FunctionExecutablePLoc, LazyNode(node->cellOperand()));
            writes.add(FunctionActivationPLoc, LazyNode(node->child1().node()));
            break;
        }

        case NewInternalFieldObject: {
            switch (node->structure()->typeInfo().type()) {
            case JSArrayIteratorType:
                target = handleInternalFieldClass<JSArrayIterator>(node, writes);
                break;
            case JSMapIteratorType:
                target = handleInternalFieldClass<JSMapIterator>(node, writes);
                break;
            case JSSetIteratorType:
                target = handleInternalFieldClass<JSSetIterator>(node, writes);
                break;
            case JSPromiseType:
                if (node->structure()->classInfoForCells() == JSInternalPromise::info())
                    target = handleInternalFieldClass<JSInternalPromise>(node, writes);
                else {
                    ASSERT(node->structure()->classInfoForCells() == JSPromise::info());
                    target = handleInternalFieldClass<JSPromise>(node, writes);
                }
                break;
            default:
                DFG_CRASH(m_graph, node, "Bad structure");
            }
            break;
        }

        case NewRegexp: {
            target = &m_heap.newAllocation(node, Allocation::Kind::RegExpObject);

            writes.add(RegExpObjectRegExpPLoc, LazyNode(node->cellOperand()));
            writes.add(RegExpObjectLastIndexPLoc, LazyNode(node->child1().node()));
            break;
        }

        case CreateActivation: {
            if (isStillValid(node->castOperand<SymbolTable*>())) {
                m_heap.escape(node->child1().node());
                break;
            }
            target = &m_heap.newAllocation(node, Allocation::Kind::Activation);
            writes.add(ActivationSymbolTablePLoc, LazyNode(node->cellOperand()));
            writes.add(ActivationScopePLoc, LazyNode(node->child1().node()));
            {
                SymbolTable* symbolTable = node->castOperand<SymbolTable*>();
                LazyNode initialValue(m_graph.freeze(node->initializationValueForActivation()));
                for (unsigned offset = 0; offset < symbolTable->scopeSize(); ++offset) {
                    writes.add(
                        PromotedLocationDescriptor(ClosureVarPLoc, offset),
                        initialValue);
                }
            }
            break;
        }

        case PutStructure:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isObjectAllocation()) {
                writes.add(StructurePLoc, LazyNode(m_graph.freeze(JSValue(node->transition()->next.get()))));
                target->setStructures(node->transition()->next);
            } else
                m_heap.escape(node->child1().node());
            break;

        case CheckStructureOrEmpty:
        case CheckStructure: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                RegisteredStructureSet filteredStructures = allocation->structures();
                filteredStructures.filter(node->structureSet());
                if (filteredStructures.isEmpty()) {
                    // FIXME: Write a test for this:
                    // https://bugs.webkit.org/show_bug.cgi?id=174322
                    m_heap.escape(node->child1().node());
                    break;
                }
                allocation->setStructures(filteredStructures);
                if (Node* value = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc)))
                    node->convertToCheckStructureImmediate(value);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case GetByOffset:
        case GetGetterSetterByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                exactRead = PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber);
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case MultiGetByOffset: {
            Allocation* allocation = m_heap.onlyLocalAllocation(node->child1().node());
            if (allocation && allocation->isObjectAllocation()) {
                MultiGetByOffsetData& data = node->multiGetByOffsetData();
                RegisteredStructureSet validStructures;
                bool hasInvalidStructures = false;
                for (const auto& multiGetByOffsetCase : data.cases) {
                    if (!allocation->structures().overlaps(multiGetByOffsetCase.set()))
                        continue;

                    switch (multiGetByOffsetCase.method().kind()) {
                    case GetByOffsetMethod::LoadFromPrototype: // We need to escape those
                    case GetByOffsetMethod::Constant: // We don't really have a way of expressing this
                        hasInvalidStructures = true;
                        break;

                    case GetByOffsetMethod::Load: // We're good
                        validStructures.merge(multiGetByOffsetCase.set());
                        break;

                    default:
                        RELEASE_ASSERT_NOT_REACHED();
                    }
                }
                if (hasInvalidStructures || validStructures.isEmpty()) {
                    m_heap.escape(node->child1().node());
                    break;
                }
                unsigned identifierNumber = data.identifierNumber;
                PromotedHeapLocation location(NamedPropertyPLoc, allocation->identifier(), identifierNumber);
                if (Node* value = heapResolve(location)) {
                    if (allocation->structures().isSubsetOf(validStructures))
                        node->replaceWithWithoutChecks(value);
                    else {
                        Node* structure = heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                        ASSERT(structure);
                        allocation->filterStructures(validStructures);
                        node->convertToCheckStructure(m_graph.addStructureSet(allocation->structures()));
                        node->convertToCheckStructureImmediate(structure);
                        node->setReplacement(value);
                    }
                } else if (!allocation->structures().isSubsetOf(validStructures)) {
                    // Even though we don't need the result here, we still need
                    // to make the call to tell our caller that we could need
                    // the StructurePLoc.
                    // The reason for this is that when we decide not to sink a
                    // node, we will still lower any read to its fields before
                    // it escapes (which are usually reads across a function
                    // call that DFGClobberize can't handle) - but we only do
                    // this for PromotedHeapLocations that we have seen read
                    // during the analysis!
                    heapResolve(PromotedHeapLocation(allocation->identifier(), StructurePLoc));
                    allocation->filterStructures(validStructures);
                }
                Node* identifier = allocation->get(location.descriptor());
                if (identifier)
                    m_heap.newPointer(node, identifier);
            } else
                m_heap.escape(node->child1().node());
            break;
        }

        case PutByOffset:
            target = m_heap.onlyLocalAllocation(node->child2().node());
            if (target && target->isObjectAllocation()) {
                unsigned identifierNumber = node->storageAccessData().identifierNumber;
                writes.add(
                    PromotedLocationDescriptor(NamedPropertyPLoc, identifierNumber),
                    LazyNode(node->child3().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
                m_heap.escape(node->child3().node());
            }
            break;

        case GetClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                exactRead =
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset());
            } else
                m_heap.escape(node->child1().node());
            break;

        case PutClosureVar:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(ClosureVarPLoc, node->scopeOffset().offset()),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case SkipScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isActivationAllocation())
                exactRead = ActivationScopePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetExecutable:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionExecutablePLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetScope:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isFunctionAllocation())
                exactRead = FunctionActivationPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case GetRegExpObjectLastIndex:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isRegExpObjectAllocation())
                exactRead = RegExpObjectLastIndexPLoc;
            else
                m_heap.escape(node->child1().node());
            break;

        case SetRegExpObjectLastIndex:
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isRegExpObjectAllocation()) {
                writes.add(
                    PromotedLocationDescriptor(RegExpObjectLastIndexPLoc),
                    LazyNode(node->child2().node()));
            } else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;

        case GetInternalField: {
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isInternalFieldObjectAllocation())
                exactRead = PromotedLocationDescriptor(InternalFieldObjectPLoc, node->internalFieldIndex());
            else
                m_heap.escape(node->child1().node());
            break;
        }

        case PutInternalField: {
            target = m_heap.onlyLocalAllocation(node->child1().node());
            if (target && target->isInternalFieldObjectAllocation())
                writes.add(PromotedLocationDescriptor(InternalFieldObjectPLoc, node->internalFieldIndex()), LazyNode(node->child2().node()));
            else {
                m_heap.escape(node->child1().node());
                m_heap.escape(node->child2().node());
            }
            break;
        }

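        // A check that is subsumed by SpecObject only tests that the
        // value is an object, which a local allocation trivially
        // satisfies, so it does not count as an escape.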
        case Check:
        case CheckVarargs:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    if (edge.willNotHaveCheck())
                        return;

                    if (alreadyChecked(edge.useKind(), SpecObject))
                        return;

                    m_heap.escape(edge.node());
                });
            break;

        case MovHint:
        case PutHint:
            // Handled by OSR availability analysis
            break;

        case FilterCallLinkStatus:
        case FilterGetByStatus:
        case FilterPutByStatus:
        case FilterInByStatus:
        case FilterDeleteByStatus:
        case FilterCheckPrivateBrandStatus:
        case FilterSetPrivateBrandStatus:
            break;

        default:
            m_graph.doToChildren(
                node,
                [&] (Edge edge) {
                    m_heap.escape(edge.node());
                });
            break;
        }

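        // A node that performed an exact read of a promoted location is
        // replaced with the currently known value for that location when
        // one is available; if the field holds a tracked allocation, the
        // node also becomes a pointer to it.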
        if (exactRead) {
            ASSERT(target);
            ASSERT(writes.isEmpty());
            if (Node* value = heapResolve(PromotedHeapLocation(target->identifier(), exactRead))) {
                ASSERT(!value->replacement());
                node->replaceWith(m_graph, value);
            }
            Node* identifier = target->get(exactRead);
            if (identifier)
                m_heap.newPointer(node, identifier);
        }

        for (auto entry : writes) {
            ASSERT(target);
            if (entry.value.isNode())
                target->set(entry.key, m_heap.follow(entry.value.asNode()));
            else
                target->remove(entry.key);
            heapWrite(PromotedHeapLocation(target->identifier(), entry.key), entry.value);
        }

        m_heap.assertIsValid();
    }

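    // Decides which allocations are worth sinking, per the rules
    // described below, and records the dependencies between candidates
    // that are needed to enforce the closure rule.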
    bool determineSinkCandidates()
    {
        m_sinkCandidates.clear();
        m_materializationToEscapee.clear();
        m_materializationSiteToMaterializations.clear();
        m_materializationSiteToRecoveries.clear();
        m_materializationSiteToHints.clear();

        // Logically we wish to consider every allocation and sink
        // it. However, it is probably not profitable to sink an
        // allocation that will always escape. So, we only sink an
        // allocation if one of the following is true:
        //
        // 1) There exists a basic block with only backwards outgoing
        //    edges (or no outgoing edges) in which the node wasn't
        //    materialized. This is meant to catch
        //    effectively-infinite loops in which we don't need to
        //    have allocated the object.
        //
        // 2) There exists a basic block at the tail of which the node
        //    is dead and not materialized.
        //
        // 3) The sum of execution counts of the materializations is
        //    less than the sum of execution counts of the original
        //    node.
        //
        // We currently implement only rule #2.
        // FIXME: Implement the two other rules.
        // https://bugs.webkit.org/show_bug.cgi?id=137073 (rule #1)
        // https://bugs.webkit.org/show_bug.cgi?id=137074 (rule #3)
        //
        // However, these rules allow for a sunk object to be put into
        // a non-sunk one, which we don't support. We could solve this
        // by supporting PutHints on local allocations, making these
        // objects only partially correct, and we would need to adapt
        // the OSR availability analysis and OSR exit to handle
        // this. This would be totally doable, but would create a
        // super rare, and thus bug-prone, code path.
        // So, instead, we need to implement one of the following
        // closure rules:
        //
        // 1) If we put a sink candidate into a local allocation that
        //    is not a sink candidate, change our minds and don't
        //    actually sink the sink candidate.
        //
        // 2) If we put a sink candidate into a local allocation, that
        //    allocation becomes a sink candidate as well.
        //
        // We currently choose to implement closure rule #2.
        HashMap<Node*, Vector<Node*>> dependencies;
        bool hasUnescapedReads = false;
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!value.isNode())
                            return;

                        Allocation* allocation = m_heap.onlyLocalAllocation(value.asNode());
                        if (allocation && !allocation->isEscapedAllocation())
                            dependencies.add(allocation->identifier(), Vector<Node*>()).iterator->value.append(location.base());
                    },
                    [&] (PromotedHeapLocation) -> Node* {
                        hasUnescapedReads = true;
                        return nullptr;
                    });
            }

            // The sink candidates are initially the unescaped
1373 | // allocations dying at tail of blocks
|
---|
1374 | NodeSet allocations;
|
---|
1375 | for (const auto& entry : m_heap.allocations()) {
|
---|
1376 | if (!entry.value.isEscapedAllocation())
|
---|
1377 | allocations.addVoid(entry.key);
|
---|
1378 | }
|
---|
1379 |
|
---|
1380 | m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
|
---|
1381 |
|
---|
1382 | for (Node* identifier : allocations) {
|
---|
1383 | if (!m_heap.isAllocation(identifier))
|
---|
1384 | m_sinkCandidates.addVoid(identifier);
|
---|
1385 | }
|
---|
1386 | }
|
---|
1387 |
|
---|
1388 | auto forEachEscapee = [&] (auto callback) {
|
---|
1389 | for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
|
---|
1390 | m_heap = m_heapAtHead[block];
|
---|
1391 | m_heap.setWantEscapees();
|
---|
1392 |
|
---|
1393 | for (Node* node : *block) {
|
---|
1394 | handleNode(
|
---|
1395 | node,
|
---|
1396 | [] (PromotedHeapLocation, LazyNode) { },
|
---|
1397 | [] (PromotedHeapLocation) -> Node* {
|
---|
1398 | return nullptr;
|
---|
1399 | });
|
---|
1400 | auto escapees = m_heap.takeEscapees();
|
---|
1401 | escapees.removeIf([&] (const auto& entry) { return !m_sinkCandidates.contains(entry.key); });
|
---|
1402 | callback(escapees, node);
|
---|
1403 | }
|
---|
1404 |
|
---|
1405 | m_heap.pruneByLiveness(m_combinedLiveness.liveAtTail[block]);
|
---|
1406 |
|
---|
1407 | {
|
---|
1408 | HashMap<Node*, Allocation> escapingOnEdge;
|
---|
1409 | for (const auto& entry : m_heap.allocations()) {
|
---|
1410 | if (entry.value.isEscapedAllocation())
|
---|
1411 | continue;
|
---|
1412 |
|
---|
1413 | bool mustEscape = false;
|
---|
1414 | for (BasicBlock* successorBlock : block->successors()) {
|
---|
1415 | if (!m_heapAtHead[successorBlock].isAllocation(entry.key)
|
---|
1416 | || m_heapAtHead[successorBlock].getAllocation(entry.key).isEscapedAllocation())
|
---|
1417 | mustEscape = true;
|
---|
1418 | }
|
---|
1419 |
|
---|
1420 | if (mustEscape && m_sinkCandidates.contains(entry.key))
|
---|
1421 | escapingOnEdge.add(entry.key, entry.value);
|
---|
1422 | }
|
---|
1423 | callback(escapingOnEdge, block->terminal());
|
---|
1424 | }
|
---|
1425 | }
|
---|
1426 | };
|
---|
1427 |
|
---|
1428 | if (m_sinkCandidates.size()) {
|
---|
1429 | // If we're moving an allocation to `where` in the program, we need to ensure
|
---|
1430 | // we can still walk the stack at that point in the program for the
|
---|
1431 | // InlineCallFrame of the original allocation. Certain InlineCallFrames rely on
|
---|
1432 | // data in the stack when taking a stack trace. All allocation sites can do a
|
---|
1433 | // stack walk (we do a stack walk when we GC). Conservatively, we say we're
|
---|
1434 | // still ok to move this allocation if we are moving within the same InlineCallFrame.
|
---|
1435 | // We could be more precise here and do an analysis of stack writes. However,
|
---|
1436 | // this scenario is so rare that we just take the conservative-and-straight-forward
|
---|
1437 | // approach of checking that we're in the same InlineCallFrame.
|
---|
1438 |
|
---|
1439 | forEachEscapee([&] (HashMap<Node*, Allocation>& escapees, Node* where) {
|
---|
1440 | for (Node* allocation : escapees.keys()) {
|
---|
1441 | InlineCallFrame* inlineCallFrame = allocation->origin.semantic.inlineCallFrame();
|
---|
1442 | if (!inlineCallFrame)
|
---|
1443 | continue;
|
---|
1444 | if ((inlineCallFrame->isClosureCall || inlineCallFrame->isVarargs()) && inlineCallFrame != where->origin.semantic.inlineCallFrame())
|
---|
1445 | m_sinkCandidates.remove(allocation);
|
---|
1446 | }
|
---|
1447 | });
|
---|
1448 | }
|
---|
1449 |
|
---|
1450 | // Ensure that the set of sink candidates is closed for put operations
|
---|
1451 | // This is (2) as described above.
|
---|
1452 | Vector<Node*> worklist;
|
---|
1453 | worklist.appendRange(m_sinkCandidates.begin(), m_sinkCandidates.end());
|
---|
1454 |
|
---|
1455 | while (!worklist.isEmpty()) {
|
---|
1456 | for (Node* identifier : dependencies.get(worklist.takeLast())) {
|
---|
1457 | if (m_sinkCandidates.add(identifier).isNewEntry)
|
---|
1458 | worklist.append(identifier);
|
---|
1459 | }
|
---|
1460 | }
|
---|
1461 |
|
---|
1462 | if (m_sinkCandidates.isEmpty())
|
---|
1463 | return hasUnescapedReads;
|
---|
1464 |
|
---|
1465 | if (DFGObjectAllocationSinkingPhaseInternal::verbose)
|
---|
1466 | dataLog("Candidates: ", listDump(m_sinkCandidates), "\n");
|
---|
1467 |
|
---|
1468 | // Create the materialization nodes.
|
---|
1469 | forEachEscapee([&] (HashMap<Node*, Allocation>& escapees, Node* where) {
|
---|
1470 | placeMaterializations(WTFMove(escapees), where);
|
---|
1471 | });
|
---|
1472 |
|
---|
1473 | return hasUnescapedReads || !m_sinkCandidates.isEmpty();
|
---|
1474 | }
|
---|
1475 |
|
---|
    void placeMaterializations(HashMap<Node*, Allocation> escapees, Node* where)
    {
        // First collect the hints that will be needed when the node
        // we materialize is still stored into other unescaped sink candidates.
        // The way to interpret this vector is:
        //
        // PromotedHeapLocation(NotEscapedAllocation, field) = identifierAllocation
        //
        // e.g.:
        // PromotedHeapLocation(@PhantomNewFunction, FunctionActivationPLoc) = IdentifierOf(@MaterializeCreateActivation)
        // or:
        // PromotedHeapLocation(@PhantomCreateActivation, ClosureVarPLoc(x)) = IdentifierOf(@NewFunction)
        //
        // Here the rhs of the `=` is to be materialized at this `where` point in the program,
        // and IdentifierOf(Materialization) is the original sunken allocation of the materialization.
        //
        // The reason we need to collect all the `identifiers` here is that
        // we may materialize multiple versions of the allocation along control
        // flow edges. We will PutHint these values along those edges. However,
        // we also need to PutHint them when we join and have a Phi of the allocations.
        Vector<std::pair<PromotedHeapLocation, Node*>> hints;
        for (const auto& entry : m_heap.allocations()) {
            if (escapees.contains(entry.key))
                continue;

            for (const auto& field : entry.value.fields()) {
                ASSERT(m_sinkCandidates.contains(entry.key) || !escapees.contains(field.value));
                auto iter = escapees.find(field.value);
                if (iter != escapees.end()) {
                    ASSERT(m_sinkCandidates.contains(field.value));
                    hints.append(std::make_pair(PromotedHeapLocation(entry.key, field.key), field.value));
                }
            }
        }

        // Now we need to order the materializations. Any order is
        // valid (as long as we materialize a node first if it is
        // needed for the materialization of another node, e.g. a
        // function's activation must be materialized before the
        // function itself), but we want to try minimizing the number
        // of times we have to place Puts to close cycles after a
        // materialization. In other words, we are trying to find the
        // minimum number of materializations to remove from the
        // materialization graph to make it a DAG, known as the
        // (minimum) feedback vertex set problem. Unfortunately, this
        // is an NP-hard problem, which we don't want to solve exactly.
        //
        // Instead, we use a simple greedy procedure that proceeds as
        // follows:
        //  - While there is at least one node with no outgoing edge
        //    amongst the remaining materializations, materialize it
        //    first
        //
        //  - Similarly, while there is at least one node with no
        //    incoming edge amongst the remaining materializations,
        //    materialize it last.
        //
        //  - When both previous conditions are false, we have an
        //    actual cycle, and we need to pick a node to
        //    materialize. We try greedily to remove the "pressure" on
        //    the remaining nodes by choosing the node with maximum
        //    |incoming edges| * |outgoing edges| as a measure of how
        //    "central" to the graph it is. We materialize it first,
        //    so that all the recoveries will be Puts of things into
        //    it (rather than Puts of the materialization into other
        //    objects), which means we will have a single
        //    StoreBarrier.
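        //
        // For example, if the remaining escapees a and b form a
        // two-node cycle (a.f == b and b.g == a), neither of the first
        // two rules applies, so we pick whichever of the two has the
        // larger |incoming edges| * |outgoing edges| product,
        // materialize it first, and close the cycle with a real Put
        // into it once the other node is materialized.
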
        // Compute dependencies between materializations
        HashMap<Node*, NodeSet> dependencies;
        HashMap<Node*, NodeSet> reverseDependencies;
        HashMap<Node*, NodeSet> forMaterialization;
        for (const auto& entry : escapees) {
            auto& myDependencies = dependencies.add(entry.key, NodeSet()).iterator->value;
            auto& myDependenciesForMaterialization = forMaterialization.add(entry.key, NodeSet()).iterator->value;
            reverseDependencies.add(entry.key, NodeSet());
            for (const auto& field : entry.value.fields()) {
                if (escapees.contains(field.value) && field.value != entry.key) {
                    myDependencies.addVoid(field.value);
                    reverseDependencies.add(field.value, NodeSet()).iterator->value.addVoid(entry.key);
                    if (field.key.neededForMaterialization())
                        myDependenciesForMaterialization.addVoid(field.value);
                }
            }
        }

        // Helper function to update the materialized set and the
        // dependencies
        NodeSet materialized;
        auto materialize = [&] (Node* identifier) {
            materialized.addVoid(identifier);
            for (Node* dep : dependencies.get(identifier))
                reverseDependencies.find(dep)->value.remove(identifier);
            for (Node* rdep : reverseDependencies.get(identifier)) {
                dependencies.find(rdep)->value.remove(identifier);
                forMaterialization.find(rdep)->value.remove(identifier);
            }
            dependencies.remove(identifier);
            reverseDependencies.remove(identifier);
            forMaterialization.remove(identifier);
        };

        // Nodes without remaining unmaterialized fields will be
        // materialized first - amongst the remaining unmaterialized
        // nodes
        Vector<Allocation> toMaterialize;
        toMaterialize.resize(escapees.size());
        size_t firstIndex = 0;
        size_t lastIndex = toMaterialize.size();
        auto materializeFirst = [&] (Allocation&& allocation) {
            RELEASE_ASSERT(firstIndex < lastIndex);
            materialize(allocation.identifier());
            toMaterialize[firstIndex] = WTFMove(allocation);
            ++firstIndex;
        };

        // Nodes that no other unmaterialized node points to will be
        // materialized last - amongst the remaining unmaterialized
        // nodes
        auto materializeLast = [&] (Allocation&& allocation) {
            materialize(allocation.identifier());
            RELEASE_ASSERT(firstIndex < lastIndex);
            RELEASE_ASSERT(lastIndex);
            --lastIndex;
            toMaterialize[lastIndex] = WTFMove(allocation);
        };

        // These are the promoted locations that contain some of the
        // allocations we are currently escaping. If they are a location on
        // some other allocation we are currently materializing, we will need
        // to "recover" their value with a real put once the corresponding
        // allocation is materialized; if they are a location on some other
        // not-yet-materialized allocation, we will need a PutHint.
        Vector<PromotedHeapLocation> toRecover;

        // This loop does the actual cycle breaking
        while (!escapees.isEmpty()) {
            materialized.clear();

            // Materialize nodes that won't require recoveries if we can
            for (auto& entry : escapees) {
                if (!forMaterialization.find(entry.key)->value.isEmpty())
                    continue;

                if (dependencies.find(entry.key)->value.isEmpty()) {
                    materializeFirst(WTFMove(entry.value));
                    continue;
                }

                if (reverseDependencies.find(entry.key)->value.isEmpty()) {
                    materializeLast(WTFMove(entry.value));
                    continue;
                }
            }

            // We reach this only if there is an actual cycle that needs
            // breaking. Because we do not want to solve an NP-hard problem
            // here, we just heuristically pick a node and materialize it
            // first.
            if (materialized.isEmpty()) {
                uint64_t maxEvaluation = 0;
                Allocation* bestAllocation = nullptr;
                for (auto& entry : escapees) {
                    if (!forMaterialization.find(entry.key)->value.isEmpty())
                        continue;

                    uint64_t evaluation =
                        static_cast<uint64_t>(dependencies.get(entry.key).size()) * reverseDependencies.get(entry.key).size();
                    if (evaluation > maxEvaluation) {
                        maxEvaluation = evaluation;
                        bestAllocation = &entry.value;
                    }
                }
                RELEASE_ASSERT(maxEvaluation > 0);

                materializeFirst(WTFMove(*bestAllocation));
            }
            RELEASE_ASSERT(!materialized.isEmpty());

            for (Node* identifier : materialized)
                escapees.remove(identifier);
        }

        RELEASE_ASSERT(firstIndex == lastIndex);

        materialized.clear();

        NodeSet escaped;
        for (const Allocation& allocation : toMaterialize)
            escaped.addVoid(allocation.identifier());
        for (const Allocation& allocation : toMaterialize) {
            for (const auto& field : allocation.fields()) {
                if (escaped.contains(field.value) && !materialized.contains(field.value))
                    toRecover.append(PromotedHeapLocation(allocation.identifier(), field.key));
            }
            materialized.addVoid(allocation.identifier());
        }

        Vector<Node*>& materializations = m_materializationSiteToMaterializations.add(
            where, Vector<Node*>()).iterator->value;

        for (const Allocation& allocation : toMaterialize) {
            Node* materialization = createMaterialization(allocation, where);
            materializations.append(materialization);
            m_materializationToEscapee.add(materialization, allocation.identifier());
        }

        if (!toRecover.isEmpty()) {
            m_materializationSiteToRecoveries.add(
                where, Vector<PromotedHeapLocation>()).iterator->value.appendVector(toRecover);
        }

        // The hints need to be after the "real" recoveries so that we
        // don't hint not-yet-complete objects
        m_materializationSiteToHints.add(
            where, Vector<std::pair<PromotedHeapLocation, Node*>>()).iterator->value.appendVector(hints);
    }

    Node* createMaterialization(const Allocation& allocation, Node* where)
    {
        // FIXME: This is the only place where we actually use the
        // fact that an allocation's identifier is indeed the node
        // that created the allocation.
        switch (allocation.kind()) {
        case Allocation::Kind::Object: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeNewObject,
                where->origin.withSemantic(allocation.identifier()->origin.semantic),
                OpInfo(m_graph.addStructureSet(allocation.structuresForMaterialization())), OpInfo(data), 0, 0);
        }

        case Allocation::Kind::AsyncGeneratorFunction:
        case Allocation::Kind::AsyncFunction:
        case Allocation::Kind::GeneratorFunction:
        case Allocation::Kind::Function: {
            FrozenValue* executable = allocation.identifier()->cellOperand();

            NodeType nodeType;
            switch (allocation.kind()) {
            case Allocation::Kind::GeneratorFunction:
                nodeType = NewGeneratorFunction;
                break;
            case Allocation::Kind::AsyncGeneratorFunction:
                nodeType = NewAsyncGeneratorFunction;
                break;
            case Allocation::Kind::AsyncFunction:
                nodeType = NewAsyncFunction;
                break;
            default:
                nodeType = NewFunction;
            }

            return m_graph.addNode(
                allocation.identifier()->prediction(), nodeType,
                where->origin.withSemantic(
                    allocation.identifier()->origin.semantic),
                OpInfo(executable));
        }

        case Allocation::Kind::InternalFieldObject: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeNewInternalFieldObject,
                where->origin.withSemantic(
                    allocation.identifier()->origin.semantic),
                OpInfo(allocation.identifier()->structure()), OpInfo(data), 0, 0);
        }

        case Allocation::Kind::Activation: {
            ObjectMaterializationData* data = m_graph.m_objectMaterializationData.add();
            FrozenValue* symbolTable = allocation.identifier()->cellOperand();

            return m_graph.addNode(
                allocation.identifier()->prediction(), Node::VarArg, MaterializeCreateActivation,
                where->origin.withSemantic(
                    allocation.identifier()->origin.semantic),
                OpInfo(symbolTable), OpInfo(data), 0, 0);
        }

        case Allocation::Kind::RegExpObject: {
            FrozenValue* regExp = allocation.identifier()->cellOperand();
            return m_graph.addNode(
                allocation.identifier()->prediction(), NewRegexp,
                where->origin.withSemantic(
                    allocation.identifier()->origin.semantic),
                OpInfo(regExp));
        }

        default:
            DFG_CRASH(m_graph, allocation.identifier(), "Bad allocation kind");
        }
    }

    void promoteLocalHeap()
    {
        // Collect the set of heap locations that we will be operating
        // over.
        HashSet<PromotedHeapLocation> locations;
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (Node* node : *block) {
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode) {
                        // If the location is not on a sink candidate,
                        // we only sink it if it is read
                        if (m_sinkCandidates.contains(location.base()))
                            locations.addVoid(location);
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        locations.addVoid(location);
                        return nullptr;
                    });
            }
        }

        // Figure out which locations belong to which allocations.
        m_locationsForAllocation.clear();
        for (PromotedHeapLocation location : locations) {
            auto result = m_locationsForAllocation.add(
                location.base(),
                Vector<PromotedHeapLocation>());
            ASSERT(!result.iterator->value.contains(location));
            result.iterator->value.append(location);
        }

        m_pointerSSA.reset();
        m_allocationSSA.reset();

        // Collect the set of "variables" that we will be sinking.
        m_locationToVariable.clear();
        m_nodeToVariable.clear();
        Vector<Node*> indexToNode;
        Vector<PromotedHeapLocation> indexToLocation;

        for (Node* index : m_sinkCandidates) {
            SSACalculator::Variable* variable = m_allocationSSA.newVariable();
            m_nodeToVariable.add(index, variable);
            ASSERT(indexToNode.size() == variable->index());
            indexToNode.append(index);
        }

        for (PromotedHeapLocation location : locations) {
            SSACalculator::Variable* variable = m_pointerSSA.newVariable();
            m_locationToVariable.add(location, variable);
            ASSERT(indexToLocation.size() == variable->index());
            indexToLocation.append(location);
        }

        // We insert all required constants at the top of block 0 so that
        // they are inserted only once and we don't clutter the graph
        // with useless constants everywhere
        HashMap<FrozenValue*, Node*> lazyMapping;
        if (!m_bottom)
            m_bottom = m_insertionSet.insertConstant(0, m_graph.block(0)->at(0)->origin, jsNumber(1927));

        Vector<HashSet<PromotedHeapLocation>> hintsForPhi(m_sinkCandidates.size());

        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            m_heap = m_heapAtHead[block];

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                // Some named properties can be added conditionally,
                // and that would necessitate bottoms
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    SSACalculator::Variable* variable = m_locationToVariable.get(location);
                    m_pointerSSA.newDef(variable, block, m_bottom);
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    m_allocationSSA.newDef(m_nodeToVariable.get(escapee), block, materialization);
                }

                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node)) {
                    PromotedHeapLocation location = pair.first;
                    Node* identifier = pair.second;
                    // We're materializing `identifier` at this point, and the unmaterialized
                    // version is inside `location`. We track which SSA variable this belongs
                    // to in case we also need a PutHint for the Phi.
                    if (UNLIKELY(validationEnabled())) {
                        RELEASE_ASSERT(m_sinkCandidates.contains(location.base()));
                        RELEASE_ASSERT(m_sinkCandidates.contains(identifier));

                        bool found = false;
                        for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                            // We're materializing `identifier` here. This asserts that this is indeed the case.
                            if (m_materializationToEscapee.get(materialization) == identifier) {
                                found = true;
                                break;
                            }
                        }
                        RELEASE_ASSERT(found);
                    }

                    SSACalculator::Variable* variable = m_nodeToVariable.get(identifier);
                    hintsForPhi[variable->index()].addVoid(location);
                }

                if (m_sinkCandidates.contains(node))
                    m_allocationSSA.newDef(m_nodeToVariable.get(node), block, node);

                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else {
                            auto iter = lazyMapping.find(value.asValue());
                            if (iter != lazyMapping.end())
                                nodeValue = iter->value;
                            else {
                                nodeValue = value.ensureIsNode(
                                    m_insertionSet, m_graph.block(0), 0);
                                lazyMapping.add(value.asValue(), nodeValue);
                            }
                        }

                        SSACalculator::Variable* variable = m_locationToVariable.get(location);
                        m_pointerSSA.newDef(variable, block, nodeValue);
                    },
                    [] (PromotedHeapLocation) -> Node* {
                        return nullptr;
                    });
            }
        }
        m_insertionSet.execute(m_graph.block(0));

        // Run the SSA calculators to create Phis
        m_pointerSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                PromotedHeapLocation location = indexToLocation[variable->index()];

                // Don't create Phi nodes for fields of dead allocations
                if (!m_heapAtHead[block].isAllocation(location.base()))
                    return nullptr;

                // Don't create Phi nodes once we are escaped
                if (m_heapAtHead[block].getAllocation(location.base()).isEscapedAllocation())
                    return nullptr;

                // If we point to a single allocation, we will
                // directly use its materialization
                if (m_heapAtHead[block].follow(location))
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        m_allocationSSA.computePhis(
            [&] (SSACalculator::Variable* variable, BasicBlock* block) -> Node* {
                Node* identifier = indexToNode[variable->index()];

                // Don't create Phi nodes for dead allocations
                if (!m_heapAtHead[block].isAllocation(identifier))
                    return nullptr;

                // Don't create Phi nodes until we are escaped
                if (!m_heapAtHead[block].getAllocation(identifier).isEscapedAllocation())
                    return nullptr;

                Node* phiNode = m_graph.addNode(SpecHeapTop, Phi, block->at(0)->origin.withInvalidExit());
                phiNode->mergeFlags(NodeResultJS);
                return phiNode;
            });

        // Place Phis in the right places, replace all uses of any load with the appropriate
        // value, and create the materialization nodes.
        LocalOSRAvailabilityCalculator availabilityCalculator(m_graph);
        m_graph.clearReplacements();
        for (BasicBlock* block : m_graph.blocksInPreOrder()) {
            m_heap = m_heapAtHead[block];
            availabilityCalculator.beginBlock(block);

            // These mapping tables are intended to be lazy. If
            // something is omitted from the table, it means that
            // there haven't been any local stores to the promoted
            // heap location (or any local materialization).
            m_localMapping.clear();
            m_escapeeToMaterialization.clear();

            // Insert the Phi functions that we had previously
            // created.
            for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                PromotedHeapLocation location = indexToLocation[variable->index()];
                m_localMapping.set(location, phiDef->value());

                if (m_sinkCandidates.contains(location.base())) {
                    m_insertionSet.insert(
                        0,
                        location.createHint(
                            m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                }
            }

            for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(block)) {
                SSACalculator::Variable* variable = phiDef->variable();
                m_insertionSet.insert(0, phiDef->value());

                Node* identifier = indexToNode[variable->index()];
                m_escapeeToMaterialization.add(identifier, phiDef->value());
                bool canExit = false;
                insertOSRHintsForUpdate(
                    0, block->at(0)->origin, canExit,
                    availabilityCalculator.m_availability, identifier, phiDef->value());

                for (PromotedHeapLocation location : hintsForPhi[variable->index()]) {
                    if (m_heap.isUnescapedAllocation(location.base())) {
                        m_insertionSet.insert(0,
                            location.createHint(m_graph, block->at(0)->origin.withInvalidExit(), phiDef->value()));
                        m_localMapping.set(location, phiDef->value());
                    }
                }
            }

            if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
                dataLog("Local mapping at ", pointerDump(block), ": ", mapDump(m_localMapping), "\n");
                dataLog("Local materializations at ", pointerDump(block), ": ", mapDump(m_escapeeToMaterialization), "\n");
            }

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);
                bool canExit = true;
                bool nextCanExit = node->origin.exitOK;
                for (PromotedHeapLocation location : m_locationsForAllocation.get(node)) {
                    if (location.kind() != NamedPropertyPLoc)
                        continue;

                    m_localMapping.set(location, m_bottom);

                    if (m_sinkCandidates.contains(node)) {
                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                            dataLog("For sink candidate ", node, " found location ", location, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), m_bottom));
                    }
                }

                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    materialization->origin.exitOK &= canExit;
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    populateMaterialization(block, materialization, escapee);
                    m_escapeeToMaterialization.set(escapee, materialization);
                    m_insertionSet.insert(nodeIndex, materialization);
                    if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                        dataLog("Materializing ", escapee, " => ", materialization, " at ", node, "\n");
                }

                for (PromotedHeapLocation location : m_materializationSiteToRecoveries.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, location, node, canExit));
                for (std::pair<PromotedHeapLocation, Node*> pair : m_materializationSiteToHints.get(node))
                    m_insertionSet.insert(nodeIndex, createRecovery(block, pair.first, node, canExit));

                // We need to put the OSR hints after the recoveries,
                // because we only want the hints once the object is
                // complete
                for (Node* materialization : m_materializationSiteToMaterializations.get(node)) {
                    Node* escapee = m_materializationToEscapee.get(materialization);
                    insertOSRHintsForUpdate(
                        nodeIndex, node->origin, canExit,
                        availabilityCalculator.m_availability, escapee, materialization);
                }

                if (node->origin.exitOK && !canExit) {
                    // We indicate that the exit state is fine now. It is OK because we updated the
                    // state above. We need to indicate this manually because the validation doesn't
                    // have enough information to infer that the exit state is fine.
                    m_insertionSet.insertNode(nodeIndex, SpecNone, ExitOK, node->origin);
                }

                if (m_sinkCandidates.contains(node))
                    m_escapeeToMaterialization.set(node, node);

                availabilityCalculator.executeNode(node);

                bool desiredNextExitOK = node->origin.exitOK && !clobbersExitState(m_graph, node);

                bool doLower = false;
                handleNode(
                    node,
                    [&] (PromotedHeapLocation location, LazyNode value) {
                        if (!locations.contains(location))
                            return;

                        Node* nodeValue;
                        if (value.isNode())
                            nodeValue = value.asNode();
                        else
                            nodeValue = lazyMapping.get(value.asValue());

                        nodeValue = resolve(block, nodeValue);

                        m_localMapping.set(location, nodeValue);

                        if (!m_sinkCandidates.contains(location.base()))
                            return;

                        doLower = true;

                        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
                            dataLog("Creating hint with value ", nodeValue, " before ", node, "\n");
                        m_insertionSet.insert(
                            nodeIndex + 1,
                            location.createHint(
                                m_graph, node->origin.takeValidExit(nextCanExit), nodeValue));
                    },
                    [&] (PromotedHeapLocation location) -> Node* {
                        return resolve(block, location);
                    });

                if (!nextCanExit && desiredNextExitOK) {
                    // We indicate that the exit state is fine now. We need to do this because we
                    // emitted hints that appear to invalidate the exit state.
                    m_insertionSet.insertNode(nodeIndex + 1, SpecNone, ExitOK, node->origin);
                }

                if (m_sinkCandidates.contains(node) || doLower) {
                    switch (node->op()) {
                    case NewObject:
                        node->convertToPhantomNewObject();
                        break;

                    case NewFunction:
                        node->convertToPhantomNewFunction();
                        break;

                    case NewGeneratorFunction:
                        node->convertToPhantomNewGeneratorFunction();
                        break;

                    case NewAsyncGeneratorFunction:
                        node->convertToPhantomNewAsyncGeneratorFunction();
                        break;

                    case NewAsyncFunction:
                        node->convertToPhantomNewAsyncFunction();
                        break;

                    case NewInternalFieldObject:
                        node->convertToPhantomNewInternalFieldObject();
                        break;

                    case CreateActivation:
                        node->convertToPhantomCreateActivation();
                        break;

                    case NewRegexp:
                        node->convertToPhantomNewRegexp();
                        break;

                    default:
                        node->remove(m_graph);
                        break;
                    }
                }

                m_graph.doToChildren(
                    node,
                    [&] (Edge& edge) {
                        edge.setNode(resolve(block, edge.node()));
                    });
            }

            // Gotta drop some Upsilons.
            NodeAndIndex terminal = block->findTerminal();
            size_t upsilonInsertionPoint = terminal.index;
            NodeOrigin upsilonOrigin = terminal.node->origin;
            for (BasicBlock* successorBlock : block->successors()) {
                for (SSACalculator::Def* phiDef : m_pointerSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    PromotedHeapLocation location = indexToLocation[variable->index()];
                    Node* incoming = resolve(block, location);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }

                for (SSACalculator::Def* phiDef : m_allocationSSA.phisForBlock(successorBlock)) {
                    Node* phiNode = phiDef->value();
                    SSACalculator::Variable* variable = phiDef->variable();
                    Node* incoming = getMaterialization(block, indexToNode[variable->index()]);

                    m_insertionSet.insertNode(
                        upsilonInsertionPoint, SpecNone, Upsilon, upsilonOrigin,
                        OpInfo(phiNode), incoming->defaultEdge());
                }
            }

            m_insertionSet.execute(block);
        }
    }

    NEVER_INLINE Node* resolve(BasicBlock* block, PromotedHeapLocation location)
    {
        // If we are currently pointing to a single local allocation,
        // simply return the associated materialization.
        if (Node* identifier = m_heap.follow(location))
            return getMaterialization(block, identifier);

        if (Node* result = m_localMapping.get(location))
            return result;

        // This implies that there is no local mapping. Find a non-local mapping.
        SSACalculator::Def* def = m_pointerSSA.nonLocalReachingDef(
            block, m_locationToVariable.get(location));
        ASSERT(def);
        ASSERT(def->value());

        Node* result = def->value();
        if (result->replacement())
            result = result->replacement();
        ASSERT(!result->replacement());

        m_localMapping.add(location, result);
        return result;
    }

    NEVER_INLINE Node* resolve(BasicBlock* block, Node* node)
    {
        // If we are currently pointing to a single local allocation,
        // simply return the associated materialization.
        if (Node* identifier = m_heap.follow(node))
            return getMaterialization(block, identifier);

        if (node->replacement())
            node = node->replacement();
        ASSERT(!node->replacement());

        return node;
    }

    NEVER_INLINE Node* getMaterialization(BasicBlock* block, Node* identifier)
    {
        ASSERT(m_heap.isAllocation(identifier));
        if (!m_sinkCandidates.contains(identifier))
            return identifier;

        if (Node* materialization = m_escapeeToMaterialization.get(identifier))
            return materialization;

        SSACalculator::Def* def = m_allocationSSA.nonLocalReachingDef(
            block, m_nodeToVariable.get(identifier));
        ASSERT(def && def->value());
        m_escapeeToMaterialization.add(identifier, def->value());
        ASSERT(!def->value()->replacement());
        return def->value();
    }

    void insertOSRHintsForUpdate(unsigned nodeIndex, NodeOrigin origin, bool& canExit, AvailabilityMap& availability, Node* escapee, Node* materialization)
    {
        if (DFGObjectAllocationSinkingPhaseInternal::verbose) {
            dataLog("Inserting OSR hints at ", origin, ":\n");
            dataLog("    Escapee: ", escapee, "\n");
            dataLog("    Materialization: ", materialization, "\n");
            dataLog("    Availability: ", availability, "\n");
        }

        // We need to follow() the value in the heap.
        // Consider the following graph:
        //
        // Block #0
        //  0: NewObject({})
        //  1: NewObject({})
        //  -: PutByOffset(@0, @1, x:0)
        //  -: PutStructure(@0, {x:0})
        //  2: GetByOffset(@0, x:0)
        //  -: MovHint(@2, loc1)
        //  -: Branch(#1, #2)
        //
        // Block #1
        //  3: Call(f, @1)
        //  4: Return(@0)
        //
        // Block #2
        //  -: Return(undefined)
        //
        // We need to materialize @1 at @3, and when doing so we need
        // to insert a MovHint for the materialization into loc1 as
        // well.
        // In order to do this, we say that we need to insert an
        // update hint for any availability whose node resolve()s to
        // the materialization.
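        //
        // In the example above, loc1's availability is @2, and
        // m_heap.follow(@2) resolves to the escapee @1; so when we
        // materialize @1 at @3, we also insert a MovHint of the
        // materialization into loc1.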
        for (auto entry : availability.m_heap) {
            if (!entry.value.hasNode())
                continue;
            if (m_heap.follow(entry.value.node()) != escapee)
                continue;

            m_insertionSet.insert(
                nodeIndex,
                entry.key.createHint(m_graph, origin.takeValidExit(canExit), materialization));
        }

        for (unsigned i = availability.m_locals.size(); i--;) {
            if (!availability.m_locals[i].hasNode())
                continue;
            if (m_heap.follow(availability.m_locals[i].node()) != escapee)
                continue;

            Operand operand = availability.m_locals.operandForIndex(i);
            m_insertionSet.insertNode(
                nodeIndex, SpecNone, MovHint, origin.takeValidExit(canExit), OpInfo(operand),
                materialization->defaultEdge());
        }
    }

    void populateMaterialization(BasicBlock* block, Node* node, Node* escapee)
    {
        Allocation& allocation = m_heap.getAllocation(escapee);
        switch (node->op()) {
        case MaterializeNewObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();
            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));

            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc:
                    ASSERT(location == structure);
                    break;

                case NamedPropertyPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case MaterializeCreateActivation: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation symbolTable(ActivationSymbolTablePLoc, allocation.identifier());
            ASSERT(locations.contains(symbolTable));
            ASSERT(node->cellOperand() == resolve(block, symbolTable)->constant());
            m_graph.m_varArgChildren.append(Edge(resolve(block, symbolTable), KnownCellUse));

            PromotedHeapLocation scope(ActivationScopePLoc, allocation.identifier());
            ASSERT(locations.contains(scope));
            m_graph.m_varArgChildren.append(Edge(resolve(block, scope), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case ActivationScopePLoc: {
                    ASSERT(location == scope);
                    break;
                }

                case ActivationSymbolTablePLoc: {
                    ASSERT(location == symbolTable);
                    break;
                }

                case ClosureVarPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case NewFunction:
        case NewGeneratorFunction:
        case NewAsyncGeneratorFunction:
        case NewAsyncFunction: {
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);

            PromotedHeapLocation executable(FunctionExecutablePLoc, allocation.identifier());
            ASSERT_UNUSED(executable, locations.contains(executable));

            PromotedHeapLocation activation(FunctionActivationPLoc, allocation.identifier());
            ASSERT(locations.contains(activation));

            node->child1() = Edge(resolve(block, activation), KnownCellUse);
            break;
        }

        case MaterializeNewInternalFieldObject: {
            ObjectMaterializationData& data = node->objectMaterializationData();

            unsigned firstChild = m_graph.m_varArgChildren.size();

            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);

            PromotedHeapLocation structure(StructurePLoc, allocation.identifier());
            ASSERT(locations.contains(structure));
            m_graph.m_varArgChildren.append(Edge(resolve(block, structure), KnownCellUse));

            for (PromotedHeapLocation location : locations) {
                switch (location.kind()) {
                case StructurePLoc: {
                    ASSERT(location == structure);
                    break;
                }

                case InternalFieldObjectPLoc: {
                    ASSERT(location.base() == allocation.identifier());
                    data.m_properties.append(location.descriptor());
                    Node* value = resolve(block, location);
                    if (m_sinkCandidates.contains(value))
                        m_graph.m_varArgChildren.append(m_bottom);
                    else
                        m_graph.m_varArgChildren.append(value);
                    break;
                }

                default:
                    DFG_CRASH(m_graph, node, "Bad location kind");
                }
            }

            node->children = AdjacencyList(
                AdjacencyList::Variable,
                firstChild, m_graph.m_varArgChildren.size() - firstChild);
            break;
        }

        case NewRegexp: {
            Vector<PromotedHeapLocation> locations = m_locationsForAllocation.get(escapee);
            ASSERT(locations.size() == 2);

            PromotedHeapLocation regExp(RegExpObjectRegExpPLoc, allocation.identifier());
            ASSERT_UNUSED(regExp, locations.contains(regExp));

            PromotedHeapLocation lastIndex(RegExpObjectLastIndexPLoc, allocation.identifier());
            ASSERT(locations.contains(lastIndex));
            Node* value = resolve(block, lastIndex);
            if (m_sinkCandidates.contains(value))
                node->child1() = Edge(m_bottom);
            else
                node->child1() = Edge(value);
            break;
        }

        default:
            DFG_CRASH(m_graph, node, "Bad materialize op");
        }
    }

    Node* createRecovery(BasicBlock* block, PromotedHeapLocation location, Node* where, bool& canExit)
    {
        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
            dataLog("Recovering ", location, " at ", where, "\n");
        ASSERT(location.base()->isPhantomAllocation());
        Node* base = getMaterialization(block, location.base());
        Node* value = resolve(block, location);

        NodeOrigin origin = where->origin.withSemantic(base->origin.semantic);

        if (DFGObjectAllocationSinkingPhaseInternal::verbose)
            dataLog("Base is ", base, " and value is ", value, "\n");

        if (base->isPhantomAllocation()) {
            return PromotedHeapLocation(base, location.descriptor()).createHint(
                m_graph, origin.takeValidExit(canExit), value);
        }

        switch (location.kind()) {
        case NamedPropertyPLoc: {
            Allocation& allocation = m_heap.getAllocation(location.base());

            unsigned identifierNumber = location.info();
            UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];

            Vector<RegisteredStructure> structures;
            for (RegisteredStructure structure : allocation.structuresForMaterialization()) {
                // This structure set is conservative, and can include a Structure that does
                // not have a legit property. We filter out such apparently inappropriate
                // structures here, since MultiPutByOffset assumes that all the structures
                // have a valid corresponding offset for the given property.
                if (structure->getConcurrently(uid) != invalidOffset)
                    structures.append(structure);
            }

            // Since we filter structures, it is possible that we no longer have any structures here. In this case, we emit ForceOSRExit.
            if (structures.isEmpty())
                return m_graph.addNode(ForceOSRExit, origin.takeValidExit(canExit));

            std::sort(
                structures.begin(),
                structures.end(),
                [uid] (RegisteredStructure a, RegisteredStructure b) -> bool {
                    return a->getConcurrently(uid) < b->getConcurrently(uid);
                });

            RELEASE_ASSERT(structures.size());
            PropertyOffset firstOffset = structures[0]->getConcurrently(uid);

            if (firstOffset == structures.last()->getConcurrently(uid)) {
                Node* storage = base;
                // FIXME: When we decide to sink objects with a
                // property storage, we should handle non-inline offsets.
                RELEASE_ASSERT(isInlineOffset(firstOffset));

                StorageAccessData* data = m_graph.m_storageAccessData.add();
                data->offset = firstOffset;
                data->identifierNumber = identifierNumber;

                return m_graph.addNode(
                    PutByOffset,
                    origin.takeValidExit(canExit),
                    OpInfo(data),
                    Edge(storage, KnownCellUse),
                    Edge(base, KnownCellUse),
                    value->defaultEdge());
            }

            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
            data->identifierNumber = identifierNumber;

            {
                PropertyOffset currentOffset = firstOffset;
                StructureSet currentSet;
                for (RegisteredStructure structure : structures) {
                    PropertyOffset offset = structure->getConcurrently(uid);
                    if (offset != currentOffset) {
                        // Because our analysis treats MultiPutByOffset like an escape, we only have to
                        // deal with storing results that would have been previously stored by PutByOffset
                        // nodes. Those nodes were guarded by the appropriate type checks. This means that
                        // at this point, we can simply trust that the incoming value has the right type
                        // for whatever structure we are using.
                        data->variants.append(PutByVariant::replace(nullptr, currentSet, currentOffset));
                        currentOffset = offset;
                        currentSet.clear();
                    }
                    currentSet.add(structure.get());
                }
                data->variants.append(PutByVariant::replace(nullptr, currentSet, currentOffset));
            }
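
            // For example, if the sorted structures for this uid are
            // { S1: offset 0, S2: offset 0, S3: offset 1 }, the loop
            // above produces two variants: replace({S1, S2}, 0) and
            // replace({S3}, 1).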

            return m_graph.addNode(
                MultiPutByOffset,
                origin.takeValidExit(canExit),
                OpInfo(data),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        case ClosureVarPLoc: {
            return m_graph.addNode(
                PutClosureVar,
                origin.takeValidExit(canExit),
                OpInfo(location.info()),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        case InternalFieldObjectPLoc: {
            return m_graph.addNode(
                PutInternalField,
                origin.takeValidExit(canExit),
                OpInfo(location.info()),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        case RegExpObjectLastIndexPLoc: {
            return m_graph.addNode(
                SetRegExpObjectLastIndex,
                origin.takeValidExit(canExit),
                OpInfo(true),
                Edge(base, KnownCellUse),
                value->defaultEdge());
        }

        default:
            DFG_CRASH(m_graph, base, "Bad location kind");
            break;
        }

        RELEASE_ASSERT_NOT_REACHED();
    }

    void removeICStatusFilters()
    {
        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
            for (Node* node : *block) {
                switch (node->op()) {
                case FilterCallLinkStatus:
                case FilterGetByStatus:
                case FilterPutByStatus:
                case FilterInByStatus:
                case FilterDeleteByStatus:
                case FilterCheckPrivateBrandStatus:
                case FilterSetPrivateBrandStatus:
                    if (node->child1()->isPhantomAllocation())
                        node->removeWithoutChecks();
                    break;
                default:
                    break;
                }
            }
        }
    }

    // This is a great way of asking value->isStillValid() without having to worry about getting
    // different answers. It turns out that this analysis works OK regardless of what this
    // returns, but breaks badly if this changes its mind for any particular InferredValue. This
    // method protects us from that.
    bool isStillValid(SymbolTable* value)
    {
        return m_validInferredValues.add(value, value->singleton().isStillValid()).iterator->value;
    }

    bool isStillValid(FunctionExecutable* value)
    {
        return m_validInferredValues.add(value, value->singleton().isStillValid()).iterator->value;
    }


    SSACalculator m_pointerSSA;
    SSACalculator m_allocationSSA;
    NodeSet m_sinkCandidates;
    HashMap<PromotedHeapLocation, SSACalculator::Variable*> m_locationToVariable;
    HashMap<Node*, SSACalculator::Variable*> m_nodeToVariable;
    HashMap<PromotedHeapLocation, Node*> m_localMapping;
    HashMap<Node*, Node*> m_escapeeToMaterialization;
    InsertionSet m_insertionSet;
    CombinedLiveness m_combinedLiveness;

    HashMap<JSCell*, bool> m_validInferredValues;

    HashMap<Node*, Node*> m_materializationToEscapee;
    HashMap<Node*, Vector<Node*>> m_materializationSiteToMaterializations;
    HashMap<Node*, Vector<PromotedHeapLocation>> m_materializationSiteToRecoveries;
    HashMap<Node*, Vector<std::pair<PromotedHeapLocation, Node*>>> m_materializationSiteToHints;

    HashMap<Node*, Vector<PromotedHeapLocation>> m_locationsForAllocation;

    BlockMap<LocalHeap> m_heapAtHead;
    BlockMap<LocalHeap> m_heapAtTail;
    LocalHeap m_heap;

    Node* m_bottom = nullptr;
};

} // anonymous namespace

bool performObjectAllocationSinking(Graph& graph)
{
    return runPhase<ObjectAllocationSinkingPhase>(graph);
}

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)