Changeset 172129 in webkit for trunk/Source/JavaScriptCore/dfg
- Timestamp: Aug 5, 2014, 10:27:46 PM
- Location: trunk/Source/JavaScriptCore/dfg
- Files: 6 added, 31 edited
trunk/Source/JavaScriptCore/dfg/DFGAbstractHeap.h
(diff from r171380 to r172129)

- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2014 Apple Inc. All rights reserved.
…
      macro(InvalidAbstractHeap) \
      macro(World) \
-     macro(Arguments_numArguments) \
-     macro(Arguments_overrideLength) \
      macro(Arguments_registers) \
-     macro(Arguments_slowArguments) \
-     macro(ArrayBuffer_data) \
-     macro(Butterfly_arrayBuffer) \
      macro(Butterfly_publicLength) \
      macro(Butterfly_vectorLength) \
      macro(GetterSetter_getter) \
      macro(GetterSetter_setter) \
-     macro(JSArrayBufferView_length) \
-     macro(JSArrayBufferView_mode) \
-     macro(JSArrayBufferView_vector) \
      macro(JSCell_structureID) \
      macro(JSCell_indexingType) \
      macro(JSCell_typeInfoFlags) \
      macro(JSCell_typeInfoType) \
-     macro(JSFunction_executable) \
-     macro(JSFunction_scopeChain) \
      macro(JSObject_butterfly) \
      macro(JSVariableObject_registers) \
…
      macro(IndexedDoubleProperties) \
      macro(IndexedContiguousProperties) \
+     macro(IndexedArrayStorageProperties) \
      macro(ArrayStorageProperties) \
      macro(Variables) \
…
      /* Use this for writes only, to indicate that this may fire watchpoints. Usually this is never directly written but instead we test to see if a node clobbers this; it just so happens that you have to write world to clobber it. */\
      macro(Watchpoint_fire) \
-     /* Use this for reads only, just to indicate that if the world got clobbered, then this operation will not work. */\
+     /* Use these for reads only, just to indicate that if the world got clobbered, then this operation will not work. */\
      macro(MiscFields) \
      /* Use this for writes only, just to indicate that hoisting the node is invalid. This works because we don't hoist anything that has any side effects at all. */\
…
-     bool isDisjoint(const AbstractHeap& other)
+     bool isDisjoint(const AbstractHeap& other) const
      {
          ASSERT(kind() != InvalidAbstractHeap);
…
      }

-     bool overlaps(const AbstractHeap& other)
+     bool overlaps(const AbstractHeap& other) const
      {
          return !isDisjoint(other);
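The const-qualified isDisjoint()/overlaps() queries are the primitives the CSE changes below are built on. A minimal sketch of how a pass might use them, with a simplified flat heap hierarchy (World at the top, named fields below it); the types here are stand-ins, not the real DFG classes:

    #include <cassert>

    // Simplified stand-in for DFG::AbstractHeap: World overlaps everything,
    // two distinct named heaps are disjoint.
    enum class HeapKind { World, Butterfly_publicLength, JSObject_butterfly };

    struct Heap {
        HeapKind kind;
        bool isDisjoint(const Heap& other) const
        {
            if (kind == HeapKind::World || other.kind == HeapKind::World)
                return false; // World aliases every heap.
            return kind != other.kind;
        }
        bool overlaps(const Heap& other) const { return !isDisjoint(other); }
    };

    int main()
    {
        Heap world { HeapKind::World };
        Heap length { HeapKind::Butterfly_publicLength };
        Heap butterfly { HeapKind::JSObject_butterfly };
        assert(world.overlaps(length));      // a clobber of World kills everything
        assert(!length.overlaps(butterfly)); // distinct fields don't interfere
        return 0;
    }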
trunk/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
(diff from r171660 to r172129)

      #include "DFGAbstractInterpreter.h"
      #include "GetByIdStatus.h"
+     #include "GetterSetter.h"
      #include "Operations.h"
      #include "PutByIdStatus.h"
…
      case GetCallee:
-     case GetGetter:
-     case GetSetter:
          forNode(node).setType(SpecFunction);
          break;
+
+     case GetGetter: {
+         JSValue base = forNode(node->child1()).m_value;
+         if (base) {
+             if (JSObject* getter = jsCast<GetterSetter*>(base)->getterConcurrently()) {
+                 setConstant(node, *m_graph.freeze(getter));
+                 break;
+             }
+         }
+
+         forNode(node).setType(SpecObject);
+         break;
+     }
+
+     case GetSetter: {
+         JSValue base = forNode(node->child1()).m_value;
+         if (base) {
+             if (JSObject* setter = jsCast<GetterSetter*>(base)->setterConcurrently()) {
+                 setConstant(node, *m_graph.freeze(setter));
+                 break;
+             }
+         }
+
+         forNode(node).setType(SpecObject);
+         break;
+     }

      case GetScope: // FIXME: We could get rid of these if we know that the JSFunction is a constant. https://bugs.webkit.org/show_bug.cgi?id=106202
…
      AbstractValue result;
      for (unsigned i = status.numVariants(); i--;) {
-         if (!status[i].specificValue()) {
+         DFG_ASSERT(m_graph, node, !status[i].alternateBase());
+         JSValue constantResult =
+             m_graph.tryGetConstantProperty(value, status[i].offset());
+         if (!constantResult) {
              result.makeHeapTop();
              break;
          }
          AbstractValue thisResult;
          thisResult.set(
-             m_graph, *m_graph.freeze(status[i].specificValue()),
+             m_graph, *m_graph.freeze(constantResult),
              m_state.structureClobberState());
          result.merge(thisResult);
…
      case GetByOffset: {
+         StorageAccessData data = m_graph.m_storageAccessData[node->storageAccessDataIndex()];
+         JSValue result = m_graph.tryGetConstantProperty(forNode(node->child2()), data.offset);
+         if (result) {
+             setConstant(node, *m_graph.freeze(result));
+             break;
+         }
+
          forNode(node).makeHeapTop();
          break;
…
      case GetGetterSetterByOffset: {
+         StorageAccessData data = m_graph.m_storageAccessData[node->storageAccessDataIndex()];
+         JSValue result = m_graph.tryGetConstantProperty(forNode(node->child2()), data.offset);
+         if (result && jsDynamicCast<GetterSetter*>(result)) {
+             setConstant(node, *m_graph.freeze(result));
+             break;
+         }
+
          forNode(node).set(m_graph, m_graph.m_vm.getterSetterStructure.get());
          break;
…
              continue;
          baseSet.merge(set);
-         if (!variant.specificValue()) {
+
+         JSValue baseForLoad;
+         if (variant.alternateBase())
+             baseForLoad = variant.alternateBase();
+         else
+             baseForLoad = base.m_value;
+         JSValue constantResult =
+             m_graph.tryGetConstantProperty(
+                 baseForLoad, variant.baseStructure(), variant.offset());
+         if (!constantResult) {
              result.makeHeapTop();
              continue;
          }
          AbstractValue thisResult;
          thisResult.set(
              m_graph,
-             *m_graph.freeze(variant.specificValue()),
+             *m_graph.freeze(constantResult),
              m_state.structureClobberState());
          result.merge(thisResult);
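The common shape of all of these hunks: if the abstract value of the base is a compile-time constant and the field can be read safely off the main thread, fold the load to a constant, otherwise fall back to a type. A hedged mini-interpreter sketch of that shape; Object, Value, and AbstractValue are stand-ins for the JSC types, not the real API:

    #include <optional>

    struct Object { const Object* getter; };
    using Value = const Object*;

    struct AbstractValue {
        std::optional<Value> constant; // set when the value is proven constant
        bool isObjectTyped = false;
    };

    // Same structure as the GetGetter case above: fold when possible,
    // otherwise retain only the type.
    AbstractValue interpretGetGetter(const AbstractValue& base)
    {
        AbstractValue result;
        if (base.constant && (*base.constant)->getter) {
            result.constant = (*base.constant)->getter; // fold to a constant
            return result;
        }
        result.isObjectTyped = true; // can't fold; still know it's an object
        return result;
    }

    int main()
    {
        Object g { nullptr };
        Object getterSetter { &g };
        AbstractValue base;
        base.constant = &getterSetter;            // base proven constant
        AbstractValue out = interpretGetGetter(base);
        return out.constant ? 0 : 1;              // folded to the getter
    }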
trunk/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
(diff from r171613 to r172129)

-     AdjacencyList(Kind kind, Edge child1, Edge child2, Edge child3)
+     AdjacencyList(Kind kind, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
      {
          ASSERT_UNUSED(kind, kind == Fixed);
…
          setChild(Size - 1, Edge());
      }

      unsigned firstChild() const
      {
…
      }

+     AdjacencyList sanitized() const
+     {
+         return AdjacencyList(Fixed, child1().sanitized(), child2().sanitized(), child3().sanitized());
+     }
+
+     unsigned hash() const
+     {
+         unsigned result = 0;
+         if (!child1())
+             return result;
+
+         result += child1().hash();
+
+         if (!child2())
+             return result;
+
+         result *= 3;
+         result += child2().hash();
+
+         if (!child3())
+             return result;
+
+         result *= 3;
+         result += child3().hash();
+
+         return result;
+     }
+
+     bool operator==(const AdjacencyList& other) const
+     {
+         return child1() == other.child1()
+             && child2() == other.child2()
+             && child3() == other.child3();
+     }
+
  private:
      Edge m_words[Size];
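AdjacencyList grows hash() and operator== so that it can serve as part of a hash-map key: the new CSE phases key pure values on (opcode, sanitized children). A minimal sketch of that idea on standard containers; Key, KeyHash, and PureMap are hypothetical stand-ins for the real PureValue machinery:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct Key {
        int op;
        uint32_t child1, child2, child3; // node indices; 0 means "no child"
        bool operator==(const Key& other) const
        {
            return op == other.op && child1 == other.child1
                && child2 == other.child2 && child3 == other.child3;
        }
    };

    struct KeyHash {
        size_t operator()(const Key& k) const
        {
            // Same shape as AdjacencyList::hash(): fold the children in base 3.
            size_t result = k.child1;
            result = result * 3 + k.child2;
            result = result * 3 + k.child3;
            return result ^ (static_cast<size_t>(k.op) << 16);
        }
    };

    using PureMap = std::unordered_map<Key, uint32_t, KeyHash>; // key -> defining node

    int main()
    {
        PureMap map;
        Key k { /* op */ 7, 1, 2, 0 };
        map.emplace(k, 42);          // first occurrence defines the value
        return map.count(k) ? 0 : 1; // a later identical node hits the cache
    }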
trunk/Source/JavaScriptCore/dfg/DFGBasicBlock.h
(diff from r171613 to r172129)

          ~SSAData();
      };
-     OwnPtr<SSAData> ssa;
+     std::unique_ptr<SSAData> ssa;

  private:
      friend class InsertionSet;
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
(diff from r171660 to r172129)

      void handleCall(
          int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
+         Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
+         SpeculatedType prediction);
+     void handleCall(
+         int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
          Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
      void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
…
      bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
      Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
-     Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
+     Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
      void handleGetById(
          int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
…
-     Node* addCall(int result, NodeType op, Node* callee, int argCount, int registerOffset)
-     {
-         SpeculatedType prediction = getPrediction();
-
+     Node* addCallWithoutSettingResult(
+         NodeType op, Node* callee, int argCount, int registerOffset,
+         SpeculatedType prediction)
+     {
          addVarArgChild(callee);
          size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
…
              addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));

-         Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
-         set(VirtualRegister(result), call);
+         return addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
+     }
+
+     Node* addCall(
+         int result, NodeType op, Node* callee, int argCount, int registerOffset,
+         SpeculatedType prediction)
+     {
+         Node* call = addCallWithoutSettingResult(
+             op, callee, argCount, registerOffset, prediction);
+         VirtualRegister resultReg(result);
+         if (resultReg.isValid())
+             set(VirtualRegister(result), call);
          return call;
      }
…
      CallLinkStatus callLinkStatus)
  {
+     handleCall(
+         result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
+         registerOffset, callLinkStatus, getPrediction());
+ }
+
+ void ByteCodeParser::handleCall(
+     int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
+     Node* callTarget, int argumentCountIncludingThis, int registerOffset,
+     CallLinkStatus callLinkStatus, SpeculatedType prediction)
+ {
      ASSERT(registerOffset <= 0);
      CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
…
          // that we cannot optimize them.

-         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset);
+         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
          return;
      }

      unsigned nextOffset = m_currentIndex + instructionSize;
-     SpeculatedType prediction = getPrediction();

      if (InternalFunction* function = callLinkStatus.internalFunction()) {
…
          // Can only handle this using the generic call handler.
-         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset);
+         addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);
          return;
      }
…
-     Node* call = addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset);
+     Node* call = addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);

      if (knownFunction)
…
      size_t argumentPositionStart = m_graph.m_argumentPositions.size();

+     VirtualRegister resultReg(resultOperand);
+     if (resultReg.isValid())
+         resultReg = m_inlineStackTop->remapOperand(resultReg);
+
      InlineStackEntry inlineStackEntry(
-         this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(),
-         m_inlineStackTop->remapOperand(VirtualRegister(resultOperand)),
+         this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(), resultReg,
          (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
…
- Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset, NodeType op)
+ Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
  {
+     if (base->hasConstant()) {
+         if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
+             addToGraph(Phantom, base);
+             return weakJSConstant(constant);
+         }
+     }
+
      Node* propertyStorage;
      if (isInlineOffset(offset))
…
      // ensure that the base of the original get_by_id is kept alive until we're done with
      // all of the speculations. We only insert the Phantom if there had been a CheckStructure
-     // on something other than the base following the CheckStructure on base, or if the
-     // access was compiled to a WeakJSConstant specific value, in which case we might not
-     // have any explicit use of the base at all.
-     if (variant.specificValue() || originalBase != base)
+     // on something other than the base following the CheckStructure on base.
+     if (originalBase != base)
          addToGraph(Phantom, originalBase);

-     Node* loadedValue;
-     if (variant.specificValue())
-         loadedValue = weakJSConstant(variant.specificValue());
-     else {
-         loadedValue = handleGetByOffset(
-             prediction, base, identifierNumber, variant.offset(),
-             variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);
-     }
+     Node* loadedValue = handleGetByOffset(
+         variant.callLinkStatus() ? SpecCellOther : prediction,
+         base, variant.baseStructure(), identifierNumber, variant.offset(),
+         variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);

      if (!variant.callLinkStatus()) {
…
      handleCall(
          destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
-         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus());
+         getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
  }
…
      const PutByIdVariant& variant = putByIdStatus[0];

-     if (variant.kind() == PutByIdVariant::Replace) {
+     switch (variant.kind()) {
+     case PutByIdVariant::Replace: {
          addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
          handlePutByOffset(base, identifierNumber, variant.offset(), value);
…
      }

-     if (variant.kind() != PutByIdVariant::Transition) {
-         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
-         return;
-     }
-
-     addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
-     emitChecks(variant.constantChecks());
…
-     if (m_graph.compilation())
-         m_graph.compilation()->noticeInlinedPutById();
+     case PutByIdVariant::Transition: {
+         addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
+         emitChecks(variant.constantChecks());
+
+         ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());
+
+         Node* propertyStorage;
+         Transition* transition = m_graph.m_transitions.add(
+             variant.oldStructureForTransition(), variant.newStructure());
+
+         if (variant.reallocatesStorage()) {
+             // If we're growing the property storage then it must be because we're
+             // storing into the out-of-line storage.
+             ASSERT(!isInlineOffset(variant.offset()));
+
+             if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
+                 propertyStorage = addToGraph(
+                     AllocatePropertyStorage, OpInfo(transition), base);
+             } else {
+                 propertyStorage = addToGraph(
+                     ReallocatePropertyStorage, OpInfo(transition),
+                     base, addToGraph(GetButterfly, base));
+             }
+         } else {
+             if (isInlineOffset(variant.offset()))
+                 propertyStorage = base;
+             else
+                 propertyStorage = addToGraph(GetButterfly, base);
+         }
+
+         addToGraph(PutStructure, OpInfo(transition), base);
+
+         addToGraph(
+             PutByOffset,
+             OpInfo(m_graph.m_storageAccessData.size()),
+             propertyStorage,
+             base,
+             value);
+
+         StorageAccessData storageAccessData;
+         storageAccessData.offset = variant.offset();
+         storageAccessData.identifierNumber = identifierNumber;
+         m_graph.m_storageAccessData.append(storageAccessData);
+
+         if (m_graph.compilation())
+             m_graph.compilation()->noticeInlinedPutById();
+         return;
+     }
+
+     case PutByIdVariant::Setter: {
+         Node* originalBase = base;
+
+         addToGraph(
+             CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
+
+         emitChecks(variant.constantChecks());
+
+         if (variant.alternateBase())
+             base = weakJSConstant(variant.alternateBase());
+
+         Node* loadedValue = handleGetByOffset(
+             SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
+             GetGetterSetterByOffset);
+
+         Node* setter = addToGraph(GetSetter, loadedValue);
+
+         // Make a call. We don't try to get fancy with using the smallest operand number because
+         // the stack layout phase should compress the stack anyway.
+
+         unsigned numberOfParameters = 0;
+         numberOfParameters++; // The 'this' argument.
+         numberOfParameters++; // The new value.
+         numberOfParameters++; // True return PC.
+
+         // Start with a register offset that corresponds to the last in-use register.
+         int registerOffset = virtualRegisterForLocal(
+             m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
+         registerOffset -= numberOfParameters;
+         registerOffset -= JSStack::CallFrameHeaderSize;
+
+         // Get the alignment right.
+         registerOffset = -WTF::roundUpToMultipleOf(
+             stackAlignmentRegisters(),
+             -registerOffset);
+
+         ensureLocals(
+             m_inlineStackTop->remapOperand(
+                 VirtualRegister(registerOffset)).toLocal());
+
+         int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
+         set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
+         set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);
+
+         handleCall(
+             VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
+             OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
+             *variant.callLinkStatus(), SpecOther);
+         return;
+     }
+
+     default: {
+         emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
+         return;
+     } }
  }
…
      flushForReturn();
      if (inlineCallFrame()) {
-         ASSERT(m_inlineStackTop->m_returnValue.isValid());
-         setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
+         if (m_inlineStackTop->m_returnValue.isValid())
+             setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
          m_inlineStackTop->m_didReturn = true;
          if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
…
      Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
      addToGraph(Phantom, get(VirtualRegister(scope)));
-     if (JSValue specificValue = status[0].specificValue())
-         set(VirtualRegister(dst), weakJSConstant(specificValue.asCell()));
-     else
-         set(VirtualRegister(dst), handleGetByOffset(prediction, base, identifierNumber, operand));
+     set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
      break;
  }
…
      SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
      VariableWatchpointSet* watchpointSet = entry.watchpointSet();
-     JSValue specificValue =
+     JSValue inferredValue =
          watchpointSet ? watchpointSet->inferredValue() : JSValue();
-     if (!specificValue) {
+     if (!inferredValue) {
          SpeculatedType prediction = getPrediction();
          set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
…
      addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
-     set(VirtualRegister(dst), weakJSConstant(specificValue));
+     set(VirtualRegister(dst), weakJSConstant(inferredValue));
      break;
  }
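The alignment arithmetic in the new Setter case (negate, round up, negate again) is easy to get backwards, so a worked example helps. This is a standalone sketch; the concrete numbers (starting offset, parameter count, header size, 2-register alignment) are made up for illustration, and roundUpToMultipleOf here just mirrors the argument order of the WTF helper:

    #include <cstdio>

    // Mirrors WTF::roundUpToMultipleOf(divisor, value) for non-negative values.
    static int roundUpToMultipleOf(int divisor, int value)
    {
        return (value + divisor - 1) / divisor * divisor;
    }

    int main()
    {
        // Hypothetical frame: last in-use local at offset -9, three parameter
        // slots, a five-slot call frame header, 2-register stack alignment.
        int registerOffset = -9;
        registerOffset -= 3; // parameters
        registerOffset -= 5; // call frame header
        // Offsets grow downward, so align by negating, rounding up, negating back.
        registerOffset = -roundUpToMultipleOf(2, -registerOffset);
        printf("%d\n", registerOffset); // prints -18 (pushed down from -17)
        return 0;
    }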
trunk/Source/JavaScriptCore/dfg/DFGCPSRethreadingPhase.cpp
(diff from r165995 to r172129)

      if (otherNode->op() == GetLocal) {
          // Replace all references to this GetLocal with otherNode.
-         node->misc.replacement = otherNode;
+         node->replacement = otherNode;
          return;
      }

      ASSERT(otherNode->op() == SetLocal);
-     node->misc.replacement = otherNode->child1().node();
+     node->replacement = otherNode->child1().node();
      return;
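For context, the replacement field that this rename touches is the hook that performSubstitution()-style walks rely on: an eliminated node forwards its uses to a surviving node. A simplified sketch of that idiom, with a single child pointer standing in for the real adjacency list:

    // Sketch of replacement forwarding (simplified; the real DFG Node has
    // multiple edges, not one child pointer).
    struct Node {
        Node* replacement = nullptr;
        Node* child = nullptr;
    };

    // Rewrites a node's children to point past any node that was CSE'd away.
    void performSubstitution(Node* node)
    {
        while (node->child && node->child->replacement)
            node->child = node->child->replacement;
    }

    int main()
    {
        Node a, b, use;
        use.child = &a;
        a.replacement = &b; // a was eliminated in favor of b
        performSubstitution(&use);
        return use.child == &b ? 0 : 1;
    }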
trunk/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
(diff from r171613 to r172129; the old monolithic CSEPhase is replaced by two new phases)

      #include "DFGAbstractHeap.h"
+     #include "DFGClobberSet.h"
      #include "DFGClobberize.h"
      #include "DFGEdgeUsesStructure.h"
…
  namespace JSC { namespace DFG {

+ // This file contains two CSE implementations: local and global. LocalCSE typically runs when we're
+ // in DFG mode, i.e. we want to compile quickly. LocalCSE contains a lot of optimizations for
+ // compile time. GlobalCSE, on the other hand, is fairly straight-forward. It will find more
+ // optimization opportunities by virtue of being global.
+
+ namespace {
+
+ const bool verbose = false;
+
+ class ClobberFilter {
+ public:
+     ClobberFilter(AbstractHeap heap)
+         : m_heap(heap)
+     {
+     }
+
+     bool operator()(const ImpureMap::KeyValuePairType& pair) const
+     {
+         return m_heap.overlaps(pair.key.heap());
+     }
+
+ private:
+     AbstractHeap m_heap;
+ };
+
+ inline void clobber(ImpureMap& map, AbstractHeap heap)
+ {
+     ClobberFilter filter(heap);
+     map.removeIf(filter);
+ }
+
+ class LocalCSEPhase : public Phase {
+ public:
+     LocalCSEPhase(Graph& graph)
+         : Phase(graph, "local common subexpression elimination")
+         , m_smallBlock(graph)
+         , m_largeBlock(graph)
+     {
+     }
+
+     bool run()
+     {
+         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
+         ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == LoadStore);
+
+         bool changed = false;
+
+         m_graph.clearReplacements();
+
+         for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
+             BasicBlock* block = m_graph.block(blockIndex);
+             if (!block)
+                 continue;
+
+             if (block->size() <= SmallMaps::capacity)
+                 changed |= m_smallBlock.run(block);
+             else
+                 changed |= m_largeBlock.run(block);
+         }
+
+         return changed;
+     }
+
+ private:
+     class SmallMaps {
+     public:
+         // This permits SmallMaps to be used for blocks that have up to 100 nodes. In practice,
+         // fewer than half of the nodes in a block have pure defs, and even fewer have impure defs.
+         // Thus, a capacity limit of 100 probably means that somewhere around ~40 things may end up
+         // in one of these "small" list-based maps. That number still seems largeish, except that
+         // the overhead of HashMaps can be quite high currently: clearing them, or even removing
+         // enough things from them, deletes (or resizes) their backing store eagerly. Hence
+         // HashMaps induce a lot of malloc traffic.
+         static const unsigned capacity = 100;
+
+         SmallMaps()
+             : m_pureLength(0)
+             , m_impureLength(0)
+         {
+         }
+
+         void clear()
+         {
+             m_pureLength = 0;
+             m_impureLength = 0;
+         }
+
+         void write(AbstractHeap heap)
+         {
+             for (unsigned i = 0; i < m_impureLength; ++i) {
+                 if (heap.overlaps(m_impureMap[i].key.heap()))
+                     m_impureMap[i--] = m_impureMap[--m_impureLength];
+             }
+         }
+
+         Node* addPure(PureValue value, Node* node)
+         {
+             for (unsigned i = m_pureLength; i--;) {
+                 if (m_pureMap[i].key == value)
+                     return m_pureMap[i].value;
+             }
+
+             ASSERT(m_pureLength < capacity);
+             m_pureMap[m_pureLength++] = WTF::KeyValuePair<PureValue, Node*>(value, node);
+             return nullptr;
+         }
+
+         Node* findReplacement(HeapLocation location)
+         {
+             for (unsigned i = m_impureLength; i--;) {
+                 if (m_impureMap[i].key == location)
+                     return m_impureMap[i].value;
+             }
+             return nullptr;
+         }
+
+         Node* addImpure(HeapLocation location, Node* node)
+         {
+             if (Node* result = findReplacement(location))
+                 return result;
+             ASSERT(m_impureLength < capacity);
+             m_impureMap[m_impureLength++] = WTF::KeyValuePair<HeapLocation, Node*>(location, node);
+             return nullptr;
+         }
+
+     private:
+         WTF::KeyValuePair<PureValue, Node*> m_pureMap[capacity];
+         WTF::KeyValuePair<HeapLocation, Node*> m_impureMap[capacity];
+         unsigned m_pureLength;
+         unsigned m_impureLength;
+     };
+
+     class LargeMaps {
+     public:
+         LargeMaps()
+         {
+         }
+
+         void clear()
+         {
+             m_pureMap.clear();
+             m_impureMap.clear();
+         }
+
+         void write(AbstractHeap heap)
+         {
+             clobber(m_impureMap, heap);
+         }
+
+         Node* addPure(PureValue value, Node* node)
+         {
+             auto result = m_pureMap.add(value, node);
+             if (result.isNewEntry)
+                 return nullptr;
+             return result.iterator->value;
+         }
+
+         Node* findReplacement(HeapLocation location)
+         {
+             return m_impureMap.get(location);
+         }
+
+         Node* addImpure(HeapLocation location, Node* node)
+         {
+             auto result = m_impureMap.add(location, node);
+             if (result.isNewEntry)
+                 return nullptr;
+             return result.iterator->value;
+         }
+
+     private:
+         HashMap<PureValue, Node*> m_pureMap;
+         HashMap<HeapLocation, Node*> m_impureMap;
+     };
+
+     template<typename Maps>
+     class BlockCSE {
+     public:
+         BlockCSE(Graph& graph)
+             : m_graph(graph)
+         {
+         }
+
+         bool run(BasicBlock* block)
+         {
+             m_maps.clear();
+             m_changed = false;
+
+             for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
+                 m_node = block->at(nodeIndex);
+                 m_graph.performSubstitution(m_node);
+
+                 if (m_node->op() == Identity) {
+                     m_node->convertToCheck();
+                     m_node->replacement = m_node->child1().node();
+                     m_changed = true;
+                 } else {
+                     // This rule only makes sense for local CSE, since in SSA form we have already
+                     // factored the bounds check out of the PutByVal. It's kind of gross, but we
+                     // still have reason to believe that PutByValAlias is a good optimization and
+                     // that it's better to do it with a single node rather than separating out the
+                     // CheckInBounds.
+                     if (m_node->op() == PutByVal || m_node->op() == PutByValDirect) {
+                         HeapLocation heap;
+
+                         Node* base = m_graph.varArgChild(m_node, 0).node();
+                         Node* index = m_graph.varArgChild(m_node, 1).node();
+
+                         ArrayMode mode = m_node->arrayMode();
+                         switch (mode.type()) {
+                         case Array::Int32:
+                             if (!mode.isInBounds())
+                                 break;
+                             heap = HeapLocation(
+                                 IndexedPropertyLoc, IndexedInt32Properties, base, index);
+                             break;
+
+                         case Array::Double:
+                             if (!mode.isInBounds())
+                                 break;
+                             heap = HeapLocation(
+                                 IndexedPropertyLoc, IndexedDoubleProperties, base, index);
+                             break;
+
+                         case Array::Contiguous:
+                             if (!mode.isInBounds())
+                                 break;
+                             heap = HeapLocation(
+                                 IndexedPropertyLoc, IndexedContiguousProperties, base, index);
+                             break;
+
+                         case Array::Int8Array:
+                         case Array::Int16Array:
+                         case Array::Int32Array:
+                         case Array::Uint8Array:
+                         case Array::Uint8ClampedArray:
+                         case Array::Uint16Array:
+                         case Array::Uint32Array:
+                         case Array::Float32Array:
+                         case Array::Float64Array:
+                             if (!mode.isInBounds())
+                                 break;
+                             heap = HeapLocation(
+                                 IndexedPropertyLoc, TypedArrayProperties, base, index);
+                             break;
+
+                         default:
+                             break;
+                         }
+
+                         if (!!heap && m_maps.findReplacement(heap))
+                             m_node->setOp(PutByValAlias);
+                     }
+
+                     clobberize(m_graph, m_node, *this);
+                 }
+             }
+
+             return m_changed;
+         }
+
+         void read(AbstractHeap) { }
+
+         void write(AbstractHeap heap)
+         {
+             m_maps.write(heap);
+         }
+
+         void def(PureValue value)
+         {
+             Node* match = m_maps.addPure(value, m_node);
+             if (!match)
+                 return;
+
+             m_node->replaceWith(match);
+             m_changed = true;
+         }
+
+         void def(HeapLocation location, Node* value)
+         {
+             Node* match = m_maps.addImpure(location, value);
+             if (!match)
+                 return;
+
+             if (m_node->op() == GetLocal) {
+                 // For uncaptured locals, usually the CPS rethreading phase does this. But it's OK
+                 // for us to mess with locals - regardless of their capturedness - so long as:
+                 //
+                 // - We dethread the graph. Any changes we make may invalidate the assumptions of
+                 //   our CPS form, particularly if this GetLocal is linked to the variablesAtTail.
+                 //
+                 // - We don't introduce a Phantom for the child of the GetLocal. This wouldn't be
+                 //   totally wrong but it would pessimize the code. Just because there is a
+                 //   GetLocal doesn't mean that the child was live. Simply rerouting all uses
+                 //   of this GetLocal will preserve the live-at-exit information just fine.
+                 //
+                 // We accomplish the latter by just clearing the child; then the Phantom that we
+                 // introduce won't have children and so it will eventually just be deleted.
+
+                 m_node->child1() = Edge();
+                 m_graph.dethread();
+             }
+
+             m_node->replaceWith(match);
+             m_changed = true;
+         }
+
+     private:
+         Graph& m_graph;
+
+         bool m_changed;
+         Node* m_node;
+
+         Maps m_maps;
+     };
+
+     BlockCSE<SmallMaps> m_smallBlock;
+     BlockCSE<LargeMaps> m_largeBlock;
+ };
+
+ class GlobalCSEPhase : public Phase {
+ public:
+     GlobalCSEPhase(Graph& graph)
+         : Phase(graph, "global common subexpression elimination")
+     {
+     }
+
+     bool run()
+     {
+         ASSERT(m_graph.m_fixpointState == FixpointNotConverged);
+         ASSERT(m_graph.m_form == SSA);
+
+         m_graph.initializeNodeOwners();
+         m_graph.m_dominators.computeIfNecessary(m_graph);
+
+         m_graph.getBlocksInPreOrder(m_preOrder);
+
+         m_impureDataMap.resize(m_graph.numBlocks());
+
+         // First figure out what gets clobbered by blocks. Note that this uses the preOrder list
+         // for convenience only.
+         for (unsigned i = m_preOrder.size(); i--;) {
+             m_block = m_preOrder[i];
+             m_impureData = &m_impureDataMap[m_block->index];
+             for (unsigned nodeIndex = m_block->size(); nodeIndex--;)
+                 addWrites(m_graph, m_block->at(nodeIndex), m_impureData->writes);
+         }
+
+         // Based on my experience doing this before, what follows might have to be made iterative.
+         // Right now it doesn't have to be iterative because everything is dominator-based. But when
+         // validation is enabled, we check if iterating would find new CSE opportunities.
+
+         bool changed = iterate();
+
+         // Iterating a second time should not find new CSE opportunities, unless we have a bug.
+         if (validationEnabled()) {
+             reset();
+             DFG_ASSERT(m_graph, nullptr, !iterate());
+         }
+
+         return changed;
+     }
+
+     void reset()
+     {
+         m_pureValues.clear();
+
+         for (unsigned i = m_impureDataMap.size(); i--;) {
+             m_impureDataMap[i].availableAtTail.clear();
+             m_impureDataMap[i].didVisit = false;
+         }
+     }
+
+     bool iterate()
+     {
+         if (verbose)
+             dataLog("Performing iteration.\n");
+
+         m_changed = false;
+         m_graph.clearReplacements();
+
+         for (unsigned i = 0; i < m_preOrder.size(); ++i) {
+             m_block = m_preOrder[i];
+             m_impureData = &m_impureDataMap[m_block->index];
+             m_writesSoFar.clear();
+
+             if (verbose)
+                 dataLog("Processing block ", *m_block, ":\n");
+
+             for (unsigned nodeIndex = 0; nodeIndex < m_block->size(); ++nodeIndex) {
+                 m_node = m_block->at(nodeIndex);
+                 if (verbose)
+                     dataLog("  Looking at node ", m_node, ":\n");
+
+                 m_graph.performSubstitution(m_node);
+
+                 if (m_node->op() == Identity) {
+                     m_node->convertToCheck();
+                     m_node->replacement = m_node->child1().node();
+                     m_changed = true;
+                 } else
+                     clobberize(m_graph, m_node, *this);
+             }
+
+             m_impureData->didVisit = true;
+         }
+
+         return m_changed;
+     }
+
+     void read(AbstractHeap) { }
+
+     void write(AbstractHeap heap)
+     {
+         clobber(m_impureData->availableAtTail, heap);
+         m_writesSoFar.add(heap);
+         if (verbose)
+             dataLog("    Clobbered, new tail map: ", mapDump(m_impureData->availableAtTail), "\n");
+     }
+
+     void def(PureValue value)
+     {
+         // With pure values we do not have to worry about the possibility of some control flow path
+         // clobbering the value. So, we just search for all of the like values that have been
+         // computed. We pick one that is in a block that dominates ours. Note that this means that
+         // a PureValue will map to a list of nodes, since there may be many places in the control
+         // flow graph that compute a value but only one of them that dominates us. We may build up
+         // a large list of nodes that compute some value in the case of gnarly control flow. This
+         // is probably OK.
+
+         auto result = m_pureValues.add(value, Vector<Node*>());
+         if (result.isNewEntry) {
+             result.iterator->value.append(m_node);
+             return;
+         }
+
+         for (unsigned i = result.iterator->value.size(); i--;) {
+             Node* candidate = result.iterator->value[i];
+             if (m_graph.m_dominators.dominates(candidate->owner, m_block)) {
+                 m_node->replaceWith(candidate);
+                 m_changed = true;
+                 return;
+             }
+         }
+
+         result.iterator->value.append(m_node);
+     }
+
+     Node* findReplacement(HeapLocation location)
+     {
+         // At this instant, our "availableAtTail" reflects the set of things that are available in
+         // this block so far. We check this map to find block-local CSE opportunities before doing
+         // a global search.
+         Node* match = m_impureData->availableAtTail.get(location);
+         if (match) {
+             if (verbose)
+                 dataLog("      Found local match: ", match, "\n");
+             return match;
+         }
+
+         // If it's not available at this point in the block, and at some prior point in the block
+         // we have clobbered this heap location, then there is no point in doing a global search.
+         if (m_writesSoFar.overlaps(location.heap())) {
+             if (verbose)
+                 dataLog("      Not looking globally because of local clobber: ", m_writesSoFar, "\n");
+             return nullptr;
+         }
+
+         // This performs a backward search over the control flow graph to find some possible
+         // non-local def() that matches our heap location. Such a match is only valid if there does
+         // not exist any path from that def() to our block that contains a write() that overlaps
+         // our heap. This algorithm looks for both of these things (the matching def and the
+         // overlapping writes) in one backwards DFS pass.
+         //
+         // This starts by looking at the starting block's predecessors, and then it continues along
+         // their predecessors. As soon as this finds a possible def() - one that defines the heap
+         // location we want while dominating our starting block - it assumes that this one must be
+         // the match. It then lets the DFS over predecessors complete, but it doesn't add the
+         // def()'s predecessors; this ensures that any blocks we visit thereafter are on some path
+         // from the def() to us. As soon as the DFS finds a write() that overlaps the location's
+         // heap, it stops, assuming that there is no possible match. Note that the write() case may
+         // trigger before we find a def(), or after. Either way, the write() case causes this
+         // function to immediately return nullptr.
+         //
+         // If the write() is found before we find the def(), then we know that any def() we would
+         // find would have a path to us that trips over the write() and hence becomes invalid. This
+         // is just a direct outcome of us looking for a def() that dominates us. Given a block A
+         // that dominates block B - so that A is the one that would have the def() and B is our
+         // starting block - we know that any other block must either be on the path from A to B, or
+         // it must be on a path from the root to A, but not both. So, if we haven't found A yet but
+         // we already have found a block C that has a write(), then C must be on some path from A
+         // to B, which means that A's def() is invalid for our purposes. Hence, before we find the
+         // def(), stopping on write() is the right thing to do.
+         //
+         // Stopping on write() is also the right thing to do after we find the def(). After we find
+         // the def(), we don't add that block's predecessors to the search worklist. That means
+         // that henceforth the only blocks we will see in the search are blocks on the path from
+         // the def() to us. If any such block has a write() that clobbers our heap then we should
+         // give up.
+         //
+         // Hence this graph search algorithm ends up being deceptively simple: any overlapping
+         // write() causes us to immediately return nullptr, and a matching def() means that we just
+         // record it and neglect to visit its predecessors.
+
+         Vector<BasicBlock*, 8> worklist;
+         Vector<BasicBlock*, 8> seenList;
+         BitVector seen;
+
+         for (unsigned i = m_block->predecessors.size(); i--;) {
+             BasicBlock* predecessor = m_block->predecessors[i];
+             if (!seen.get(predecessor->index)) {
+                 worklist.append(predecessor);
+                 seen.set(predecessor->index);
+             }
+         }
+
+         while (!worklist.isEmpty()) {
+             BasicBlock* block = worklist.takeLast();
+             seenList.append(block);
+
+             if (verbose)
+                 dataLog("      Searching in block ", *block, "\n");
+             ImpureBlockData& data = m_impureDataMap[block->index];
+
+             // We require strict domination because this would only see things in our own block if
+             // they came *after* our position in the block. Clearly, while our block dominates
+             // itself, the things in the block after us don't dominate us.
+             if (m_graph.m_dominators.dominates(block, m_block) && block != m_block) {
+                 if (verbose)
+                     dataLog("        It strictly dominates.\n");
+                 DFG_ASSERT(m_graph, m_node, data.didVisit);
+                 DFG_ASSERT(m_graph, m_node, !match);
+                 if (verbose)
+                     dataLog("        Availability map: ", mapDump(data.availableAtTail), "\n");
+                 match = data.availableAtTail.get(location);
+                 if (verbose)
+                     dataLog("        Availability: ", match, "\n");
+                 if (match) {
+                     // Don't examine the predecessors of a match. At this point we just want to
+                     // establish that other blocks on the path from here to there don't clobber
+                     // the location we're interested in.
+                     continue;
+                 }
+             }
+
+             if (verbose)
+                 dataLog("      Dealing with write set ", data.writes, "\n");
+             if (data.writes.overlaps(location.heap())) {
+                 if (verbose)
+                     dataLog("        Clobbered.\n");
+                 return nullptr;
+             }
+
+             for (unsigned i = block->predecessors.size(); i--;) {
+                 BasicBlock* predecessor = block->predecessors[i];
+                 if (!seen.get(predecessor->index)) {
+                     worklist.append(predecessor);
+                     seen.set(predecessor->index);
+                 }
+             }
+         }
+
+         if (!match)
+             return nullptr;
+
+         // Cache the results for next time. We cache them both for this block and for all of our
+         // predecessors, since even though we've already visited our predecessors, our predecessors
+         // probably have successors other than us.
+         // FIXME: Consider caching failed searches as well, when match is null. It's not clear that
+         // the reduction in compile time would warrant the increase in complexity, though.
+         // https://bugs.webkit.org/show_bug.cgi?id=134876
+         for (BasicBlock* block : seenList)
+             m_impureDataMap[block->index].availableAtTail.add(location, match);
+         m_impureData->availableAtTail.add(location, match);
+
+         return match;
+     }
+
+     void def(HeapLocation location, Node* value)
+     {
+         if (verbose)
+             dataLog("    Got heap location def: ", location, " -> ", value, "\n");
+
+         Node* match = findReplacement(location);
+
+         if (verbose)
+             dataLog("    Got match: ", match, "\n");
+
+         if (!match) {
+             if (verbose)
+                 dataLog("    Adding at-tail mapping: ", location, " -> ", value, "\n");
+             auto result = m_impureData->availableAtTail.add(location, value);
+             ASSERT_UNUSED(result, result.isNewEntry);
+             return;
+         }
+
+         m_node->replaceWith(match);
+         m_changed = true;
+     }

- class CSEPhase : public Phase {
- public:
-     CSEPhase(Graph& graph)
-         : Phase(graph, "common subexpression elimination")
-     {
-     }
-
-     bool run()
-     {
-         ASSERT(m_graph.m_fixpointState != BeforeFixpoint);
-
-         m_changed = false;
-
-         m_graph.clearReplacements();
-
-         for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
-             BasicBlock* block = m_graph.block(blockIndex);
-             if (!block)
-                 continue;
-
-             // All Phis need to already be marked as relevant to OSR.
-             if (!ASSERT_DISABLED) {
-                 for (unsigned i = 0; i < block->phis.size(); ++i)
-                     ASSERT(block->phis[i]->flags() & NodeRelevantToOSR);
-             }
-
-             for (unsigned i = block->size(); i--;) {
-                 Node* node = block->at(i);
-                 switch (node->op()) {
-                 case SetLocal:
-                 case GetLocal: // FIXME: The GetLocal case is only necessary until we do https://bugs.webkit.org/show_bug.cgi?id=106707.
-                     node->mergeFlags(NodeRelevantToOSR);
-                     break;
-                 default:
-                     node->clearFlags(NodeRelevantToOSR);
-                     break;
-                 }
-             }
-         }
-
-         for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
-             BasicBlock* block = m_graph.block(blockIndex);
-             if (!block)
-                 continue;
-
-             for (unsigned i = block->size(); i--;) {
-                 Node* node = block->at(i);
-                 if (!node->containsMovHint())
-                     continue;
-
-                 ASSERT(node->op() != ZombieHint);
-                 node->child1()->mergeFlags(NodeRelevantToOSR);
-             }
-         }
-
-         if (m_graph.m_form == SSA) {
-             Vector<BasicBlock*> depthFirst;
-             m_graph.getBlocksInDepthFirstOrder(depthFirst);
-             for (unsigned i = 0; i < depthFirst.size(); ++i)
-                 performBlockCSE(depthFirst[i]);
-         } else {
-             for (unsigned blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
-                 performBlockCSE(m_graph.block(blockIndex));
-         }
-
-         return m_changed;
-     }
-
- private:
-     unsigned endIndexForPureCSE()
…
-     Node* pureCSE(Node* node)
…
-     Node* constantCSE(Node* node)
…
-     Node* constantStoragePointerCSE(Node* node)
…
-     Node* getCalleeLoadElimination()
…
-     Node* getArrayLengthElimination(Node* array)
…
-     Node* globalVarLoadElimination(WriteBarrier<Unknown>* registerPointer)
…
-     Node* scopedVarLoadElimination(Node* registers, int varNumber)
…
-     bool varInjectionWatchpointElimination()
…
-     Node* getByValLoadElimination(Node* child1, Node* child2, ArrayMode arrayMode)
-     {
-         for (unsigned i = m_indexInBlock; i--;) {
-             Node* node = m_currentBlock->at(i);
-             if (node == child1 || node == child2)
-                 break;
-
-             switch (node->op()) {
-             case GetByVal:
-                 if (!m_graph.byValIsPure(node))
-                     return 0;
-                 if (node->child1() == child1
-                     && node->child2() == child2
-                     && node->arrayMode().type() == arrayMode.type())
-                     return node;
-                 break;
-
-             case PutByValDirect:
-             case PutByVal:
-             case PutByValAlias: {
-                 if (!m_graph.byValIsPure(node))
-                     return 0;
-                 // Typed arrays
-                 if (arrayMode.typedArrayType() != NotTypedArray)
-                     return 0;
-                 if (m_graph.varArgChild(node, 0) == child1
-                     && m_graph.varArgChild(node, 1) == child2
-                     && node->arrayMode().type() == arrayMode.type())
-                     return m_graph.varArgChild(node, 2).node();
-                 // We must assume that the PutByVal will clobber the location we're getting from.
-                 // FIXME: We can do better; if we know that the PutByVal is accessing an array of a
-                 // different type than the GetByVal, then we know that they won't clobber each other.
-                 // ... except of course for typed arrays, where all typed arrays clobber all other
-                 // typed arrays!  An Int32Array can alias a Float64Array for example, and so on.
-                 return 0;
-             }
-             default:
-                 if (m_graph.clobbersWorld(node))
-                     return 0;
-                 break;
-             }
-         }
-         return 0;
-     }
-
-     bool checkFunctionElimination(FrozenValue* function, Node* child1)
…
-     bool checkExecutableElimination(ExecutableBase* executable, Node* child1)
…
-     bool checkStructureElimination(const StructureSet& structureSet, Node* child1)
…
-     bool structureTransitionWatchpointElimination(Structure* structure, Node* child1)
…
-     Node* getByOffsetLoadElimination(unsigned identifierNumber, Node* base)
…
-     Node* getGetterSetterByOffsetLoadElimination(unsigned identifierNumber, Node* base)
…
-     Node* getPropertyStorageLoadElimination(Node* child1)
…
-     bool checkArrayElimination(Node* child1, ArrayMode arrayMode)
…
-     Node* getIndexedPropertyStorageLoadElimination(Node* child1, ArrayMode arrayMode)
…
-     Node* getInternalFieldLoadElimination(NodeType op, Node* child1)
…
-     Node* getMyScopeLoadElimination()
…
-     Node* getLocalLoadElimination(VirtualRegister local, Node*& relevantLocalOp, bool careAboutClobbering)
…
-     Node* uncapturedSetLocalStoreElimination(VirtualRegister local)
…
-     Node* capturedSetLocalStoreElimination(VirtualRegister local)
…
-     Node* setLocalStoreElimination(VariableAccessData* variableAccessData)
-     {
-         if (variableAccessData->isCaptured())
-             return capturedSetLocalStoreElimination(variableAccessData->local());
-         return uncapturedSetLocalStoreElimination(variableAccessData->local());
-     }
-
-     bool invalidationPointElimination()
-     {
-         for (unsigned i = m_indexInBlock; i--;) {
-             Node* node = m_currentBlock->at(i);
-             if (node->op() == InvalidationPoint)
-                 return true;
-             if (writesOverlap(m_graph, node, Watchpoint_fire))
-                 break;
-         }
-         return false;
-     }
-
-     void eliminateIrrelevantPhantomChildren(Node* node)
-     {
-         ASSERT(node->op() == Phantom);
-         for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
-             Edge edge = node->children.child(i);
-             if (!edge)
-                 continue;
-             if (edge.useKind() != UntypedUse)
-                 continue; // Keep the type check.
-             if (edge->flags() & NodeRelevantToOSR)
-                 continue;
-
-             node->children.removeEdge(i--);
-             m_changed = true;
-         }
-     }
-
-     bool setReplacement(Node* replacement)
-     {
-         if (!replacement)
-             return false;
-
-         if (!ASSERT_DISABLED
-             && canonicalResultRepresentation(m_currentNode->result()) != canonicalResultRepresentation(replacement->result())) {
-             startCrashing();
-             dataLog("CSE attempting to replace a node with a mismatched result: ", m_currentNode, " with ", replacement, "\n");
-             dataLog("\n");
-             m_graph.dump();
-             RELEASE_ASSERT_NOT_REACHED();
-         }
-
-         m_currentNode->convertToPhantom();
-         eliminateIrrelevantPhantomChildren(m_currentNode);
-
-         // At this point we will eliminate all references to this node.
-         m_currentNode->misc.replacement = replacement;
-
-         m_changed = true;
-
-         return true;
-     }
-
-     void eliminate()
-     {
-         ASSERT(m_currentNode->mustGenerate());
-         m_currentNode->convertToPhantom();
-         eliminateIrrelevantPhantomChildren(m_currentNode);
-
-         m_changed = true;
-     }
-
-     void eliminate(Node* node, NodeType phantomType = Phantom)
-     {
-         if (!node)
-             return;
-         ASSERT(node->mustGenerate());
-         node->setOpAndDefaultFlags(phantomType);
-         if (phantomType == Phantom)
-             eliminateIrrelevantPhantomChildren(node);
-
-         m_changed = true;
-     }
-
-     void performNodeCSE(Node* node)
-     {
-         m_graph.performSubstitution(node);
-
-         switch (node->op()) {
-
-         case Identity:
-             setReplacement(node->child1().node());
-             break;
-
-         // Handle the pure nodes. These nodes never have any side-effects.
-         case BitAnd:
-         case BitOr:
-         case BitXor:
…
-         case BooleanToNumber:
-             setReplacement(pureCSE(node));
-             break;
-
-         case ArithAdd:
-         case ArithSub:
-         case ArithNegate:
-         case ArithMul:
-         case ArithDiv:
-         case ArithMod:
-         case UInt32ToNumber:
-         case DoubleAsInt32: {
-             Node* candidate = pureCSE(node);
-             if (!candidate)
-                 break;
-             if (!subsumes(candidate->arithMode(), node->arithMode())) {
-                 if (!subsumes(node->arithMode(), candidate->arithMode()))
-                     break;
-                 candidate->setArithMode(node->arithMode());
-             }
-             setReplacement(candidate);
-             break;
-         }
-
-         case GetCallee:
-             setReplacement(getCalleeLoadElimination());
-             break;
-
-         case GetLocal: {
-             VariableAccessData* variableAccessData = node->variableAccessData();
-             if (!variableAccessData->isCaptured())
-                 break;
-             Node* relevantLocalOp;
-             Node* possibleReplacement = getLocalLoadElimination(variableAccessData->local(), relevantLocalOp, variableAccessData->isCaptured());
-             if (!relevantLocalOp)
-                 break;
-             if (relevantLocalOp->op() != GetLocalUnlinked
-                 && relevantLocalOp->variableAccessData() != variableAccessData)
-                 break;
-             Node* phi = node->child1().node();
-             if (!setReplacement(possibleReplacement))
-                 break;
m_graph.dethread(); 1030 1031 // If we replace a GetLocal with a GetLocalUnlinked, then turn the GetLocalUnlinked 1032 // into a GetLocal. 1033 if (relevantLocalOp->op() == GetLocalUnlinked) 1034 relevantLocalOp->convertToGetLocal(variableAccessData, phi); 1035 1036 m_changed = true; 1037 break; 1038 } 1039 1040 case GetLocalUnlinked: { 1041 Node* relevantLocalOpIgnored; 1042 setReplacement(getLocalLoadElimination(node->unlinkedLocal(), relevantLocalOpIgnored, true)); 1043 break; 1044 } 1045 1046 case JSConstant: 1047 case DoubleConstant: 1048 case Int52Constant: 1049 // This is strange, but necessary. Some phases will convert nodes to constants, 1050 // which may result in duplicated constants. We use CSE to clean this up. 1051 setReplacement(constantCSE(node)); 1052 break; 1053 1054 case ConstantStoragePointer: 1055 setReplacement(constantStoragePointerCSE(node)); 1056 break; 1057 1058 case GetArrayLength: 1059 setReplacement(getArrayLengthElimination(node->child1().node())); 1060 break; 1061 1062 case GetMyScope: 1063 setReplacement(getMyScopeLoadElimination()); 1064 break; 1065 1066 // Handle nodes that are conditionally pure: these are pure, and can 1067 // be CSE'd, so long as the prediction is the one we want. 1068 case CompareLess: 1069 case CompareLessEq: 1070 case CompareGreater: 1071 case CompareGreaterEq: 1072 case CompareEq: { 1073 if (m_graph.isPredictedNumerical(node)) { 1074 Node* replacement = pureCSE(node); 1075 if (replacement && m_graph.isPredictedNumerical(replacement)) 1076 setReplacement(replacement); 1077 } 1078 break; 1079 } 1080 1081 // Finally handle heap accesses. These are not quite pure, but we can still 1082 // optimize them provided that some subtle conditions are met. 1083 case GetGlobalVar: 1084 setReplacement(globalVarLoadElimination(node->registerPointer())); 1085 break; 1086 1087 case GetClosureVar: { 1088 setReplacement(scopedVarLoadElimination(node->child1().node(), node->varNumber())); 1089 break; 1090 } 1091 1092 case VarInjectionWatchpoint: 1093 if (varInjectionWatchpointElimination()) 1094 eliminate(); 1095 break; 1096 1097 case GetByVal: 1098 if (m_graph.byValIsPure(node)) 1099 setReplacement(getByValLoadElimination(node->child1().node(), node->child2().node(), node->arrayMode())); 1100 break; 1101 1102 case PutByValDirect: 1103 case PutByVal: { 1104 Edge child1 = m_graph.varArgChild(node, 0); 1105 Edge child2 = m_graph.varArgChild(node, 1); 1106 if (node->arrayMode().canCSEStorage()) { 1107 Node* replacement = getByValLoadElimination(child1.node(), child2.node(), node->arrayMode()); 1108 if (!replacement) 1109 break; 1110 node->setOp(PutByValAlias); 1111 } 1112 break; 1113 } 1114 1115 case CheckStructure: 1116 if (checkStructureElimination(node->structureSet(), node->child1().node())) 1117 eliminate(); 1118 break; 1119 1120 case CheckFunction: 1121 if (checkFunctionElimination(node->function(), node->child1().node())) 1122 eliminate(); 1123 break; 1124 1125 case CheckExecutable: 1126 if (checkExecutableElimination(node->executable(), node->child1().node())) 1127 eliminate(); 1128 break; 1129 1130 case CheckArray: 1131 if (checkArrayElimination(node->child1().node(), node->arrayMode())) 1132 eliminate(); 1133 break; 1134 1135 case GetIndexedPropertyStorage: { 1136 setReplacement(getIndexedPropertyStorageLoadElimination(node->child1().node(), node->arrayMode())); 1137 break; 1138 } 1139 1140 case GetTypedArrayByteOffset: 1141 case GetGetter: 1142 case GetSetter: { 1143 setReplacement(getInternalFieldLoadElimination(node->op(), 
node->child1().node())); 1144 break; 1145 } 1146 1147 case GetButterfly: 1148 setReplacement(getPropertyStorageLoadElimination(node->child1().node())); 1149 break; 1150 1151 case GetByOffset: 1152 setReplacement(getByOffsetLoadElimination(m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber, node->child2().node())); 1153 break; 1154 1155 case GetGetterSetterByOffset: 1156 setReplacement(getGetterSetterByOffsetLoadElimination(m_graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber, node->child2().node())); 1157 break; 1158 1159 case MultiGetByOffset: 1160 setReplacement(getByOffsetLoadElimination(node->multiGetByOffsetData().identifierNumber, node->child1().node())); 1161 break; 1162 1163 case InvalidationPoint: 1164 if (invalidationPointElimination()) 1165 eliminate(); 1166 break; 1167 1168 case Phantom: 1169 // FIXME: we ought to remove Phantom's that have no children. 1170 // NB. It would be incorrect to do this for HardPhantom. In fact, the whole point 1171 // of HardPhantom is that we *don't* do this for HardPhantoms, since they signify 1172 // a more strict kind of liveness than the Phantom bytecode liveness. 1173 eliminateIrrelevantPhantomChildren(node); 1174 break; 1175 1176 case Flush: 1177 // This is needed for arguments simplification to work. We need to eliminate the 1178 // redundancy between op_enter's undefined-all-the-things and the subsequent 1179 // op_init_lazy_reg. 1180 1181 ASSERT(m_graph.m_form != SSA); 1182 1183 if (Node* setLocal = setLocalStoreElimination(node->variableAccessData())) { 1184 node->convertToPhantom(); 1185 Node* dataNode = setLocal->child1().node(); 1186 ASSERT(dataNode->hasResult()); 1187 node->child1() = dataNode->defaultEdge(); 1188 m_graph.dethread(); 1189 m_changed = true; 1190 } 1191 break; 1192 1193 default: 1194 // do nothing. 1195 break; 1196 } 1197 1198 m_lastSeen[node->op()] = m_indexInBlock; 1199 } 1200 1201 void performBlockCSE(BasicBlock* block) 1202 { 1203 if (!block) 1204 return; 1205 if (!block->isReachable) 1206 return; 1207 1208 m_currentBlock = block; 1209 for (unsigned i = 0; i < LastNodeType; ++i) 1210 m_lastSeen[i] = UINT_MAX; 1211 1212 for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) { 1213 m_currentNode = block->at(m_indexInBlock); 1214 performNodeCSE(m_currentNode); 1215 } 1216 } 1217 1218 BasicBlock* m_currentBlock; 1219 Node* m_currentNode; 1220 unsigned m_indexInBlock; 1221 std::array<unsigned, LastNodeType> m_lastSeen; 1222 bool m_changed; // Only tracks changes that have a substantive effect on other optimizations. 
641 } 642 643 struct ImpureBlockData { 644 ImpureBlockData() 645 : didVisit(false) 646 { 647 } 648 649 ClobberSet writes; 650 ImpureMap availableAtTail; 651 bool didVisit; 652 }; 653 654 Vector<BasicBlock*> m_preOrder; 655 656 PureMultiMap m_pureValues; 657 Vector<ImpureBlockData> m_impureDataMap; 658 659 BasicBlock* m_block; 660 Node* m_node; 661 ImpureBlockData* m_impureData; 662 ClobberSet m_writesSoFar; 663 664 bool m_changed; 1223 665 }; 1224 666 1225 bool performCSE(Graph& graph) 667 } // anonymous namespace 668 669 bool performLocalCSE(Graph& graph) 1226 670 { 1227 SamplingRegion samplingRegion("DFG CSE Phase");1228 return runPhase< CSEPhase>(graph);671 SamplingRegion samplingRegion("DFG LocalCSE Phase"); 672 return runPhase<LocalCSEPhase>(graph); 1229 673 } 1230 674 675 bool performGlobalCSE(Graph& graph) 676 { 677 SamplingRegion samplingRegion("DFG GlobalCSE Phase"); 678 return runPhase<GlobalCSEPhase>(graph); 679 } 680 1231 681 } } // namespace JSC::DFG 1232 682 1233 683 #endif // ENABLE(DFG_JIT) 1234 1235 -
trunk/Source/JavaScriptCore/dfg/DFGCSEPhase.h
r171613 r172129 1 1 /* 2 * Copyright (C) 2011 Apple Inc. All rights reserved.2 * Copyright (C) 2011, 2014 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 35 35 class Graph; 36 36 37 // Block-local common subexpression elimination. This is an optional phase, but 38 // it is rather profitable. It has fairly accurate heap modeling and will match 39 // a wide range of subexpression similarities. It's known to produce big wins 40 // on a few benchmarks, and is relatively cheap to run. 41 bool performCSE(Graph&); 37 // Block-local common subexpression elimination. It uses clobberize() for heap 38 // modeling, which is quite precise. This phase is known to produce big wins on 39 // a few benchmarks, and is relatively cheap to run. 40 // 41 // Note that this phase also gets rid of Identity nodes, which means that it's 42 // currently not an optional phase. Basically, DFG IR doesn't have use-lists, 43 // so there is no instantaneous replaceAllUsesWith operation. Instead, you turn 44 // a node into an Identity and wait for CSE to clean it up. 45 bool performLocalCSE(Graph&); 46 47 // Same, but global. Only works for SSA. This will find common subexpressions 48 // both in the same block and in any block that dominates the current block. It 49 // has no limits on how far it will look for load-elimination opportunities. 50 bool performGlobalCSE(Graph&); 42 51 43 52 } } // namespace JSC::DFG -
trunk/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
r168178 r172129 46 46 } 47 47 48 bool isSupportedForInlining(CodeBlock* codeBlock) 49 { 50 return !codeBlock->ownerExecutable()->needsActivation() 51 && codeBlock->ownerExecutable()->isInliningCandidate(); 52 } 53 48 54 bool mightCompileEval(CodeBlock* codeBlock) 49 55 { … … 70 76 { 71 77 return codeBlock->instructionCount() <= Options::maximumFunctionForCallInlineCandidateInstructionCount() 72 && !codeBlock->ownerExecutable()->needsActivation() 73 && codeBlock->ownerExecutable()->isInliningCandidate(); 78 && isSupportedForInlining(codeBlock); 74 79 } 75 80 bool mightInlineFunctionForClosureCall(CodeBlock* codeBlock) 76 81 { 77 82 return codeBlock->instructionCount() <= Options::maximumFunctionForClosureCallInlineCandidateInstructionCount() 78 && !codeBlock->ownerExecutable()->needsActivation() 79 && codeBlock->ownerExecutable()->isInliningCandidate(); 83 && isSupportedForInlining(codeBlock); 80 84 } 81 85 bool mightInlineFunctionForConstruct(CodeBlock* codeBlock) 82 86 { 83 87 return codeBlock->instructionCount() <= Options::maximumFunctionForConstructInlineCandidateInstructionCount() 84 && !codeBlock->ownerExecutable()->needsActivation() 85 && codeBlock->ownerExecutable()->isInliningCandidate(); 88 && isSupportedForInlining(codeBlock); 86 89 } 87 90 -
trunk/Source/JavaScriptCore/dfg/DFGCapabilities.h
r170011 r172129 40 40 // check opcodes. 41 41 bool isSupported(CodeBlock*); 42 bool isSupportedForInlining(CodeBlock*); 42 43 bool mightCompileEval(CodeBlock*); 43 44 bool mightCompileProgram(CodeBlock*); -
trunk/Source/JavaScriptCore/dfg/DFGClobberSet.cpp
r164229 r172129 1 1 /* 2 * Copyright (C) 2013 Apple Inc. All rights reserved.2 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 123 123 { 124 124 ClobberSetAdd addRead(readSet); 125 NoOpClobberize addWrite;126 clobberize(graph, node, addRead, addWrite);125 NoOpClobberize noOp; 126 clobberize(graph, node, addRead, noOp, noOp); 127 127 } 128 128 129 129 void addWrites(Graph& graph, Node* node, ClobberSet& writeSet) 130 130 { 131 NoOpClobberize addRead;131 NoOpClobberize noOp; 132 132 ClobberSetAdd addWrite(writeSet); 133 clobberize(graph, node, addRead, addWrite);133 clobberize(graph, node, noOp, addWrite, noOp); 134 134 } 135 135 … … 138 138 ClobberSetAdd addRead(readSet); 139 139 ClobberSetAdd addWrite(writeSet); 140 clobberize(graph, node, addRead, addWrite); 140 NoOpClobberize noOp; 141 clobberize(graph, node, addRead, addWrite, noOp); 141 142 } 142 143 … … 144 145 { 145 146 ClobberSetOverlaps addRead(readSet); 146 NoOpClobberize addWrite;147 clobberize(graph, node, addRead, addWrite);147 NoOpClobberize noOp; 148 clobberize(graph, node, addRead, noOp, noOp); 148 149 return addRead.result(); 149 150 } … … 151 152 bool writesOverlap(Graph& graph, Node* node, ClobberSet& writeSet) 152 153 { 153 NoOpClobberize addRead;154 NoOpClobberize noOp; 154 155 ClobberSetOverlaps addWrite(writeSet); 155 clobberize(graph, node, addRead, addWrite);156 clobberize(graph, node, noOp, addWrite, noOp); 156 157 return addWrite.result(); 157 158 } -
trunk/Source/JavaScriptCore/dfg/DFGClobberize.cpp
r164229 r172129 1 1 /* 2 * Copyright (C) 2013 Apple Inc. All rights reserved.2 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 35 35 bool doesWrites(Graph& graph, Node* node) 36 36 { 37 NoOpClobberize addRead;37 NoOpClobberize noOp; 38 38 CheckClobberize addWrite; 39 clobberize(graph, node, addRead, addWrite);39 clobberize(graph, node, noOp, addWrite, noOp); 40 40 return addWrite.result(); 41 } 42 43 bool accessesOverlap(Graph& graph, Node* node, AbstractHeap heap) 44 { 45 NoOpClobberize noOp; 46 AbstractHeapOverlaps addAccess(heap); 47 clobberize(graph, node, addAccess, addAccess, noOp); 48 return addAccess.result(); 41 49 } 42 50 43 51 bool writesOverlap(Graph& graph, Node* node, AbstractHeap heap) 44 52 { 45 NoOpClobberize addRead;53 NoOpClobberize noOp; 46 54 AbstractHeapOverlaps addWrite(heap); 47 clobberize(graph, node, addRead, addWrite);55 clobberize(graph, node, noOp, addWrite, noOp); 48 56 return addWrite.result(); 49 57 } -
trunk/Source/JavaScriptCore/dfg/DFGClobberize.h
r171660 r172129 32 32 #include "DFGEdgeUsesStructure.h" 33 33 #include "DFGGraph.h" 34 #include "DFGHeapLocation.h" 35 #include "DFGPureValue.h" 34 36 35 37 namespace JSC { namespace DFG { 36 38 37 template<typename ReadFunctor, typename WriteFunctor >38 void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write )39 template<typename ReadFunctor, typename WriteFunctor, typename DefFunctor> 40 void clobberize(Graph& graph, Node* node, ReadFunctor& read, WriteFunctor& write, DefFunctor& def) 39 41 { 40 42 // Some notes: … … 70 72 // small hacking. 71 73 74 // While read() and write() are fairly self-explanatory - they track what sorts of things the 75 // node may read or write - the def() functor is more tricky. It tells you the heap locations 76 // (not just abstract heaps) that are defined by a node. A heap location comprises an abstract 77 // heap, some nodes, and a LocationKind. Briefly, a location defined by a node is a location 78 // whose value can be deduced from looking at the node itself. The locations returned must obey 79 // the following properties: 80 // 81 // - If someone wants to CSE a load from the heap, then a HeapLocation object should be 82 // sufficient to find a single matching node. 83 // 84 // - The abstract heap is the only abstract heap that could be clobbered to invalidate any such 85 // CSE attempt. I.e. if clobberize() reports that on every path between some node and a node 86 // that defines a HeapLocation that it wanted, there were no writes to any abstract heap that 87 // overlap the location's heap, then we have a sound match. Effectively, the semantics of 88 // write() and def() are intertwined such that for them to be sound they must agree on what 89 // is CSEable. 90 // 91 // read(), write(), and def() for heap locations is enough to do GCSE on effectful things. To 92 // keep things simple, this code will also def() pure things. def() must be overloaded to also 93 // accept PureValue. This way, a client of clobberize() can implement GCSE entirely using the 94 // information that clobberize() passes to write() and def(). Other clients of clobberize() can 95 // just ignore def() by using a NoOpClobberize functor. 
96 72 97 if (edgesUseStructure(graph, node)) 73 98 read(JSCell_structureID); … … 77 102 case DoubleConstant: 78 103 case Int52Constant: 104 def(PureValue(node, node->constant())); 105 return; 106 79 107 case Identity: 80 108 case Phantom: 81 109 case HardPhantom: 110 case Check: 111 case ExtractOSREntryLocal: 112 return; 113 82 114 case BitAnd: 83 115 case BitOr: … … 86 118 case BitRShift: 87 119 case BitURShift: 88 case ValueToInt32:89 case ArithAdd:90 case ArithSub:91 case ArithNegate:92 case ArithMul:93 120 case ArithIMul: 94 case ArithDiv:95 case ArithMod:96 121 case ArithAbs: 97 122 case ArithMin: … … 103 128 case GetScope: 104 129 case SkipScope: 105 case CheckFunction:106 130 case StringCharCodeAt: 107 131 case StringFromCharCode: … … 113 137 case IsString: 114 138 case LogicalNot: 115 case ExtractOSREntryLocal:116 139 case CheckInBounds: 117 case ConstantStoragePointer:118 case UInt32ToNumber:119 case DoubleAsInt32:120 case Check:121 140 case DoubleRep: 122 141 case ValueRep: … … 125 144 case FiatInt52: 126 145 case MakeRope: 127 return; 128 146 case ValueToInt32: 147 def(PureValue(node)); 148 return; 149 150 case ArithAdd: 151 case ArithSub: 152 case ArithNegate: 153 case ArithMul: 154 case ArithDiv: 155 case ArithMod: 156 case DoubleAsInt32: 157 case UInt32ToNumber: 158 def(PureValue(node, node->arithMode())); 159 return; 160 161 case CheckFunction: 162 def(PureValue(CheckFunction, AdjacencyList(AdjacencyList::Fixed, node->child1()), node->function())); 163 return; 164 165 case CheckExecutable: 166 def(PureValue(node, node->executable())); 167 return; 168 169 case ConstantStoragePointer: 170 def(PureValue(node, node->storagePointer())); 171 return; 172 129 173 case MovHint: 130 174 case ZombieHint: 131 175 case Upsilon: 132 176 case Phi: 133 case Flush:134 177 case PhantomLocal: 135 178 case SetArgument: … … 146 189 case CheckTierUpAndOSREnter: 147 190 case LoopHint: 148 case InvalidationPoint:149 191 case Breakpoint: 150 192 case ProfileWillCall: … … 155 197 return; 156 198 199 case InvalidationPoint: 200 write(SideState); 201 def(HeapLocation(InvalidationPointLoc, Watchpoint_fire), node); 202 return; 203 204 case Flush: 205 read(AbstractHeap(Variables, node->local())); 206 write(SideState); 207 return; 208 157 209 case VariableWatchpoint: 158 210 case TypedArrayWatchpoint: … … 167 219 168 220 case CreateActivation: 169 case CreateArguments:170 221 read(HeapObjectCount); 171 222 write(HeapObjectCount); … … 174 225 return; 175 226 227 case CreateArguments: 228 read(Variables); 229 read(HeapObjectCount); 230 write(HeapObjectCount); 231 write(SideState); 232 write(Watchpoint_fire); 233 return; 234 176 235 case FunctionReentryWatchpoint: 177 236 read(Watchpoint_fire); … … 186 245 187 246 case VarInjectionWatchpoint: 247 read(MiscFields); 248 def(HeapLocation(VarInjectionWatchpointLoc, MiscFields), node); 249 return; 250 188 251 case AllocationProfileWatchpoint: 252 read(MiscFields); 253 def(HeapLocation(AllocationProfileWatchpointLoc, MiscFields), node); 254 return; 255 189 256 case IsObject: 257 read(MiscFields); 258 def(HeapLocation(IsObjectLoc, MiscFields, node->child1()), node); 259 return; 260 190 261 case IsFunction: 262 read(MiscFields); 263 def(HeapLocation(IsFunctionLoc, MiscFields, node->child1()), node); 264 return; 265 191 266 case TypeOf: 192 267 read(MiscFields); 193 return; 194 268 def(HeapLocation(TypeOfLoc, MiscFields, node->child1()), node); 269 return; 270 195 271 case GetById: 196 272 case GetByIdFlush: … … 215 291 case GetGetter: 216 292 
read(GetterSetter_getter); 293 def(HeapLocation(GetterLoc, GetterSetter_getter, node->child1()), node); 217 294 return; 218 295 219 296 case GetSetter: 220 297 read(GetterSetter_setter); 298 def(HeapLocation(SetterLoc, GetterSetter_setter, node->child1()), node); 221 299 return; 222 300 223 301 case GetCallee: 224 302 read(AbstractHeap(Variables, JSStack::Callee)); 303 def(HeapLocation(VariableLoc, AbstractHeap(Variables, JSStack::Callee)), node); 225 304 return; 226 305 … … 228 307 case GetArgument: 229 308 read(AbstractHeap(Variables, node->local())); 309 def(HeapLocation(VariableLoc, AbstractHeap(Variables, node->local())), node); 230 310 return; 231 311 232 312 case SetLocal: 233 313 write(AbstractHeap(Variables, node->local())); 314 def(HeapLocation(VariableLoc, AbstractHeap(Variables, node->local())), node->child1().node()); 234 315 return; 235 316 236 317 case GetLocalUnlinked: 237 318 read(AbstractHeap(Variables, node->unlinkedLocal())); 319 def(HeapLocation(VariableLoc, AbstractHeap(Variables, node->unlinkedLocal())), node); 238 320 return; 239 321 … … 265 347 } 266 348 // This appears to read nothing because it's only reading immutable data. 349 def(PureValue(node, mode.asWord())); 267 350 return; 268 351 … … 270 353 read(Arguments_registers); 271 354 read(Variables); 355 def(HeapLocation(IndexedPropertyLoc, Variables, node->child1(), node->child2()), node); 272 356 return; 273 357 … … 275 359 if (mode.isInBounds()) { 276 360 read(Butterfly_publicLength); 277 read(Butterfly_vectorLength);278 361 read(IndexedInt32Properties); 362 def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, node->child1(), node->child2()), node); 279 363 return; 280 364 } … … 286 370 if (mode.isInBounds()) { 287 371 read(Butterfly_publicLength); 288 read(Butterfly_vectorLength);289 372 read(IndexedDoubleProperties); 373 def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, node->child1(), node->child2()), node); 290 374 return; 291 375 } … … 297 381 if (mode.isInBounds()) { 298 382 read(Butterfly_publicLength); 299 read(Butterfly_vectorLength);300 383 read(IndexedContiguousProperties); 384 def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, node->child1(), node->child2()), node); 301 385 return; 302 386 } … … 307 391 case Array::ArrayStorage: 308 392 case Array::SlowPutArrayStorage: 309 // Give up on life for now. 
393 if (mode.isInBounds()) { 394 read(Butterfly_vectorLength); 395 read(IndexedArrayStorageProperties); 396 return; 397 } 310 398 read(World); 311 399 write(World); … … 322 410 case Array::Float64Array: 323 411 read(TypedArrayProperties); 324 read( JSArrayBufferView_vector);325 read(JSArrayBufferView_length);412 read(MiscFields); 413 def(HeapLocation(IndexedPropertyLoc, TypedArrayProperties, node->child1(), node->child2()), node); 326 414 return; 327 415 } … … 334 422 case PutByValAlias: { 335 423 ArrayMode mode = node->arrayMode(); 424 Node* base = graph.varArgChild(node, 0).node(); 425 Node* index = graph.varArgChild(node, 1).node(); 426 Node* value = graph.varArgChild(node, 2).node(); 336 427 switch (mode.modeForPut().type()) { 337 428 case Array::SelectUsingPredictions: … … 355 446 case Array::Arguments: 356 447 read(Arguments_registers); 357 read(Arguments_numArguments); 358 read(Arguments_slowArguments); 448 read(MiscFields); 359 449 write(Variables); 450 def(HeapLocation(IndexedPropertyLoc, Variables, base, index), value); 360 451 return; 361 452 … … 370 461 read(IndexedInt32Properties); 371 462 write(IndexedInt32Properties); 463 if (node->arrayMode().mayStoreToHole()) 464 write(Butterfly_publicLength); 465 def(HeapLocation(IndexedPropertyLoc, IndexedInt32Properties, base, index), value); 372 466 return; 373 467 … … 382 476 read(IndexedDoubleProperties); 383 477 write(IndexedDoubleProperties); 478 if (node->arrayMode().mayStoreToHole()) 479 write(Butterfly_publicLength); 480 def(HeapLocation(IndexedPropertyLoc, IndexedDoubleProperties, base, index), value); 384 481 return; 385 482 … … 394 491 read(IndexedContiguousProperties); 395 492 write(IndexedContiguousProperties); 493 if (node->arrayMode().mayStoreToHole()) 494 write(Butterfly_publicLength); 495 def(HeapLocation(IndexedPropertyLoc, IndexedContiguousProperties, base, index), value); 396 496 return; 397 497 … … 412 512 case Array::Float32Array: 413 513 case Array::Float64Array: 414 read(JSArrayBufferView_vector); 415 read(JSArrayBufferView_length); 514 read(MiscFields); 416 515 write(TypedArrayProperties); 516 // FIXME: We can't def() anything here because these operations truncate their inputs. 
517 // https://bugs.webkit.org/show_bug.cgi?id=134737 417 518 return; 418 519 } … … 422 523 423 524 case CheckStructure: 424 case InstanceOf:425 525 read(JSCell_structureID); 426 526 return; … … 434 534 case CheckHasInstance: 435 535 read(JSCell_typeInfoFlags); 436 return; 437 438 case CheckExecutable: 439 read(JSFunction_executable); 440 return; 441 536 def(HeapLocation(CheckHasInstanceLoc, JSCell_typeInfoFlags, node->child1()), node); 537 return; 538 539 case InstanceOf: 540 read(JSCell_structureID); 541 def(HeapLocation(InstanceOfLoc, JSCell_structureID, node->child1(), node->child2()), node); 542 return; 543 442 544 case PutStructure: 443 545 write(JSCell_structureID); … … 449 551 case AllocatePropertyStorage: 450 552 write(JSObject_butterfly); 553 def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), node); 451 554 return; 452 555 … … 454 557 read(JSObject_butterfly); 455 558 write(JSObject_butterfly); 559 def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), node); 456 560 return; 457 561 458 562 case GetButterfly: 459 563 read(JSObject_butterfly); 564 def(HeapLocation(ButterflyLoc, JSObject_butterfly, node->child1()), node); 460 565 return; 461 566 … … 472 577 473 578 case GetIndexedPropertyStorage: 474 if (node->arrayMode().type() == Array::String) 475 return; 476 read(JSArrayBufferView_vector); 579 if (node->arrayMode().type() == Array::String) { 580 def(PureValue(node, node->arrayMode().asWord())); 581 return; 582 } 583 read(MiscFields); 584 def(HeapLocation(IndexedPropertyStorageLoc, MiscFields, node->child1()), node); 477 585 return; 478 586 479 587 case GetTypedArrayByteOffset: 480 read(JSArrayBufferView_vector); 481 read(JSArrayBufferView_mode); 482 read(Butterfly_arrayBuffer); 483 read(ArrayBuffer_data); 588 read(MiscFields); 589 def(HeapLocation(TypedArrayByteOffsetLoc, MiscFields, node->child1()), node); 484 590 return; 485 591 486 592 case GetByOffset: 487 case GetGetterSetterByOffset: 488 read(AbstractHeap(NamedProperties, graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber)); 489 return; 490 491 case MultiGetByOffset: 593 case GetGetterSetterByOffset: { 594 unsigned identifierNumber = 595 graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber; 596 AbstractHeap heap(NamedProperties, identifierNumber); 597 read(heap); 598 def(HeapLocation(NamedPropertyLoc, heap, node->child2()), node); 599 return; 600 } 601 602 case MultiGetByOffset: { 492 603 read(JSCell_structureID); 493 604 read(JSObject_butterfly); 494 read(AbstractHeap(NamedProperties, node->multiGetByOffsetData().identifierNumber)); 495 return; 496 497 case MultiPutByOffset: 605 AbstractHeap heap(NamedProperties, node->multiGetByOffsetData().identifierNumber); 606 read(heap); 607 def(HeapLocation(NamedPropertyLoc, heap, node->child1()), node); 608 return; 609 } 610 611 case MultiPutByOffset: { 498 612 read(JSCell_structureID); 499 613 read(JSObject_butterfly); 500 write(AbstractHeap(NamedProperties, node->multiPutByOffsetData().identifierNumber)); 614 AbstractHeap heap(NamedProperties, node->multiPutByOffsetData().identifierNumber); 615 write(heap); 501 616 if (node->multiPutByOffsetData().writesStructures()) 502 617 write(JSCell_structureID); 503 618 if (node->multiPutByOffsetData().reallocatesStorage()) 504 619 write(JSObject_butterfly); 505 return; 506 507 case PutByOffset: 508 write(AbstractHeap(NamedProperties, graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber)); 509 return; 620 
def(HeapLocation(NamedPropertyLoc, heap, node->child1()), node->child2().node()); 621 return; 622 } 623 624 case PutByOffset: { 625 unsigned identifierNumber = 626 graph.m_storageAccessData[node->storageAccessDataIndex()].identifierNumber; 627 AbstractHeap heap(NamedProperties, identifierNumber); 628 write(heap); 629 def(HeapLocation(NamedPropertyLoc, heap, node->child2()), node->child3().node()); 630 return; 631 } 510 632 511 633 case GetArrayLength: { … … 518 640 case Array::SlowPutArrayStorage: 519 641 read(Butterfly_publicLength); 642 def(HeapLocation(ArrayLengthLoc, Butterfly_publicLength, node->child1()), node); 520 643 return; 521 644 522 645 case Array::String: 646 def(PureValue(node, mode.asWord())); 523 647 return; 524 648 525 649 case Array::Arguments: 526 read( Arguments_overrideLength);527 read(Arguments_numArguments);650 read(MiscFields); 651 def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), node); 528 652 return; 529 653 530 654 default: 531 read(JSArrayBufferView_length); 655 ASSERT(mode.typedArrayType() != NotTypedArray); 656 read(MiscFields); 657 def(HeapLocation(ArrayLengthLoc, MiscFields, node->child1()), node); 532 658 return; 533 659 } … … 535 661 536 662 case GetMyScope: 537 read(AbstractHeap(Variables, JSStack::ScopeChain)); 663 if (graph.m_codeBlock->needsActivation()) { 664 read(AbstractHeap(Variables, JSStack::ScopeChain)); 665 def(HeapLocation(VariableLoc, AbstractHeap(Variables, JSStack::ScopeChain)), node); 666 } else 667 def(PureValue(node)); 538 668 return; 539 669 540 670 case SkipTopScope: 541 671 read(AbstractHeap(Variables, graph.activationRegister())); 672 def(HeapLocation(SkipTopScopeLoc, AbstractHeap(Variables, graph.activationRegister()), node->child1()), node); 542 673 return; 543 674 544 675 case GetClosureRegisters: 545 676 read(JSVariableObject_registers); 546 return; 547 677 def(HeapLocation(ClosureRegistersLoc, JSVariableObject_registers, node->child1()), node); 678 return; 679 548 680 case GetClosureVar: 549 681 read(AbstractHeap(Variables, node->varNumber())); 682 def(HeapLocation(ClosureVariableLoc, AbstractHeap(Variables, node->varNumber()), node->child1()), node); 550 683 return; 551 684 552 685 case PutClosureVar: 553 686 write(AbstractHeap(Variables, node->varNumber())); 687 def(HeapLocation(ClosureVariableLoc, AbstractHeap(Variables, node->varNumber()), node->child2()), node->child3().node()); 554 688 return; 555 689 556 690 case GetGlobalVar: 557 691 read(AbstractHeap(Absolute, node->registerPointer())); 692 def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->registerPointer())), node); 558 693 return; 559 694 560 695 case PutGlobalVar: 561 696 write(AbstractHeap(Absolute, node->registerPointer())); 562 return;563 564 case NewObject: 697 def(HeapLocation(GlobalVariableLoc, AbstractHeap(Absolute, node->registerPointer())), node->child1().node()); 698 return; 699 565 700 case NewArray: 566 701 case NewArrayWithSize: 567 702 case NewArrayBuffer: 703 case NewTypedArray: 704 // FIXME: Enable CSE for these nodes. We can't do this right now because there is no way 705 // for us to claim an index node and a value node. We could make this work if we lowered 706 // these nodes or if we had a more flexible way of def()'ing. 
707 // https://bugs.webkit.org/show_bug.cgi?id=134737 708 read(HeapObjectCount); 709 write(HeapObjectCount); 710 return; 711 712 case NewObject: 568 713 case NewRegexp: 569 714 case NewStringObject: 715 read(HeapObjectCount); 716 write(HeapObjectCount); 717 return; 718 570 719 case NewFunctionNoCheck: 571 720 case NewFunction: … … 574 723 write(HeapObjectCount); 575 724 return; 576 577 case NewTypedArray: 578 read(HeapObjectCount); 579 write(HeapObjectCount); 580 switch (node->child1().useKind()) { 581 case Int32Use: 582 return; 583 case UntypedUse: 584 read(World); 585 write(World); 586 return; 587 default: 588 RELEASE_ASSERT_NOT_REACHED(); 589 return; 590 } 591 725 592 726 case RegExpExec: 593 727 case RegExpTest: … … 602 736 return; 603 737 } 738 def(PureValue(node)); 604 739 return; 605 740 … … 609 744 case CompareGreater: 610 745 case CompareGreaterEq: 611 if (!node->isBinaryUseKind(UntypedUse)) 612 return; 746 if (!node->isBinaryUseKind(UntypedUse)) { 747 def(PureValue(node)); 748 return; 749 } 613 750 read(World); 614 751 write(World); … … 619 756 case StringObjectUse: 620 757 case StringOrStringObjectUse: 758 // These don't def a pure value, unfortunately. I'll avoid load-eliminating these for 759 // now. 621 760 return; 622 761 … … 633 772 634 773 case TearOffActivation: 774 read(Variables); 635 775 write(JSVariableObject_registers); 636 776 return; 637 777 638 778 case TearOffArguments: 779 read(Variables); 639 780 write(Arguments_registers); 640 781 return; … … 643 784 read(AbstractHeap(Variables, graph.argumentsRegisterFor(node->origin.semantic))); 644 785 read(AbstractHeap(Variables, JSStack::ArgumentCount)); 786 // FIXME: We could def() this by specifying the code origin as a kind of m_info, like we 787 // have for PureValue. 788 // https://bugs.webkit.org/show_bug.cgi?id=134797 645 789 return; 646 790 647 791 case GetMyArgumentByVal: 648 792 read(Variables); 793 // FIXME: We could def() this by specifying the code origin as a kind of m_info, like we 794 // have for PureValue. 795 // https://bugs.webkit.org/show_bug.cgi?id=134797 649 796 return; 650 797 … … 676 823 public: 677 824 NoOpClobberize() { } 678 void operator()(AbstractHeap) { } 825 template<typename... T> 826 void operator()(T...) { } 679 827 }; 680 828 … … 686 834 } 687 835 688 void operator()(AbstractHeap) { m_result = true; } 836 template<typename... T> 837 void operator()(T...) { m_result = true; } 689 838 690 839 bool result() const { return m_result; } … … 718 867 }; 719 868 869 bool accessesOverlap(Graph&, Node*, AbstractHeap); 720 870 bool writesOverlap(Graph&, Node*, AbstractHeap); 721 871 872 // We would have used bind() for these, but because of the overlaoding that we are doing, 873 // it's quite a bit of clearer to just write this out the traditional way. 
874 875 template<typename T> 876 class ReadMethodClobberize { 877 public: 878 ReadMethodClobberize(T& value) 879 : m_value(value) 880 { 881 } 882 883 void operator()(AbstractHeap heap) 884 { 885 m_value.read(heap); 886 } 887 private: 888 T& m_value; 889 }; 890 891 template<typename T> 892 class WriteMethodClobberize { 893 public: 894 WriteMethodClobberize(T& value) 895 : m_value(value) 896 { 897 } 898 899 void operator()(AbstractHeap heap) 900 { 901 m_value.write(heap); 902 } 903 private: 904 T& m_value; 905 }; 906 907 template<typename T> 908 class DefMethodClobberize { 909 public: 910 DefMethodClobberize(T& value) 911 : m_value(value) 912 { 913 } 914 915 void operator()(PureValue value) 916 { 917 m_value.def(value); 918 } 919 920 void operator()(HeapLocation location, Node* node) 921 { 922 m_value.def(location, node); 923 } 924 925 private: 926 T& m_value; 927 }; 928 929 template<typename Adaptor> 930 void clobberize(Graph& graph, Node* node, Adaptor& adaptor) 931 { 932 ReadMethodClobberize<Adaptor> read(adaptor); 933 WriteMethodClobberize<Adaptor> write(adaptor); 934 DefMethodClobberize<Adaptor> def(adaptor); 935 clobberize(graph, node, read, write, def); 936 } 937 722 938 } } // namespace JSC::DFG 723 939 -
trunk/Source/JavaScriptCore/dfg/DFGCommonData.h
r169040 r172129 33 33 #include "InlineCallFrameSet.h" 34 34 #include "JSCell.h" 35 #include "ProfiledCodeBlockJettisoningWatchpoint.h"36 35 #include "ProfilerCompilation.h" 37 36 #include "SymbolTable.h" … … 96 95 Vector<WeakReferenceTransition> transitions; 97 96 Vector<WriteBarrier<JSCell>> weakReferences; 97 Vector<WriteBarrier<Structure>> weakStructureReferences; 98 98 SegmentedVector<CodeBlockJettisoningWatchpoint, 1, 0> watchpoints; 99 SegmentedVector<ProfiledCodeBlockJettisoningWatchpoint, 1, 0> profiledWatchpoints;100 99 Vector<JumpReplacement> jumpReplacements; 101 100 -
trunk/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
r171660 r172129 415 415 addBaseCheck(indexInBlock, node, baseValue, variant.structureSet()); 416 416 417 if (variant.specificValue()) { 418 m_graph.convertToConstant(node, m_graph.freeze(variant.specificValue())); 417 JSValue baseForLoad; 418 if (variant.alternateBase()) 419 baseForLoad = variant.alternateBase(); 420 else 421 baseForLoad = baseValue.m_value; 422 if (JSValue value = m_graph.tryGetConstantProperty(baseForLoad, variant.baseStructure(), variant.offset())) { 423 m_graph.convertToConstant(node, m_graph.freeze(value)); 419 424 return; 420 425 } -
trunk/Source/JavaScriptCore/dfg/DFGDCEPhase.cpp
r170769 r172129 108 108 if (m_graph.m_form == SSA) { 109 109 Vector<BasicBlock*> depthFirst; 110 m_graph.getBlocksIn DepthFirstOrder(depthFirst);110 m_graph.getBlocksInPreOrder(depthFirst); 111 111 for (unsigned i = 0; i < depthFirst.size(); ++i) 112 112 fixupBlock(depthFirst[i]); … … 194 194 switch (node->op()) { 195 195 case MovHint: { 196 // Check if the child is dead. MovHint's child would only be a Phantom 197 // if we had just killed it.198 if (node->child1()->op() == Phantom ) {196 // Check if the child is dead. MovHint's child would only be a Phantom or 197 // Check if we had just killed it. 198 if (node->child1()->op() == Phantom || node->child1()->op() == Check) { 199 199 node->setOpAndDefaultFlags(ZombieHint); 200 200 node->child1() = Edge(); … … 221 221 } 222 222 223 node->convertToPhantom Unchecked();223 node->convertToPhantom(); 224 224 node->children.reset(); 225 225 node->setRefCount(1); -
trunk/Source/JavaScriptCore/dfg/DFGDesiredWeakReferences.cpp
r167897 r172129 58 58 for (unsigned i = 0; i < m_references.size(); i++) { 59 59 JSCell* target = m_references[i]; 60 common->weakReferences.append(WriteBarrier<JSCell>(vm, m_codeBlock->ownerExecutable(), target)); 60 if (Structure* structure = jsDynamicCast<Structure*>(target)) { 61 common->weakStructureReferences.append( 62 WriteBarrier<Structure>(vm, m_codeBlock->ownerExecutable(), structure)); 63 } else { 64 common->weakReferences.append( 65 WriteBarrier<JSCell>(vm, m_codeBlock->ownerExecutable(), target)); 66 } 61 67 } 62 68 } -
trunk/Source/JavaScriptCore/dfg/DFGEdgeDominates.h
r164424 r172129 46 46 void operator()(Node*, Edge edge) 47 47 { 48 bool result = m_graph.m_dominators.dominates(edge.node()-> misc.owner, m_block);48 bool result = m_graph.m_dominators.dominates(edge.node()->owner, m_block); 49 49 if (verbose) { 50 50 dataLog( 51 "Checking if ", edge, " in ", *edge.node()-> misc.owner,51 "Checking if ", edge, " in ", *edge.node()->owner, 52 52 " dominates ", *m_block, ": ", result, "\n"); 53 53 } -
trunk/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
r171689 r172129 1297 1297 } 1298 1298 1299 bool isStringPrototypeMethodSane(Structure* stringPrototypeStructure, StringImpl* uid) 1299 bool isStringPrototypeMethodSane( 1300 JSObject* stringPrototype, Structure* stringPrototypeStructure, StringImpl* uid) 1300 1301 { 1301 1302 unsigned attributesUnused; 1302 JSCell* specificValue; 1303 PropertyOffset offset = stringPrototypeStructure->getConcurrently( 1304 vm(), uid, attributesUnused, specificValue); 1303 PropertyOffset offset = 1304 stringPrototypeStructure->getConcurrently(vm(), uid, attributesUnused); 1305 1305 if (!isValidOffset(offset)) 1306 1306 return false; 1307 1307 1308 if (!specificValue) 1308 JSValue value = m_graph.tryGetConstantProperty( 1309 stringPrototype, stringPrototypeStructure, offset); 1310 if (!value) 1309 1311 return false; 1310 1312 1311 if (!specificValue->inherits(JSFunction::info())) 1313 JSFunction* function = jsDynamicCast<JSFunction*>(value); 1314 if (!function) 1312 1315 return false; 1313 1316 1314 JSFunction* function = jsCast<JSFunction*>(specificValue);1315 1317 if (function->executable()->intrinsicFor(CodeForCall) != StringPrototypeValueOfIntrinsic) 1316 1318 return false; … … 1341 1343 // between the two, just because that seems like it would get confusing. So we 1342 1344 // just require both methods to be sane. 1343 if (!isStringPrototypeMethodSane(stringPrototype Structure, vm().propertyNames->valueOf.impl()))1345 if (!isStringPrototypeMethodSane(stringPrototypeObject, stringPrototypeStructure, vm().propertyNames->valueOf.impl())) 1344 1346 return false; 1345 if (!isStringPrototypeMethodSane(stringPrototype Structure, vm().propertyNames->toString.impl()))1347 if (!isStringPrototypeMethodSane(stringPrototypeObject, stringPrototypeStructure, vm().propertyNames->toString.impl())) 1346 1348 return false; 1347 1349 -
trunk/Source/JavaScriptCore/dfg/DFGGraph.cpp
r171660 r172129 640 640 } 641 641 642 void Graph::addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock* block) 643 { 644 if (seen.contains(block)) 642 // Utilities for pre- and post-order traversals. 643 namespace { 644 645 inline void addForPreOrder(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, BitVector& seen, BasicBlock* block) 646 { 647 if (seen.get(block->index)) 645 648 return; 646 649 647 650 result.append(block); 648 651 worklist.append(block); 649 seen.add(block); 650 } 651 652 void Graph::getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result) 652 seen.set(block->index); 653 } 654 655 enum PostOrderTaskKind { 656 PostOrderFirstVisit, 657 PostOrderAddToResult 658 }; 659 660 struct PostOrderTask { 661 PostOrderTask(BasicBlock* block = nullptr, PostOrderTaskKind kind = PostOrderFirstVisit) 662 : m_block(block) 663 , m_kind(kind) 664 { 665 } 666 667 BasicBlock* m_block; 668 PostOrderTaskKind m_kind; 669 }; 670 671 inline void addForPostOrder(Vector<PostOrderTask, 16>& worklist, BitVector& seen, BasicBlock* block) 672 { 673 if (seen.get(block->index)) 674 return; 675 676 worklist.append(PostOrderTask(block, PostOrderFirstVisit)); 677 seen.set(block->index); 678 } 679 680 } // anonymous namespace 681 682 void Graph::getBlocksInPreOrder(Vector<BasicBlock*>& result) 653 683 { 654 684 Vector<BasicBlock*, 16> worklist; 655 HashSet<BasicBlock*>seen;656 addFor DepthFirstSort(result, worklist, seen, block(0));685 BitVector seen; 686 addForPreOrder(result, worklist, seen, block(0)); 657 687 while (!worklist.isEmpty()) { 658 688 BasicBlock* block = worklist.takeLast(); 659 689 for (unsigned i = block->numSuccessors(); i--;) 660 addForDepthFirstSort(result, worklist, seen, block->successor(i)); 690 addForPreOrder(result, worklist, seen, block->successor(i)); 691 } 692 } 693 694 void Graph::getBlocksInPostOrder(Vector<BasicBlock*>& result) 695 { 696 Vector<PostOrderTask, 16> worklist; 697 BitVector seen; 698 addForPostOrder(worklist, seen, block(0)); 699 while (!worklist.isEmpty()) { 700 PostOrderTask task = worklist.takeLast(); 701 switch (task.m_kind) { 702 case PostOrderFirstVisit: 703 worklist.append(PostOrderTask(task.m_block, PostOrderAddToResult)); 704 for (unsigned i = task.m_block->numSuccessors(); i--;) 705 addForPostOrder(worklist, seen, task.m_block->successor(i)); 706 break; 707 case PostOrderAddToResult: 708 result.append(task.m_block); 709 break; 710 } 661 711 } 662 712 } … … 669 719 continue; 670 720 for (unsigned phiIndex = block->phis.size(); phiIndex--;) 671 block->phis[phiIndex]-> misc.replacement = 0;721 block->phis[phiIndex]->replacement = 0; 672 722 for (unsigned nodeIndex = block->size(); nodeIndex--;) 673 block->at(nodeIndex)-> misc.replacement = 0;723 block->at(nodeIndex)->replacement = 0; 674 724 } 675 725 } … … 682 732 continue; 683 733 for (unsigned phiIndex = block->phis.size(); phiIndex--;) 684 block->phis[phiIndex]-> misc.owner = block;734 block->phis[phiIndex]->owner = block; 685 735 for (unsigned nodeIndex = block->size(); nodeIndex--;) 686 block->at(nodeIndex)-> misc.owner = block;736 block->at(nodeIndex)->owner = block; 687 737 } 688 738 } … … 788 838 { 789 839 return std::max(frameRegisterCount(), requiredRegisterCountForExit()); 840 } 841 842 JSValue Graph::tryGetConstantProperty( 843 JSValue base, const StructureSet& structureSet, PropertyOffset offset) 844 { 845 if (!base || !base.isObject()) 846 return JSValue(); 847 848 JSObject* object = asObject(base); 849 850 for 
(unsigned i = structureSet.size(); i--;) { 851 Structure* structure = structureSet[i]; 852 WatchpointSet* set = structure->propertyReplacementWatchpointSet(offset); 853 if (!set || !set->isStillValid()) 854 return JSValue(); 855 856 ASSERT(structure->isValidOffset(offset)); 857 ASSERT(!structure->isUncacheableDictionary()); 858 859 watchpoints().addLazily(set); 860 } 861 862 // What follows may require some extra thought. We need this load to load a valid JSValue. If 863 // our profiling makes sense and we're still on track to generate code that won't be 864 // invalidated, then we have nothing to worry about. We do, however, have to worry about 865 // loading - and then using - an invalid JSValue in the case that unbeknownst to us our code 866 // is doomed. 867 // 868 // One argument in favor of this code is that it should definitely work because the butterfly 869 // is always set before the structure. However, we don't currently have a fence between those 870 // stores. It's not clear if this matters, however. We don't ever shrink the property storage. 871 // So, for this to fail, you'd need an access on a constant object pointer such that the inline 872 // caches told us that the object had a structure that it did not *yet* have, and then later, 873 // the object transitioned to that structure that the inline caches had alraedy seen. And then 874 // the processor reordered the stores. Seems unlikely and difficult to test. I believe that 875 // this is worth revisiting but it isn't worth losing sleep over. Filed: 876 // https://bugs.webkit.org/show_bug.cgi?id=134641 877 // 878 // For now, we just do the minimal thing: defend against the structure right now being 879 // incompatible with the getDirect we're trying to do. The easiest way to do that is to 880 // determine if the structure belongs to the proven set. 881 882 if (!structureSet.contains(object->structure())) 883 return JSValue(); 884 885 return object->getDirect(offset); 886 } 887 888 JSValue Graph::tryGetConstantProperty(JSValue base, Structure* structure, PropertyOffset offset) 889 { 890 return tryGetConstantProperty(base, StructureSet(structure), offset); 891 } 892 893 JSValue Graph::tryGetConstantProperty( 894 JSValue base, const StructureAbstractValue& structure, PropertyOffset offset) 895 { 896 if (structure.isTop() || structure.isClobbered()) 897 return JSValue(); 898 899 return tryGetConstantProperty(base, structure.set(), offset); 900 } 901 902 JSValue Graph::tryGetConstantProperty(const AbstractValue& base, PropertyOffset offset) 903 { 904 return tryGetConstantProperty(base.m_value, base.m_structure, offset); 790 905 } 791 906 … … 901 1016 for (unsigned i = node->multiGetByOffsetData().variants.size(); i--;) { 902 1017 GetByIdVariant& variant = node->multiGetByOffsetData().variants[i]; 903 visitor.appendUnbarrieredReadOnlyValue(variant.specificValue());904 1018 const StructureSet& set = variant.structureSet(); 905 1019 for (unsigned j = set.size(); j--;) -
trunk/Source/JavaScriptCore/dfg/DFGGraph.h
r171660 r172129 126 126 127 127 // Check if there is any replacement. 128 Node* replacement = child-> misc.replacement;128 Node* replacement = child->replacement; 129 129 if (!replacement) 130 130 return; … … 134 134 // There is definitely a replacement. Assert that the replacement does not 135 135 // have a replacement. 136 ASSERT(!child-> misc.replacement);136 ASSERT(!child->replacement); 137 137 } 138 138 … … 676 676 void initializeNodeOwners(); 677 677 678 void getBlocksInDepthFirstOrder(Vector<BasicBlock*>& result); 678 void getBlocksInPreOrder(Vector<BasicBlock*>& result); 679 void getBlocksInPostOrder(Vector<BasicBlock*>& result); 679 680 680 681 Profiler::Compilation* compilation() { return m_plan.compilation.get(); } … … 691 692 unsigned requiredRegisterCountForExit(); 692 693 unsigned requiredRegisterCountForExecutionAndExit(); 694 695 JSValue tryGetConstantProperty(JSValue base, const StructureSet&, PropertyOffset); 696 JSValue tryGetConstantProperty(JSValue base, Structure*, PropertyOffset); 697 JSValue tryGetConstantProperty(JSValue base, const StructureAbstractValue&, PropertyOffset); 698 JSValue tryGetConstantProperty(const AbstractValue&, PropertyOffset); 693 699 694 700 JSActivation* tryGetActivation(Node*); … … 759 765 760 766 void handleSuccessor(Vector<BasicBlock*, 16>& worklist, BasicBlock*, BasicBlock* successor); 761 void addForDepthFirstSort(Vector<BasicBlock*>& result, Vector<BasicBlock*, 16>& worklist, HashSet<BasicBlock*>& seen, BasicBlock*);762 767 763 768 AddSpeculationMode addImmediateShouldSpeculateInt32(Node* add, bool variableShouldSpeculateInt32, Node* immediate, RareCaseProfilingSource source) -
trunk/Source/JavaScriptCore/dfg/DFGLICMPhase.cpp
r171613 r172129 152 152 // tend to hoist dominators before dominatees. 153 153 Vector<BasicBlock*> depthFirst; 154 m_graph.getBlocksIn DepthFirstOrder(depthFirst);154 m_graph.getBlocksInPreOrder(depthFirst); 155 155 Vector<const NaturalLoop*> loopStack; 156 156 bool changed = false; … … 246 246 247 247 data.preHeader->insertBeforeLast(node); 248 node-> misc.owner = data.preHeader;248 node->owner = data.preHeader; 249 249 NodeOrigin originalOrigin = node->origin; 250 250 node->origin.forExit = data.preHeader->last()->origin.forExit; -
trunk/Source/JavaScriptCore/dfg/DFGNode.h
r171660 r172129 209 209 , m_refCount(1) 210 210 , m_prediction(SpecNone) 211 { 212 misc.replacement = 0; 211 , replacement(nullptr) 212 , owner(nullptr) 213 { 213 214 setOpAndDefaultFlags(op); 214 215 } … … 223 224 , m_opInfo(0) 224 225 , m_opInfo2(0) 225 { 226 misc.replacement = 0; 226 , replacement(nullptr) 227 , owner(nullptr) 228 { 227 229 setOpAndDefaultFlags(op); 228 230 ASSERT(!(m_flags & NodeHasVarArgs)); … … 238 240 , m_opInfo(0) 239 241 , m_opInfo2(0) 240 { 241 misc.replacement = 0; 242 , replacement(nullptr) 243 , owner(nullptr) 244 { 242 245 setOpAndDefaultFlags(op); 243 246 setResult(result); … … 254 257 , m_opInfo(imm.m_value) 255 258 , m_opInfo2(0) 256 { 257 misc.replacement = 0; 259 , replacement(nullptr) 260 , owner(nullptr) 261 { 258 262 setOpAndDefaultFlags(op); 259 263 ASSERT(!(m_flags & NodeHasVarArgs)); … … 269 273 , m_opInfo(imm.m_value) 270 274 , m_opInfo2(0) 271 { 272 misc.replacement = 0; 275 , replacement(nullptr) 276 , owner(nullptr) 277 { 273 278 setOpAndDefaultFlags(op); 274 279 setResult(result); … … 285 290 , m_opInfo(imm1.m_value) 286 291 , m_opInfo2(imm2.m_value) 287 { 288 misc.replacement = 0; 292 , replacement(nullptr) 293 , owner(nullptr) 294 { 289 295 setOpAndDefaultFlags(op); 290 296 ASSERT(!(m_flags & NodeHasVarArgs)); … … 300 306 , m_opInfo(imm1.m_value) 301 307 , m_opInfo2(imm2.m_value) 302 { 303 misc.replacement = 0; 308 , replacement(nullptr) 309 , owner(nullptr) 310 { 304 311 setOpAndDefaultFlags(op); 305 312 ASSERT(m_flags & NodeHasVarArgs); … … 367 374 setOpAndDefaultFlags(Phantom); 368 375 } 369 370 void convertToPhantomUnchecked() 371 { 372 setOpAndDefaultFlags(Phantom); 376 377 void convertToCheck() 378 { 379 setOpAndDefaultFlags(Check); 380 } 381 382 void replaceWith(Node* other) 383 { 384 convertToPhantom(); 385 replacement = other; 373 386 } 374 387 … … 437 450 m_op = ConstantStoragePointer; 438 451 m_opInfo = bitwise_cast<uintptr_t>(pointer); 452 children.reset(); 439 453 } 440 454 … … 1761 1775 1762 1776 // Miscellaneous data that is usually meaningless, but can hold some analysis results 1763 // if you ask right. For example, if you do Graph::initializeNodeOwners(), misc.owner1777 // if you ask right. For example, if you do Graph::initializeNodeOwners(), Node::owner 1764 1778 // will tell you which basic block a node belongs to. You cannot rely on this persisting 1765 1779 // across transformations unless you do the maintenance work yourself. Other phases use 1766 // misc.replacement, but they do so manually: first you do Graph::clearReplacements()1780 // Node::replacement, but they do so manually: first you do Graph::clearReplacements() 1767 1781 // and then you set, and use, replacement's yourself. 1768 1782 // … … 1770 1784 // calling some appropriate methods that initialize them the way you want. Otherwise, 1771 1785 // these fields are meaningless. 1772 union { 1773 Node* replacement; 1774 BasicBlock* owner; 1775 } misc; 1786 Node* replacement; 1787 BasicBlock* owner; 1776 1788 }; 1777 1789 -
trunk/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
r167532 r172129 59 59 sanitizeStackForVM(vm); 60 60 61 if (bytecodeIndex) 62 codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true); 63 61 64 if (codeBlock->jitType() != JITCode::DFGJIT) { 62 65 RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT); -
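The new check in DFGOSREntry records loopy behavior eagerly: a nonzero bytecodeIndex means the caller is entering somewhere other than the top of the function, which in practice is a loop header. Reproduced with an explanatory comment added here (the comment is not in the patch):

    // Entry at bytecode 0 is ordinary entry; any other index is a loop OSR
    // entry, so remember that this executable tried to enter in a loop.
    if (bytecodeIndex)
        codeBlock->ownerExecutable()->setDidTryToEnterInLoop(true);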
trunk/Source/JavaScriptCore/dfg/DFGOSRExitCompilerCommon.cpp
r171362 r172129 55 55 AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()), 56 56 AssemblyHelpers::TrustedImm32(0)); 57 58 tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization())); 57 58 // We want to figure out if there's a possibility that we're in a loop. For the outermost 59 // code block in the inline stack, we handle this appropriately by having the loop OSR trigger 60 // check the exit count of the replacement of the CodeBlock from which we are OSRing. The 61 // problem is the inlined functions, which might also have loops, but whose baseline versions 62 // don't know where to look for the exit count. Figure out if those loops are severe enough 63 // that we had tried to OSR enter. If so, then we should use the loop reoptimization trigger. 64 // Otherwise, we should use the normal reoptimization trigger. 65 66 AssemblyHelpers::JumpList loopThreshold; 67 68 for (InlineCallFrame* inlineCallFrame = exit.m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) { 69 loopThreshold.append( 70 jit.branchTest8( 71 AssemblyHelpers::NonZero, 72 AssemblyHelpers::AbsoluteAddress( 73 inlineCallFrame->executable->addressOfDidTryToEnterInLoop()))); 74 } 75 76 jit.move( 77 AssemblyHelpers::TrustedImm32(jit.codeBlock()->exitCountThresholdForReoptimization()), 78 GPRInfo::regT1); 79 80 if (!loopThreshold.empty()) { 81 AssemblyHelpers::Jump done = jit.jump(); 82 83 loopThreshold.link(&jit); 84 jit.move( 85 AssemblyHelpers::TrustedImm32( 86 jit.codeBlock()->exitCountThresholdForReoptimizationFromLoop()), 87 GPRInfo::regT1); 88 89 done.link(&jit); 90 } 91 92 tooFewFails = jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1); 59 93 60 94 reoptimizeNow.link(&jit); … … 63 97 #if !NUMBER_OF_ARGUMENT_REGISTERS 64 98 jit.poke(GPRInfo::regT0); 99 jit.poke(AssemblyHelpers::TrustedImmPtr(&exit), 1); 65 100 #else 66 101 jit.move(GPRInfo::regT0, GPRInfo::argumentGPR0); 67 ASSERT(GPRInfo::argumentGPR0 != GPRInfo::regT1); 68 #endif 69 jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::regT1); 70 jit.call(GPRInfo::regT1); 102 jit.move(AssemblyHelpers::TrustedImmPtr(&exit), GPRInfo::argumentGPR1); 103 #endif 104 jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(triggerReoptimizationNow)), GPRInfo::nonArgGPR0); 105 jit.call(GPRInfo::nonArgGPR0); 71 106 AssemblyHelpers::Jump doneAdjusting = jit.jump(); 72 107 -
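In straight-line form, the threshold selection that the assembly above emits is roughly the following. This is a sketch: osrExitCount is an assumed name standing in for the exit count the generated code keeps in regT2.

    // If any function on the inline stack ever tried to OSR-enter in a loop,
    // judge the exit count against the loop reoptimization threshold instead
    // of the normal one.
    uint32_t threshold = codeBlock->exitCountThresholdForReoptimization();
    for (InlineCallFrame* frame = exit.m_codeOrigin.inlineCallFrame; frame; frame = frame->caller.inlineCallFrame) {
        if (frame->executable->didTryToEnterInLoop()) {
            threshold = codeBlock->exitCountThresholdForReoptimizationFromLoop();
            break;
        }
    }
    bool tooFewFails = osrExitCount <= threshold;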
trunk/Source/JavaScriptCore/dfg/DFGOperations.cpp
r171096 r172129 1031 1031 JSValue value = JSValue::decode(encodedValue); 1032 1032 1033 set->notifyWrite(vm, value); 1033 set->notifyWrite(vm, value, "Executed NotifyWrite"); 1034 1034 } 1035 1035 … … 1106 1106 } 1107 1107 1108 extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock) 1108 extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock, OSRExitBase* exit) 1109 1109 { 1110 1110 // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't … … 1130 1130 CodeBlock* optimizedCodeBlock = codeBlock->replacement(); 1131 1131 ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType())); 1132 1133 bool didTryToEnterIntoInlinedLoops = false; 1134 for (InlineCallFrame* inlineCallFrame = exit->m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) { 1135 if (inlineCallFrame->executable->didTryToEnterInLoop()) { 1136 didTryToEnterIntoInlinedLoops = true; 1137 break; 1138 } 1139 } 1132 1140 1133 1141 // In order to trigger reoptimization, one of two things must have happened: … … 1136 1144 bool didExitABunch = optimizedCodeBlock->shouldReoptimizeNow(); 1137 1145 bool didGetStuckInLoop = 1138 codeBlock->checkIfOptimizationThresholdReached() 1146 (codeBlock->checkIfOptimizationThresholdReached() || didTryToEnterIntoInlinedLoops) 1139 1147 && optimizedCodeBlock->shouldReoptimizeFromLoopNow(); 1140 1148 … … 1229 1237 if (Options::verboseOSR()) { 1230 1238 dataLog( 1231 *codeBlock, ": Entered triggerTierUpNow with executeCounter = ", 1239 *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ", 1232 1240 jitCode->tierUpCounter, "\n"); 1233 1241 } -
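The runtime-side trigger now receives the OSR exit, so it can make the same loop judgment as the exit thunk. Condensed from the function above, with comments added:

    // Reoptimize when either (a) the optimized code exited a lot, or (b) we
    // appear stuck in a loop: the baseline counter crossed its threshold, or
    // some function on the inline stack is known to have tried loop OSR entry.
    bool didExitABunch = optimizedCodeBlock->shouldReoptimizeNow();
    bool didGetStuckInLoop =
        (codeBlock->checkIfOptimizationThresholdReached() || didTryToEnterIntoInlinedLoops)
        && optimizedCodeBlock->shouldReoptimizeFromLoopNow();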
trunk/Source/JavaScriptCore/dfg/DFGOperations.h
r171096 r172129 32 32 #include "PutKind.h" 33 33 34 namespace JSC { 35 36 namespace DFG { 34 namespace JSC { namespace DFG { 35 36 struct OSRExitBase; 37 37 38 38 extern "C" { … … 137 137 void JIT_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void*, void*) WTF_INTERNAL; 138 138 139 void JIT_OPERATION triggerReoptimizationNow(CodeBlock*) WTF_INTERNAL; 139 void JIT_OPERATION triggerReoptimizationNow(CodeBlock*, OSRExitBase*) WTF_INTERNAL; 140 140 141 141 #if ENABLE(FTL_JIT) -
trunk/Source/JavaScriptCore/dfg/DFGPlan.cpp
r171613 r172129 50 50 #include "DFGOSRAvailabilityAnalysisPhase.h" 51 51 #include "DFGOSREntrypointCreationPhase.h" 52 #include "DFGPhantomRemovalPhase.h" 52 53 #include "DFGPredictionInjectionPhase.h" 53 54 #include "DFGPredictionPropagationPhase.h" … … 251 252 252 253 performStrengthReduction(dfg); 253 performCSE(dfg); 254 performLocalCSE(dfg); 254 255 performArgumentsSimplification(dfg); 255 256 performCPSRethreading(dfg); … … 258 259 bool changed = false; 259 260 changed |= performCFGSimplification(dfg); 260 changed |= performCSE(dfg); 261 changed |= performLocalCSE(dfg); 261 262 262 263 if (validationEnabled()) 263 264 validate(dfg); 264 265 265 266 performCPSRethreading(dfg); 266 267 if (changed) { … … 283 284 284 285 performStoreBarrierElision(dfg); 286 performPhantomRemoval(dfg); 285 287 performCPSRethreading(dfg); 286 288 performDCE(dfg); … … 310 312 } 311 313 314 performPhantomRemoval(dfg); 312 315 performCriticalEdgeBreaking(dfg); 313 316 performLoopPreHeaderCreation(dfg); … … 315 318 performSSAConversion(dfg); 316 319 performSSALowering(dfg); 317 performCSE(dfg); 318 319 // At this point we're not allowed to do any further code motion because our reasoning 320 // about code motion assumes that it's OK to insert GC points in random places. 321 322 performStoreBarrierElision(dfg); 320 performGlobalCSE(dfg); 323 321 performLivenessAnalysis(dfg); 324 322 performCFA(dfg); … … 331 329 } 332 330 performLICM(dfg); 331 performPhantomRemoval(dfg); 333 332 performIntegerCheckCombining(dfg); 334 performCSE(dfg); 333 performGlobalCSE(dfg); 335 334 336 335 // At this point we're not allowed to do any further code motion because our reasoning … … 339 338 340 339 performStoreBarrierElision(dfg); 340 performPhantomRemoval(dfg); 341 341 performLivenessAnalysis(dfg); 342 342 performCFA(dfg); -
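For orientation, the tail of the FTL (SSA) pipeline after this change reads roughly as follows, condensed from the diff; validation and a few unrelated steps are omitted:

    performPhantomRemoval(dfg);        // shed Phantoms before SSA construction
    performCriticalEdgeBreaking(dfg);
    performLoopPreHeaderCreation(dfg);
    performSSAConversion(dfg);
    performSSALowering(dfg);
    performGlobalCSE(dfg);             // SSA-aware CSE; CPS stages now use performLocalCSE
    performLivenessAnalysis(dfg);
    performCFA(dfg);
    performLICM(dfg);
    performPhantomRemoval(dfg);
    performIntegerCheckCombining(dfg);
    performGlobalCSE(dfg);
    performStoreBarrierElision(dfg);
    performPhantomRemoval(dfg);
    performLivenessAnalysis(dfg);
    performCFA(dfg);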
trunk/Source/JavaScriptCore/dfg/DFGSSAConversionPhase.cpp
r168480 r172129 247 247 } 248 248 ASSERT(phi != block->variablesAtHead.operand(phi->local())); 249 phi->misc.replacement = block->variablesAtHead.operand(phi->local()); 249 phi->replacement = block->variablesAtHead.operand(phi->local()); 250 250 } 251 251 } … … 262 262 if (!node) 263 263 continue; 264 while (node->misc.replacement) { 265 ASSERT(node != node->misc.replacement); 266 node = node->misc.replacement; 264 while (node->replacement) { 265 ASSERT(node != node->replacement); 266 node = node->replacement; 267 267 } 268 268 block->variablesAtHead[i] = node; … … 302 302 303 303 for (unsigned phiIndex = block->phis.size(); phiIndex--;) { 304 block->phis[phiIndex]->misc.replacement = 304 block->phis[phiIndex]->replacement = 305 305 block->variablesAtHead.operand(block->phis[phiIndex]->local()); 306 306 } 307 307 for (unsigned nodeIndex = block->size(); nodeIndex--;) 308 ASSERT(!block->at(nodeIndex)->misc.replacement); 308 ASSERT(!block->at(nodeIndex)->replacement); 309 309 310 310 for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) { … … 320 320 else 321 321 node->setOpAndDefaultFlags(Check); 322 node->misc.replacement = node->child1().node(); // Only for Upsilons. 322 node->replacement = node->child1().node(); // Only for Upsilons. 323 323 break; 324 324 } … … 334 334 break; 335 335 node->convertToPhantom(); 336 node->misc.replacement = block->variablesAtHead.operand(variable->local()); 336 node->replacement = block->variablesAtHead.operand(variable->local()); 337 337 break; 338 338 } … … 343 343 // This is only for Upsilons. An Upsilon will only refer to a Flush if 344 344 // there were no SetLocals or GetLocals in the block. 345 node->misc.replacement = block->variablesAtHead.operand(node->local()); 345 node->replacement = block->variablesAtHead.operand(node->local()); 346 346 break; 347 347 } … … 368 368 // This is only for Upsilons. An Upsilon will only refer to a 369 369 // PhantomLocal if there were no SetLocals or GetLocals in the block. 370 node->misc.replacement = block->variablesAtHead.operand(variable->local()); 370 node->replacement = block->variablesAtHead.operand(variable->local()); 371 371 break; 372 372 } … … 399 399 block->valuesAtHead.clear(); 400 400 block->valuesAtHead.clear(); 401 block->ssa = adoptPtr(new BasicBlock::SSAData(block)); 401 block->ssa = std::make_unique<BasicBlock::SSAData>(block); 402 402 } 403 403 -
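The mechanical change here is misc.replacement becoming Node::replacement, but the resolution loop is worth calling out: variablesAtHead entries may point at nodes that earlier rewrites turned into forwarding stubs, so the phase chases each chain to its end. Annotated sketch of that step (comments added):

    // Follow the forwarding chain left in Node::replacement until we reach
    // the real SSA value; chains are acyclic by construction.
    Node* node = block->variablesAtHead[i];
    if (node) {
        while (node->replacement) {
            ASSERT(node != node->replacement);
            node = node->replacement;
        }
        block->variablesAtHead[i] = node;
    }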
trunk/Source/JavaScriptCore/dfg/DFGStrengthReductionPhase.cpp
r171613 r172129 1 1 /* 2 * Copyright (C) 2013 Apple Inc. All rights reserved.2 * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 29 29 #if ENABLE(DFG_JIT) 30 30 31 #include "DFGAbstractHeap.h" 32 #include "DFGClobberize.h" 31 33 #include "DFGGraph.h" 32 34 #include "DFGInsertionSet.h" … … 217 219 } 218 220 221 case Flush: { 222 ASSERT(m_graph.m_form != SSA); 223 224 Node* setLocal = nullptr; 225 VirtualRegister local = m_node->local(); 226 227 if (m_node->variableAccessData()->isCaptured()) { 228 for (unsigned i = m_nodeIndex; i--;) { 229 Node* node = m_block->at(i); 230 bool done = false; 231 switch (node->op()) { 232 case GetLocal: 233 case Flush: 234 if (node->local() == local) 235 done = true; 236 break; 237 238 case GetLocalUnlinked: 239 if (node->unlinkedLocal() == local) 240 done = true; 241 break; 242 243 case SetLocal: { 244 if (node->local() != local) 245 break; 246 setLocal = node; 247 done = true; 248 break; 249 } 250 251 case Phantom: 252 case Check: 253 case HardPhantom: 254 case MovHint: 255 case JSConstant: 256 case DoubleConstant: 257 case Int52Constant: 258 break; 259 260 default: 261 done = true; 262 break; 263 } 264 if (done) 265 break; 266 } 267 } else { 268 for (unsigned i = m_nodeIndex; i--;) { 269 Node* node = m_block->at(i); 270 if (node->op() == SetLocal && node->local() == local) { 271 setLocal = node; 272 break; 273 } 274 if (accessesOverlap(m_graph, node, AbstractHeap(Variables, local))) 275 break; 276 } 277 } 278 279 if (!setLocal) 280 break; 281 282 m_node->convertToPhantom(); 283 Node* dataNode = setLocal->child1().node(); 284 DFG_ASSERT(m_graph, m_node, dataNode->hasResult()); 285 m_node->child1() = dataNode->defaultEdge(); 286 m_graph.dethread(); 287 m_changed = true; 288 break; 289 } 290 219 291 default: 220 292 break; -
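The new Flush rule in strength reduction turns a redundant flush into a Phantom that keeps the flushed value alive. For the common uncaptured case the scan is a straight backward walk; sketched here with comments added:

    // Walk backwards from the Flush. If we reach the SetLocal for the same
    // local before anything that might read or write that local, the Flush
    // is redundant and can become a Phantom over the stored value.
    Node* setLocal = nullptr;
    for (unsigned i = m_nodeIndex; i--;) {
        Node* node = m_block->at(i);
        if (node->op() == SetLocal && node->local() == local) {
            setLocal = node;
            break;
        }
        if (accessesOverlap(m_graph, node, AbstractHeap(Variables, local)))
            break; // a potential use of the local; keep the Flush
    }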
trunk/Source/JavaScriptCore/dfg/DFGWatchableStructureWatchingPhase.cpp
r171660 r172129 87 87 for (unsigned i = node->multiGetByOffsetData().variants.size(); i--;) { 88 88 GetByIdVariant& variant = node->multiGetByOffsetData().variants[i]; 89 tryWatch(m_graph.freeze(variant.specificValue())->structure());90 89 tryWatch(variant.structureSet()); 91 90 // Don't need to watch anything in the structure chain because that would