Changeset 251468 in webkit for trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Timestamp: Oct 22, 2019, 5:55:38 PM
File: 1 edited
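This changeset converts DFGByteCodeParser.cpp from passing bytecode positions around as raw unsigned offsets to the dedicated BytecodeIndex type. In the diff below: parser entry points such as allocateTargetableBlock, makeBlockTargetable, getPrediction, getPredictionWithoutOSRExit, handleCallVariant, and handleInlining now take a BytecodeIndex; the parser cursor m_currentIndex becomes a BytecodeIndex, so jump-target arithmetic is spelled m_currentIndex.offset() + relativeOffset; the UINT_MAX sentinel for "block not yet targetable" is replaced by a default-constructed, invalid BytecodeIndex; and profiling accessors are renamed to match (valueProfilePredictionForBytecodeOffset to valueProfilePredictionForBytecodeIndex, arithProfileForBytecodeOffset to arithProfileForBytecodeIndex, couldTakeSpecialFastCase to couldTakeSpecialArithFastCase, blockForBytecodeOffset to blockForBytecodeIndex).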
--- trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (r251106)
+++ trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp (r251468)

@@ lines 149-156 @@
     // It is also used when doing an early return from an inlined callee: it is easier to fix the bytecode index later on if needed
     // than to move the right index all the way to the treatment of op_ret.
-    BasicBlock* allocateTargetableBlock(unsigned bytecodeIndex);
+    BasicBlock* allocateTargetableBlock(BytecodeIndex);
     BasicBlock* allocateUntargetableBlock();
     // An untargetable block can be given a bytecodeIndex to be later managed by linkBlock, but only once, and it can never go in the other direction
-    void makeBlockTargetable(BasicBlock*, unsigned bytecodeIndex);
+    void makeBlockTargetable(BasicBlock*, BytecodeIndex);
     void addJumpTo(BasicBlock*);
     void addJumpTo(unsigned bytecodeIndex);

@@ lines 175-180 @@
     unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind);
     enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing };
-    CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
-    CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
+    CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee);
+    CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
     template<typename ChecksFunctor>
     void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);

@@ lines 568-572 @@
     origin.walkUpInlineStack(
         [&] (CodeOrigin origin) {
-            unsigned bytecodeIndex = origin.bytecodeIndex();
+            BytecodeIndex bytecodeIndex = origin.bytecodeIndex();
             InlineCallFrame* inlineCallFrame = origin.inlineCallFrame();
             flushImpl(inlineCallFrame, addFlushDirect);

@@ lines 643-651 @@
     void flushIfTerminal(SwitchData& data)
     {
-        if (data.fallThrough.bytecodeIndex() > m_currentIndex)
+        if (data.fallThrough.bytecodeIndex() > m_currentIndex.offset())
             return;

         for (unsigned i = data.cases.size(); i--;) {
-            if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
+            if (data.cases[i].target.bytecodeIndex() > m_currentIndex.offset())
                 return;
         }

@@ lines 710-714 @@
         // We assume that branches originating from bytecode always have a fall-through. We
         // use this assumption to avoid checking for the creation of terminal blocks.
-        ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
+        ASSERT((taken > m_currentIndex.offset()) || (notTaken > m_currentIndex.offset()));
         BranchData* data = m_graph.m_branchData.add();
         *data = BranchData::withBytecodeIndices(taken, notTaken);

@@ lines 835-839 @@
     }

-    SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
+    SpeculatedType getPredictionWithoutOSRExit(BytecodeIndex bytecodeIndex)
     {
         auto getValueProfilePredictionFromForCodeBlockAndBytecodeOffset = [&] (CodeBlock* codeBlock, const CodeOrigin& codeOrigin)

@@ lines 842-846 @@
         {
             ConcurrentJSLocker locker(codeBlock->m_lock);
-            prediction = codeBlock->valueProfilePredictionForBytecodeOffset(locker, codeOrigin.bytecodeIndex());
+            prediction = codeBlock->valueProfilePredictionForBytecodeIndex(locker, codeOrigin.bytecodeIndex());
         }
         auto* fuzzerAgent = m_vm->fuzzerAgent();

@@ lines 861-865 @@
         // inlined tail call frames, we use SpecFullTop
        // to avoid a spurious OSR exit.
-        auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex);
+        auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex.offset());
         OpcodeID opcodeID = instruction->opcodeID();

@@ lines 895-899 @@
     }

-    SpeculatedType getPrediction(unsigned bytecodeIndex)
+    SpeculatedType getPrediction(BytecodeIndex bytecodeIndex)
     {
         SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);

@@ lines 921-925 @@
     {
         CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock;
-        ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction));
+        ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeIndex(m_currentInstruction));
         return getArrayMode(*profile, action);
     }

@@ lines 944-948 @@

        {
-            ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
+            ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeIndex(m_currentIndex);
             if (arithProfile) {
                 switch (node->op()) {

@@ lines 957-961 @@
                     node->mergeFlags(NodeMayHaveBigIntResult);
                     break;

                 case ValueMul:
                 case ArithMul: {

@@ lines 988-992 @@
                     break;
                 }

                 default:
                     break;

@@ lines 1029-1032 @@
         // is what the special fast case counter tells us.

-        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
+        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialArithFastCase(m_currentIndex))
             return node;

@@ lines 1035-1039 @@
         node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline);

-        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeOffset(m_currentIndex);
+        ArithProfile* arithProfile = m_inlineStackTop->m_profiledBlock->arithProfileForBytecodeIndex(m_currentIndex);
         if (arithProfile->didObserveBigInt())
             node->mergeFlags(NodeMayHaveBigIntResult);

@@ lines 1062-1066 @@
     BasicBlock* m_currentBlock;
     // The bytecode index of the current instruction being generated.
-    unsigned m_currentIndex;
+    BytecodeIndex m_currentIndex;
     // The semantic origin of the current node if different from the current Index.
     CodeOrigin m_currentSemanticOrigin;

@@ lines 1192-1203 @@
 };

-BasicBlock* ByteCodeParser::allocateTargetableBlock(unsigned bytecodeIndex)
+BasicBlock* ByteCodeParser::allocateTargetableBlock(BytecodeIndex bytecodeIndex)
 {
-    ASSERT(bytecodeIndex != UINT_MAX);
+    ASSERT(bytecodeIndex);
     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
     BasicBlock* blockPtr = block.ptr();
     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
     if (m_inlineStackTop->m_blockLinkingTargets.size())
-        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
+        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin.offset() < bytecodeIndex.offset());
     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
     m_graph.appendBlock(WTFMove(block));

@@ lines 1207-1211 @@
 BasicBlock* ByteCodeParser::allocateUntargetableBlock()
 {
-    Ref<BasicBlock> block = adoptRef(*new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, 1));
+    Ref<BasicBlock> block = adoptRef(*new BasicBlock(BytecodeIndex(), m_numArguments, m_numLocals, 1));
     BasicBlock* blockPtr = block.ptr();
     m_graph.appendBlock(WTFMove(block));

@@ lines 1213-1223 @@
 }

-void ByteCodeParser::makeBlockTargetable(BasicBlock* block, unsigned bytecodeIndex)
+void ByteCodeParser::makeBlockTargetable(BasicBlock* block, BytecodeIndex bytecodeIndex)
 {
-    RELEASE_ASSERT(block->bytecodeBegin == UINT_MAX);
+    RELEASE_ASSERT(!block->bytecodeBegin);
     block->bytecodeBegin = bytecodeIndex;
     // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
     if (m_inlineStackTop->m_blockLinkingTargets.size())
-        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
+        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin.offset() < bytecodeIndex.offset());
     m_inlineStackTop->m_blockLinkingTargets.append(block);
 }

@@ lines 1279-1283 @@
     VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
     auto optimizationResult = handleInlining(callTarget, result, callLinkStatus, registerOffset, thisArgument,
-        argumentCountIncludingThis, m_currentIndex + instructionSize, op, kind, prediction);
+        argumentCountIncludingThis, BytecodeIndex(m_currentIndex.offset() + instructionSize), op, kind, prediction);
     if (optimizationResult == CallOptimizationResult::OptimizedToJump)
         return Terminal;

@@ lines 1458-1465 @@

     // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
-    unsigned oldIndex = m_currentIndex;
+    BytecodeIndex oldIndex = m_currentIndex;
     auto oldStackTop = m_inlineStackTop;
     m_inlineStackTop = stackEntry;
-    m_currentIndex = opcodeLengths[op_enter];
+    m_currentIndex = BytecodeIndex(opcodeLengths[op_enter]);
     m_exitOK = true;
     processSetLocalQueue();

@@ lines 1468-1472 @@
     m_exitOK = false;

-    BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock);
+    BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, BytecodeIndex>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), BytecodeIndex(opcodeLengths[op_enter]), getBytecodeBeginForBlock);
     RELEASE_ASSERT(entryBlockPtr);
     addJumpTo(*entryBlockPtr);

@@ lines 1626-1631 @@

     // This is where the actual inlining really happens.
-    unsigned oldIndex = m_currentIndex;
-    m_currentIndex = 0;
+    BytecodeIndex oldIndex = m_currentIndex;
+    m_currentIndex = BytecodeIndex(0);

     switch (kind) {

@@ lines 1762-1766 @@
 }

-ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
+ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, BytecodeIndex nextIndex, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)
 {
     VERBOSE_LOG("    Considering callee ", callee, "\n");

@@ lines 1790-1794 @@
     inliningBalance--;
     if (continuationBlock) {
-        m_currentIndex = nextOffset;
+        m_currentIndex = nextIndex;
         m_exitOK = true;
         processSetLocalQueue();

@@ lines 1973-1977 @@
     int registerOffset, VirtualRegister thisArgument,
     int argumentCountIncludingThis,
-    unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
+    BytecodeIndex nextIndex, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction)
 {
     VERBOSE_LOG("Handling inlining...\nStack: ", currentCodeOrigin(), "\n");

@@ lines 1986-1989 @@
         return handleCallVariant(
             callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument,
-            argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true);
+            argumentCountIncludingThis, nextIndex, kind, prediction, inliningBalance, nullptr, true);
     }

@@ lines 2062-2068 @@
     VERBOSE_LOG("About to loop over functions at ", currentCodeOrigin(), ".\n");

-    unsigned oldOffset = m_currentIndex;
+    BytecodeIndex oldIndex = m_currentIndex;
     for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
-        m_currentIndex = oldOffset;
+        m_currentIndex = oldIndex;
         BasicBlock* calleeEntryBlock = allocateUntargetableBlock();
         m_currentBlock = calleeEntryBlock;

@@ lines 2076-2080 @@
         auto inliningResult = handleCallVariant(
             myCallTargetNode, result, callLinkStatus[i], registerOffset,
-            thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction,
+            thisArgument, argumentCountIncludingThis, nextIndex, kind, prediction,
             inliningBalance, continuationBlock, false);

@@ lines 2105-2109 @@
     // Slow path block
     m_currentBlock = allocateUntargetableBlock();
-    m_currentIndex = oldOffset;
+    m_currentIndex = oldIndex;
     m_exitOK = true;
     data.fallThrough = BranchTarget(m_currentBlock);

@@ lines 2125-2129 @@
     }

-    m_currentIndex = nextOffset;
+    m_currentIndex = nextIndex;
     m_exitOK = true; // Origin changed, so it's fine to exit again.
     processSetLocalQueue();

@@ lines 2137-2141 @@
     prepareToParseBlock();

-    m_currentIndex = oldOffset;
+    m_currentIndex = oldIndex;
     m_currentBlock = continuationBlock;
     m_exitOK = true;

@@ lines 4694-4698 @@
 #define NEXT_OPCODE(name) \
     if (true) { \
-        m_currentIndex += currentInstruction->size(); \
+        m_currentIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size()); \
         goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \
     } else \

@@ lines 4701-4705 @@

 #define LAST_OPCODE_LINKED(name) do { \
-        m_currentIndex += currentInstruction->size(); \
+        m_currentIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size()); \
         m_exitOK = false; \
         return; \

@@ lines 4725-4729 @@
 {
     auto& instructions = m_inlineStackTop->m_codeBlock->instructions();
-    unsigned blockBegin = m_currentIndex;
+    BytecodeIndex blockBegin = m_currentIndex;

     // If we are the first basic block, introduce markers for arguments. This allows

@@ lines 4769-4773 @@

         // Don't extend over jump destinations.
-        if (m_currentIndex == limit) {
+        if (m_currentIndex.offset() == limit) {
             // Ordinarily we want to plant a jump. But refuse to do this if the block is
             // empty. This is a special case for inlining, which might otherwise create

@@ lines 4779-4783 @@

             if (!m_currentBlock->isEmpty())
-                addJumpTo(m_currentIndex);
+                addJumpTo(m_currentIndex.offset());
             return;
         }

@@ lines 5804-5808 @@
     auto bytecode = currentInstruction->as<OpJmp>();
     int relativeOffset = jumpTarget(bytecode.m_targetLabel);
-    addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
+    addToGraph(Jump, OpInfo(m_currentIndex.offset() + relativeOffset));
     if (relativeOffset <= 0)
         flushForTerminal();

@@ lines 5814-5818 @@
     unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
     Node* condition = get(bytecode.m_condition);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jtrue);
 }

@@ lines 5822-5826 @@
     unsigned relativeOffset = jumpTarget(bytecode.m_targetLabel);
     Node* condition = get(bytecode.m_condition);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jfalse);
 }

@@ lines 5832-5836 @@
     Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
     Node* condition = addToGraph(CompareEq, value, nullConstant);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jeq_null);
 }

@@ lines 5842-5846 @@
     Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull));
     Node* condition = addToGraph(CompareEq, value, nullConstant);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jneq_null);
 }

@@ lines 5851-5855 @@
     Node* value = get(bytecode.m_value);
     Node* condition = addToGraph(IsUndefinedOrNull, value);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jundefined_or_null);
 }

@@ lines 5860-5864 @@
     Node* value = get(bytecode.m_value);
     Node* condition = addToGraph(IsUndefinedOrNull, value);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jnundefined_or_null);
 }

@@ lines 5870-5874 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareLess, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jless);
 }

@@ lines 5880-5884 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareLessEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jlesseq);
 }

@@ lines 5890-5894 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareGreater, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jgreater);
 }

@@ lines 5900-5904 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jgreatereq);
 }

@@ lines 5910-5914 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jeq);
 }

@@ lines 5920-5924 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareStrictEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jstricteq);
 }

@@ lines 5930-5934 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareLess, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jnless);
 }

@@ lines 5940-5944 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareLessEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jnlesseq);
 }

@@ lines 5950-5954 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareGreater, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jngreater);
 }

@@ lines 5960-5964 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareGreaterEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jngreatereq);
 }

@@ lines 5970-5974 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jneq);
 }

@@ lines 5980-5984 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareStrictEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
     LAST_OPCODE(op_jnstricteq);
 }

@@ lines 5990-5994 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareBelow, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jbelow);
 }

@@ lines 6000-6004 @@
     Node* op2 = get(bytecode.m_rhs);
     Node* condition = addToGraph(CompareBelowEq, op1, op2);
-    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition);
+    addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + relativeOffset, m_currentIndex.offset() + currentInstruction->size())), condition);
     LAST_OPCODE(op_jbeloweq);
 }

@@ lines 6009-6018 @@
     data.kind = SwitchImm;
     data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
-    data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
+    data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
     SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
     for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
         if (!table.branchOffsets[i])
             continue;
-        unsigned target = m_currentIndex + table.branchOffsets[i];
+        unsigned target = m_currentIndex.offset() + table.branchOffsets[i];
         if (target == data.fallThrough.bytecodeIndex())
             continue;

@@ lines 6029-6038 @@
     data.kind = SwitchChar;
     data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.m_tableIndex];
-    data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
+    data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
     SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
     for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
         if (!table.branchOffsets[i])
             continue;
-        unsigned target = m_currentIndex + table.branchOffsets[i];
+        unsigned target = m_currentIndex.offset() + table.branchOffsets[i];
         if (target == data.fallThrough.bytecodeIndex())
             continue;

@@ lines 6050-6059 @@
     data.kind = SwitchString;
     data.switchTableIndex = bytecode.m_tableIndex;
-    data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.m_defaultOffset));
+    data.fallThrough.setBytecodeIndex(m_currentIndex.offset() + jumpTarget(bytecode.m_defaultOffset));
     StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
     StringJumpTable::StringOffsetTable::iterator iter;
     StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
     for (iter = table.offsetTable.begin(); iter != end; ++iter) {
-        unsigned target = m_currentIndex + iter->value.branchOffset;
+        unsigned target = m_currentIndex.offset() + iter->value.branchOffset;
         if (target == data.fallThrough.bytecodeIndex())
             continue;

@@ lines 6080-6084 @@
     setDirect(m_inlineStackTop->m_returnValue, get(bytecode.m_value), ImmediateSetWithFlush);

-    if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
+    if (!m_inlineStackTop->m_continuationBlock && m_currentIndex.offset() + currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) {
         // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one.
         // It is untargetable, because we do not know the appropriate index.

@@ lines 6208-6212 @@
     entrypointArguments.resize(m_numArguments);

-    unsigned exitBytecodeIndex = m_currentIndex + currentInstruction->size();
+    BytecodeIndex exitBytecodeIndex = BytecodeIndex(m_currentIndex.offset() + currentInstruction->size());

     for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) {

@@ lines 6317-6321 @@
     if (bytecode.metadata(codeBlock).m_hasJumped) {
         Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child);
-        addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition);
+        addToGraph(Branch, OpInfo(branchData(m_currentIndex.offset() + currentInstruction->size(), m_currentIndex.offset() + relativeOffset)), condition);
         LAST_OPCODE(op_jneq_ptr);
     }

@@ lines 7136-7146 @@
     switch (node->op()) {
     case Jump:
-        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
+        node->targetBlock() = blockForBytecodeIndex(possibleTargets, BytecodeIndex(node->targetBytecodeOffsetDuringParsing()));
         break;

     case Branch: {
         BranchData* data = node->branchData();
-        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
-        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
+        data->taken.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->takenBytecodeIndex()));
+        data->notTaken.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->notTakenBytecodeIndex()));
         break;
     }

@@ lines 7149-7154 @@
         SwitchData* data = node->switchData();
         for (unsigned i = node->switchData()->cases.size(); i--;)
-            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
-        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
+            data->cases[i].target.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->cases[i].target.bytecodeIndex()));
+        data->fallThrough.block = blockForBytecodeIndex(possibleTargets, BytecodeIndex(data->fallThrough.bytecodeIndex()));
         break;
     }

@@ lines 7324-7328 @@
         // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
         unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
-        ASSERT(m_currentIndex < limit);
+        ASSERT(m_currentIndex.offset() < limit);

         // Loop until we reach the current limit (i.e. next jump target).

@@ lines 7347-7367 @@

             // We should not have gone beyond the limit.
-            ASSERT(m_currentIndex <= limit);
+            ASSERT(m_currentIndex.offset() <= limit);

             if (m_currentBlock->isEmpty()) {
                 // This case only happens if the last instruction was an inlined call with early returns
                 // or polymorphic (creating an empty continuation block),
                 // and then we hit the limit before putting anything in the continuation block.
-                ASSERT(m_currentIndex == limit);
+                ASSERT(m_currentIndex.offset() == limit);
                 makeBlockTargetable(m_currentBlock, m_currentIndex);
             } else {
-                ASSERT(m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()));
+                ASSERT(m_currentBlock->terminal() || (m_currentIndex.offset() == codeBlock->instructions().size() && inlineCallFrame()));
                 m_currentBlock = nullptr;
             }
-        } while (m_currentIndex < limit);
+        } while (m_currentIndex.offset() < limit);
     }

     // Should have reached the end of the instructions.
-    ASSERT(m_currentIndex == codeBlock->instructions().size());
+    ASSERT(m_currentIndex.offset() == codeBlock->instructions().size());

     VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n");

@@ lines 7526-7530 @@
 {
     // Set during construction.
-    ASSERT(!m_currentIndex);
+    ASSERT(!m_currentIndex.offset());

     VERBOSE_LOG("Parsing ", *m_codeBlock, "\n");
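To make the pattern concrete, here is a minimal sketch of a BytecodeIndex-style wrapper, written against only the operations this diff exercises: default construction as an invalid sentinel (replacing the old UINT_MAX), explicit construction from an unsigned offset, offset() for raw jump-target arithmetic, and a boolean validity test. This is an illustration, not JSC's actual class, which lives elsewhere in the tree and carries more functionality.

// Hypothetical stand-in for JSC's BytecodeIndex, modeling only what this
// changeset relies on. Names and layout are assumptions for illustration.
#include <cassert>
#include <climits>

class BytecodeIndex {
public:
    // Default construction yields the invalid index; this replaces the
    // bare UINT_MAX sentinel the old code used for untargetable blocks.
    BytecodeIndex() = default;
    explicit BytecodeIndex(unsigned offset) : m_offset(offset) { }

    // Raw offset, for call sites that still do integer arithmetic on
    // jump targets (e.g. m_currentIndex.offset() + relativeOffset).
    unsigned offset() const { return m_offset; }

    // Validity test used in the patched code by ASSERT(bytecodeIndex)
    // and RELEASE_ASSERT(!block->bytecodeBegin).
    explicit operator bool() const { return m_offset != UINT_MAX; }

private:
    unsigned m_offset { UINT_MAX };
};

int main()
{
    BytecodeIndex begin;                    // untargetable block: invalid by default
    assert(!begin);
    begin = BytecodeIndex(0);               // makeBlockTargetable-style assignment
    BytecodeIndex next(begin.offset() + 4); // NEXT_OPCODE-style advance (size of 4 assumed)
    assert(begin.offset() < next.offset()); // sort-order check on bytecodeBegin
    return 0;
}

The design payoff is type safety: after the migration the compiler rejects accidental mixing of bytecode indices with other unsigned quantities such as register offsets or switch-table indices, and every deliberate conversion is spelled out as either BytecodeIndex(...) or .offset(), which is exactly the texture of the edits above.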