Changeset 172940 in webkit for trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
Timestamp: Aug 25, 2014, 3:35:40 PM
File: 1 edited
Legend: lines prefixed with "+" were added in r172940, lines prefixed with "-" were removed, and unprefixed lines are unchanged context.
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
r172853 r172940 51 51 namespace JSC { namespace DFG { 52 52 53 static const bool verbose = false; 54 53 55 class ConstantBufferKey { 54 56 public: … … 179 181 void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset); 180 182 void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind); 181 void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind); 183 void emitFunctionChecks(CallVariant, Node* callTarget, int registerOffset, CodeSpecializationKind); 184 void undoFunctionChecks(CallVariant); 182 185 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind); 186 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1. 183 187 // Handle inlining. Return true if it succeeded, false if we need to plant a call. 184 bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind); 188 bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); 189 enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually }; 190 bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance); 191 void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability); 192 void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry. 185 193 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call. 186 194 bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction); 187 195 bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType); 188 bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction,CodeSpecializationKind);196 bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind); 189 197 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value); 190 198 Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset); … … 201 209 Node* getScope(unsigned skipCount); 202 210 203 // Prepare to parse a block.204 211 void prepareToParseBlock(); 212 void clearCaches(); 213 205 214 // Parse a single basic block of bytecode instructions. 
206 215 bool parseBlock(unsigned limit); … … 297 306 return delayed.execute(this, setMode); 298 307 } 308 309 void processSetLocalQueue() 310 { 311 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i) 312 m_setLocalQueue[i].execute(this); 313 m_setLocalQueue.resize(0); 314 } 299 315 300 316 Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) … … 638 654 return result; 639 655 } 656 657 void removeLastNodeFromGraph(NodeType expectedNodeType) 658 { 659 Node* node = m_currentBlock->takeLast(); 660 RELEASE_ASSERT(node->op() == expectedNodeType); 661 m_graph.m_allocator.free(node); 662 } 640 663 641 664 void addVarArgChild(Node* child) … … 646 669 647 670 Node* addCallWithoutSettingResult( 648 NodeType op, Node* callee, int argCount, int registerOffset,671 NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset, 649 672 SpeculatedType prediction) 650 673 { … … 654 677 m_parameterSlots = parameterSlots; 655 678 656 int dummyThisArgument = op == Call || op == NativeCall ? 0 : 1;679 int dummyThisArgument = op == Call || op == NativeCall || op == ProfiledCall ? 0 : 1; 657 680 for (int i = 0 + dummyThisArgument; i < argCount; ++i) 658 681 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); 659 682 660 return addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));683 return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction)); 661 684 } 662 685 663 686 Node* addCall( 664 int result, NodeType op, Node* callee, int argCount, int registerOffset,687 int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset, 665 688 SpeculatedType prediction) 666 689 { 667 690 Node* call = addCallWithoutSettingResult( 668 op, callee, argCount, registerOffset, prediction);691 op, opInfo, callee, argCount, registerOffset, prediction); 669 692 VirtualRegister resultReg(result); 670 693 if (resultReg.isValid()) … … 872 895 873 896 // Potential block linking targets. Must be sorted by bytecodeBegin, and 874 // cannot have two blocks that have the same bytecodeBegin. For this very 875 // reason, this is not equivalent to 897 // cannot have two blocks that have the same bytecodeBegin. 876 898 Vector<BasicBlock*> m_blockLinkingTargets; 877 899 … … 1020 1042 { 1021 1043 ASSERT(registerOffset <= 0); 1022 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);1023 1044 1024 1045 if (callTarget->hasConstant()) 1025 1046 callLinkStatus = CallLinkStatus(callTarget->asJSValue()).setIsProved(true); 1047 1048 if ((!callLinkStatus.canOptimize() || callLinkStatus.size() != 1) 1049 && !isFTL(m_graph.m_plan.mode) && Options::useFTLJIT() 1050 && InlineCallFrame::isNormalCall(kind) 1051 && CallEdgeLog::isEnabled() 1052 && Options::dfgDoesCallEdgeProfiling()) { 1053 ASSERT(op == Call || op == Construct); 1054 if (op == Call) 1055 op = ProfiledCall; 1056 else 1057 op = ProfiledConstruct; 1058 } 1026 1059 1027 1060 if (!callLinkStatus.canOptimize()) { … … 1029 1062 // that we cannot optimize them. 
1030 1063 1031 addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction);1064 addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction); 1032 1065 return; 1033 1066 } 1034 1067 1035 1068 unsigned nextOffset = m_currentIndex + instructionSize; 1036 1037 if (InternalFunction* function = callLinkStatus.internalFunction()) { 1038 if (handleConstantInternalFunction(result, function, registerOffset, argumentCountIncludingThis, prediction, specializationKind)) { 1039 // This phantoming has to be *after* the code for the intrinsic, to signify that 1040 // the inputs must be kept alive whatever exits the intrinsic may do. 1041 addToGraph(Phantom, callTarget); 1042 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind); 1043 return; 1044 } 1045 1046 // Can only handle this using the generic call handler. 1047 addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction); 1048 return; 1049 } 1050 1051 Intrinsic intrinsic = callLinkStatus.intrinsicFor(specializationKind); 1052 1053 JSFunction* knownFunction = nullptr; 1054 if (intrinsic != NoIntrinsic) { 1055 emitFunctionChecks(callLinkStatus, callTarget, registerOffset, specializationKind); 1056 1057 if (handleIntrinsic(result, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) { 1058 // This phantoming has to be *after* the code for the intrinsic, to signify that 1059 // the inputs must be kept alive whatever exits the intrinsic may do. 1060 addToGraph(Phantom, callTarget); 1061 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind); 1062 if (m_graph.compilation()) 1063 m_graph.compilation()->noticeInlinedCall(); 1064 return; 1065 } 1066 } else if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) { 1069 1070 OpInfo callOpInfo; 1071 1072 if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, op, kind, prediction)) { 1067 1073 if (m_graph.compilation()) 1068 1074 m_graph.compilation()->noticeInlinedCall(); 1069 1075 return; 1076 } 1077 1070 1078 #if ENABLE(FTL_NATIVE_CALL_INLINING) 1071 } else if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls()) { 1072 JSFunction* function = callLinkStatus.function(); 1079 if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) { 1080 CallVariant callee = callLinkStatus[0].callee(); 1081 JSFunction* function = callee.function(); 1082 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); 1073 1083 if (function && function->isHostFunction()) { 1074 emitFunctionChecks(call LinkStatus, callTarget, registerOffset, specializationKind);1075 knownFunction = function;1076 1077 if (op == Call )1084 emitFunctionChecks(callee, callTarget, registerOffset, specializationKind); 1085 callOpInfo = OpInfo(m_graph.freeze(function)); 1086 1087 if (op == Call || op == ProfiledCall) 1078 1088 op = NativeCall; 1079 1089 else { 1080 ASSERT(op == Construct );1090 ASSERT(op == Construct || op == ProfiledConstruct); 1081 1091 op = NativeConstruct; 1082 1092 } 1083 1093 } 1094 } 1084 1095 #endif 1085 } 1086 Node* call = addCall(result, op, callTarget, argumentCountIncludingThis, registerOffset, prediction); 1087 1088 if (knownFunction) 1089 call->giveKnownFunction(knownFunction); 1096 1097 addCall(result, op, callOpInfo, 
callTarget, argumentCountIncludingThis, registerOffset, prediction); 1090 1098 } 1091 1099 1092 void ByteCodeParser::emitFunctionChecks( const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)1100 void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, int registerOffset, CodeSpecializationKind kind) 1093 1101 { 1094 1102 Node* thisArgument; … … 1098 1106 thisArgument = 0; 1099 1107 1100 if (callLinkStatus.isProved()) { 1101 addToGraph(Phantom, callTarget, thisArgument); 1102 return; 1103 } 1104 1105 ASSERT(callLinkStatus.canOptimize()); 1106 1107 if (JSFunction* function = callLinkStatus.function()) 1108 addToGraph(CheckFunction, OpInfo(m_graph.freeze(function)), callTarget, thisArgument); 1109 else { 1110 ASSERT(callLinkStatus.executable()); 1111 1112 addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument); 1113 } 1108 JSCell* calleeCell; 1109 Node* callTargetForCheck; 1110 if (callee.isClosureCall()) { 1111 calleeCell = callee.executable(); 1112 callTargetForCheck = addToGraph(GetExecutable, callTarget); 1113 } else { 1114 calleeCell = callee.nonExecutableCallee(); 1115 callTargetForCheck = callTarget; 1116 } 1117 1118 ASSERT(calleeCell); 1119 addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument); 1120 } 1121 1122 void ByteCodeParser::undoFunctionChecks(CallVariant callee) 1123 { 1124 removeLastNodeFromGraph(CheckCell); 1125 if (callee.isClosureCall()) 1126 removeLastNodeFromGraph(GetExecutable); 1114 1127 } 1115 1128 … … 1120 1133 } 1121 1134 1122 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind)1135 unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind) 1123 1136 { 1124 static const bool verbose = false;1125 1126 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);1127 1128 1137 if (verbose) 1129 dataLog("Considering inlining ", call LinkStatus, " into ", currentCodeOrigin(), "\n");1130 1131 // First, the really simple checks: do we have an actual JS function?1132 if (! callLinkStatus.executable()) {1138 dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n"); 1139 1140 FunctionExecutable* executable = callee.functionExecutable(); 1141 if (!executable) { 1133 1142 if (verbose) 1134 dataLog(" Failing because there is no executable.\n"); 1135 return false; 1136 } 1137 if (callLinkStatus.executable()->isHostFunction()) { 1138 if (verbose) 1139 dataLog(" Failing because it's a host function.\n"); 1140 return false; 1141 } 1142 1143 FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable()); 1143 dataLog(" Failing because there is no function executable."); 1144 return UINT_MAX; 1145 } 1144 1146 1145 1147 // Does the number of arguments we're passing match the arity of the target? We currently … … 1149 1151 if (verbose) 1150 1152 dataLog(" Failing because of arity mismatch.\n"); 1151 return false;1153 return UINT_MAX; 1152 1154 } 1153 1155 … … 1158 1160 // global function, where watchpointing gives us static information. Overall, it's a rare case 1159 1161 // because we expect that any hot callees would have already been compiled. 
1160 CodeBlock* codeBlock = executable->baselineCodeBlockFor( specializationKind);1162 CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind); 1161 1163 if (!codeBlock) { 1162 1164 if (verbose) 1163 1165 dataLog(" Failing because no code block available.\n"); 1164 return false;1166 return UINT_MAX; 1165 1167 } 1166 1168 CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel( 1167 codeBlock, specializationKind, callLinkStatus.isClosureCall());1169 codeBlock, kind, callee.isClosureCall()); 1168 1170 if (!canInline(capabilityLevel)) { 1169 1171 if (verbose) 1170 1172 dataLog(" Failing because the function is not inlineable.\n"); 1171 return false;1173 return UINT_MAX; 1172 1174 } 1173 1175 … … 1179 1181 if (verbose) 1180 1182 dataLog(" Failing because the caller is too large.\n"); 1181 return false;1183 return UINT_MAX; 1182 1184 } 1183 1185 … … 1198 1200 if (verbose) 1199 1201 dataLog(" Failing because depth exceeded.\n"); 1200 return false;1202 return UINT_MAX; 1201 1203 } 1202 1204 … … 1206 1208 if (verbose) 1207 1209 dataLog(" Failing because recursion detected.\n"); 1208 return false;1210 return UINT_MAX; 1209 1211 } 1210 1212 } … … 1212 1214 1213 1215 if (verbose) 1214 dataLog(" Committing to inlining.\n"); 1215 1216 // Now we know without a doubt that we are committed to inlining. So begin the process 1217 // by checking the callee (if necessary) and making sure that arguments and the callee 1218 // are flushed. 1219 emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, specializationKind); 1220 1216 dataLog(" Inlining should be possible.\n"); 1217 1218 // It might be possible to inline. 1219 return codeBlock->instructionCount(); 1220 } 1221 1222 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability) 1223 { 1224 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); 1225 1226 ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX); 1227 1228 CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind); 1229 1221 1230 // FIXME: Don't flush constants! 1222 1231 … … 1234 1243 1235 1244 InlineStackEntry inlineStackEntry( 1236 this, codeBlock, codeBlock, m_graph.lastBlock(), call LinkStatus.function(), resultReg,1245 this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg, 1237 1246 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind); 1238 1247 … … 1248 1257 RELEASE_ASSERT( 1249 1258 m_inlineStackTop->m_inlineCallFrame->isClosureCall 1250 == call LinkStatus.isClosureCall());1251 if (call LinkStatus.isClosureCall()) {1259 == callee.isClosureCall()); 1260 if (callee.isClosureCall()) { 1252 1261 VariableAccessData* calleeVariable = 1253 1262 set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData(); … … 1264 1273 1265 1274 parseCodeBlock(); 1266 prepareToParseBlock(); // Reset our state now that we're back to the outer code.1275 clearCaches(); // Reset our state now that we're back to the outer code. 1267 1276 1268 1277 m_currentIndex = oldIndex; … … 1277 1286 ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked); 1278 1287 1279 // It's possible that the callsite block head is not owned by the caller. 
1280 if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) { 1281 // It's definitely owned by the caller, because the caller created new blocks. 1282 // Assert that this all adds up. 1283 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead); 1284 ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking); 1285 inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false; 1286 } else { 1287 // It's definitely not owned by the caller. Tell the caller that he does not 1288 // need to link his callsite block head, because we did it for him. 1289 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking); 1290 ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead); 1291 inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false; 1292 } 1288 if (callerLinkability == CallerDoesNormalLinking) 1289 cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead); 1293 1290 1294 1291 linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets); … … 1309 1306 // in the linker's binary search. 1310 1307 lastBlock->bytecodeBegin = m_currentIndex; 1311 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock())); 1308 if (callerLinkability == CallerDoesNormalLinking) { 1309 if (verbose) 1310 dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n"); 1311 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock())); 1312 } 1312 1313 } 1313 1314 1314 1315 m_currentBlock = m_graph.lastBlock(); 1315 return true;1316 return; 1316 1317 } 1317 1318 1318 1319 // If we get to this point then all blocks must end in some sort of terminals. 1319 1320 ASSERT(lastBlock->last()->isTerminal()); 1320 1321 1321 1322 1322 // Need to create a new basic block for the continuation at the caller. … … 1334 1334 node->targetBlock() = block.get(); 1335 1335 inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false; 1336 #if !ASSERT_DISABLED 1337 blockToLink->isLinked = true;1338 #endif 1336 if (verbose) 1337 dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n"); 1338 blockToLink->didLink(); 1339 1339 } 1340 1340 1341 1341 m_currentBlock = block.get(); 1342 1342 ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset); 1343 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); 1344 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get()); 1343 if (verbose) 1344 dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n"); 1345 if (callerLinkability == CallerDoesNormalLinking) { 1346 m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); 1347 m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get()); 1348 } 1345 1349 m_graph.appendBlock(block); 1346 1350 prepareToParseBlock(); 1347 1348 // At this point we return and continue to generate code for the caller, but 1349 // in the new basic block. 1351 } 1352 1353 void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block) 1354 { 1355 // It's possible that the callsite block head is not owned by the caller. 
1356 if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) { 1357 // It's definitely owned by the caller, because the caller created new blocks. 1358 // Assert that this all adds up. 1359 ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block); 1360 ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking); 1361 inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false; 1362 } else { 1363 // It's definitely not owned by the caller. Tell the caller that he does not 1364 // need to link his callsite block head, because we did it for him. 1365 ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking); 1366 ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block); 1367 inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false; 1368 } 1369 } 1370 1371 bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance) 1372 { 1373 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); 1374 1375 if (!inliningBalance) 1376 return false; 1377 1378 if (InternalFunction* function = callee.internalFunction()) { 1379 if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind)) { 1380 addToGraph(Phantom, callTargetNode); 1381 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind); 1382 inliningBalance--; 1383 return true; 1384 } 1385 return false; 1386 } 1387 1388 Intrinsic intrinsic = callee.intrinsicFor(specializationKind); 1389 if (intrinsic != NoIntrinsic) { 1390 if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) { 1391 addToGraph(Phantom, callTargetNode); 1392 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind); 1393 inliningBalance--; 1394 return true; 1395 } 1396 return false; 1397 } 1398 1399 unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind); 1400 if (myInliningCost > inliningBalance) 1401 return false; 1402 1403 inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability); 1404 inliningBalance -= myInliningCost; 1405 return true; 1406 } 1407 1408 bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind kind, SpeculatedType prediction) 1409 { 1410 if (verbose) { 1411 dataLog("Handling inlining...\n"); 1412 dataLog("Stack: ", currentCodeOrigin(), "\n"); 1413 } 1414 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); 1415 1416 if (!callLinkStatus.size()) { 1417 if (verbose) 1418 dataLog("Bailing inlining.\n"); 1419 return false; 1420 } 1421 1422 unsigned inliningBalance = Options::maximumFunctionForCallInlineCandidateInstructionCount(); 1423 if (specializationKind == CodeForConstruct) 1424 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForConstructInlineCandidateInstructionCount()); 1425 if (callLinkStatus.isClosureCall()) 1426 inliningBalance = std::min(inliningBalance, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount()); 1427 1428 // First check 
if we can avoid creating control flow. Our inliner does some CFG 1429 // simplification on the fly and this helps reduce compile times, but we can only leverage 1430 // this in cases where we don't need control flow diamonds to check the callee. 1431 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) { 1432 emitFunctionChecks( 1433 callLinkStatus[0].callee(), callTargetNode, registerOffset, specializationKind); 1434 bool result = attemptToInlineCall( 1435 callTargetNode, resultOperand, callLinkStatus[0].callee(), registerOffset, 1436 argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction, 1437 inliningBalance); 1438 if (!result && !callLinkStatus.isProved()) 1439 undoFunctionChecks(callLinkStatus[0].callee()); 1440 if (verbose) { 1441 dataLog("Done inlining (simple).\n"); 1442 dataLog("Stack: ", currentCodeOrigin(), "\n"); 1443 } 1444 return result; 1445 } 1446 1447 // We need to create some kind of switch over callee. For now we only do this if we believe that 1448 // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to 1449 // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in 1450 // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that 1451 // we could improve that aspect of this by doing polymorphic inlining but having the profiling 1452 // also. Currently we opt against this, but it could be interesting. That would require having a 1453 // separate node for call edge profiling. 1454 // FIXME: Introduce the notion of a separate call edge profiling node. 1455 // https://bugs.webkit.org/show_bug.cgi?id=136033 1456 if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()) { 1457 if (verbose) { 1458 dataLog("Bailing inlining (hard).\n"); 1459 dataLog("Stack: ", currentCodeOrigin(), "\n"); 1460 } 1461 return false; 1462 } 1463 1464 unsigned oldOffset = m_currentIndex; 1465 1466 bool allAreClosureCalls = true; 1467 bool allAreDirectCalls = true; 1468 for (unsigned i = callLinkStatus.size(); i--;) { 1469 if (callLinkStatus[i].callee().isClosureCall()) 1470 allAreDirectCalls = false; 1471 else 1472 allAreClosureCalls = false; 1473 } 1474 1475 Node* thingToSwitchOn; 1476 if (allAreDirectCalls) 1477 thingToSwitchOn = callTargetNode; 1478 else if (allAreClosureCalls) 1479 thingToSwitchOn = addToGraph(GetExecutable, callTargetNode); 1480 else { 1481 // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases 1482 // where it would be beneficial. Also, CallLinkStatus would make all callees appear like 1483 // closure calls if any calls were closure calls - except for calls to internal functions. 1484 // So this will only arise if some callees are internal functions and others are closures. 1485 // https://bugs.webkit.org/show_bug.cgi?id=136020 1486 if (verbose) { 1487 dataLog("Bailing inlining (mix).\n"); 1488 dataLog("Stack: ", currentCodeOrigin(), "\n"); 1489 } 1490 return false; 1491 } 1492 1493 if (verbose) { 1494 dataLog("Doing hard inlining...\n"); 1495 dataLog("Stack: ", currentCodeOrigin(), "\n"); 1496 } 1497 1498 // This makes me wish that we were in SSA all the time. We need to pick a variable into which to 1499 // store the callee so that it will be accessible to all of the blocks we're about to create. We 1500 // get away with doing an immediate-set here because we wouldn't have performed any side effects 1501 // yet. 
1502 if (verbose) 1503 dataLog("Register offset: ", registerOffset); 1504 VirtualRegister calleeReg(registerOffset + JSStack::Callee); 1505 calleeReg = m_inlineStackTop->remapOperand(calleeReg); 1506 if (verbose) 1507 dataLog("Callee is going to be ", calleeReg, "\n"); 1508 setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush); 1509 1510 SwitchData& data = *m_graph.m_switchData.add(); 1511 data.kind = SwitchCell; 1512 addToGraph(Switch, OpInfo(&data), thingToSwitchOn); 1513 1514 BasicBlock* originBlock = m_currentBlock; 1515 if (verbose) 1516 dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n"); 1517 originBlock->didLink(); 1518 cancelLinkingForBlock(m_inlineStackTop, originBlock); 1519 1520 // Each inlined callee will have a landing block that it returns at. They should all have jumps 1521 // to the continuation block, which we create last. 1522 Vector<BasicBlock*> landingBlocks; 1523 1524 // We make force this true if we give up on inlining any of the edges. 1525 bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath(); 1526 1527 if (verbose) 1528 dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n"); 1529 1530 for (unsigned i = 0; i < callLinkStatus.size(); ++i) { 1531 m_currentIndex = oldOffset; 1532 RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN)); 1533 m_currentBlock = block.get(); 1534 m_graph.appendBlock(block); 1535 prepareToParseBlock(); 1536 1537 Node* myCallTargetNode = getDirect(calleeReg); 1538 1539 bool inliningResult = attemptToInlineCall( 1540 myCallTargetNode, resultOperand, callLinkStatus[i].callee(), registerOffset, 1541 argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction, 1542 inliningBalance); 1543 1544 if (!inliningResult) { 1545 // That failed so we let the block die. Nothing interesting should have been added to 1546 // the block. We also give up on inlining any of the (less frequent) callees. 1547 ASSERT(m_currentBlock == block.get()); 1548 ASSERT(m_graph.m_blocks.last() == block); 1549 m_graph.killBlockAndItsContents(block.get()); 1550 m_graph.m_blocks.removeLast(); 1551 1552 // The fact that inlining failed means we need a slow path. 1553 couldTakeSlowPath = true; 1554 break; 1555 } 1556 1557 JSCell* thingToCaseOn; 1558 if (allAreDirectCalls) 1559 thingToCaseOn = callLinkStatus[i].callee().nonExecutableCallee(); 1560 else { 1561 ASSERT(allAreClosureCalls); 1562 thingToCaseOn = callLinkStatus[i].callee().executable(); 1563 } 1564 data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get())); 1565 m_currentIndex = nextOffset; 1566 processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue. 
1567 addToGraph(Jump); 1568 if (verbose) 1569 dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n"); 1570 m_currentBlock->didLink(); 1571 landingBlocks.append(m_currentBlock); 1572 1573 if (verbose) 1574 dataLog("Finished inlining ", callLinkStatus[i].callee(), " at ", currentCodeOrigin(), ".\n"); 1575 } 1576 1577 RefPtr<BasicBlock> slowPathBlock = adoptRef( 1578 new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN)); 1579 m_currentIndex = oldOffset; 1580 data.fallThrough = BranchTarget(slowPathBlock.get()); 1581 m_graph.appendBlock(slowPathBlock); 1582 if (verbose) 1583 dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n"); 1584 slowPathBlock->didLink(); 1585 prepareToParseBlock(); 1586 m_currentBlock = slowPathBlock.get(); 1587 Node* myCallTargetNode = getDirect(calleeReg); 1588 if (couldTakeSlowPath) { 1589 addCall( 1590 resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis, 1591 registerOffset, prediction); 1592 } else { 1593 addToGraph(CheckBadCell); 1594 addToGraph(Phantom, myCallTargetNode); 1595 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, specializationKind); 1596 1597 set(VirtualRegister(resultOperand), addToGraph(BottomValue)); 1598 } 1599 1600 m_currentIndex = nextOffset; 1601 processSetLocalQueue(); 1602 addToGraph(Jump); 1603 landingBlocks.append(m_currentBlock); 1604 1605 RefPtr<BasicBlock> continuationBlock = adoptRef( 1606 new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN)); 1607 m_graph.appendBlock(continuationBlock); 1608 if (verbose) 1609 dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n"); 1610 m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get())); 1611 prepareToParseBlock(); 1612 m_currentBlock = continuationBlock.get(); 1613 1614 for (unsigned i = landingBlocks.size(); i--;) 1615 landingBlocks[i]->last()->targetBlock() = continuationBlock.get(); 1616 1617 m_currentIndex = oldOffset; 1618 1619 if (verbose) { 1620 dataLog("Done inlining (hard).\n"); 1621 dataLog("Stack: ", currentCodeOrigin(), "\n"); 1622 } 1350 1623 return true; 1351 1624 } … … 1646 1919 bool ByteCodeParser::handleConstantInternalFunction( 1647 1920 int resultOperand, InternalFunction* function, int registerOffset, 1648 int argumentCountIncludingThis, SpeculatedType prediction,CodeSpecializationKind kind)1921 int argumentCountIncludingThis, CodeSpecializationKind kind) 1649 1922 { 1650 1923 // If we ever find that we have a lot of internal functions that we specialize for, … … 1654 1927 // we know about is small enough, that having just a linear cascade of if statements 1655 1928 // is good enough. 1656 1657 UNUSED_PARAM(prediction); // Remove this once we do more things.1658 1929 1659 1930 if (function->classInfo() == ArrayConstructor::info()) { … … 2021 2292 void ByteCodeParser::prepareToParseBlock() 2022 2293 { 2294 clearCaches(); 2295 ASSERT(m_setLocalQueue.isEmpty()); 2296 } 2297 2298 void ByteCodeParser::clearCaches() 2299 { 2023 2300 m_constants.resize(0); 2024 2301 } … … 2060 2337 2061 2338 while (true) { 2062 for (unsigned i = 0; i < m_setLocalQueue.size(); ++i) 2063 m_setLocalQueue[i].execute(this); 2064 m_setLocalQueue.resize(0); 2339 processSetLocalQueue(); 2065 2340 2066 2341 // Don't extend over jump destinations. 
… … 2206 2481 if (!cachedFunction 2207 2482 || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) 2208 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Bad Function)) {2483 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { 2209 2484 set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee))); 2210 2485 } else { … … 2212 2487 ASSERT(cachedFunction->inherits(JSFunction::info())); 2213 2488 Node* actualCallee = get(VirtualRegister(JSStack::Callee)); 2214 addToGraph(Check Function, OpInfo(frozen), actualCallee);2489 addToGraph(CheckCell, OpInfo(frozen), actualCallee); 2215 2490 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen))); 2216 2491 } … … 2894 3169 ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer)); 2895 3170 addToGraph( 2896 Check Function,3171 CheckCell, 2897 3172 OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor( 2898 3173 m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))), … … 3318 3593 } 3319 3594 3320 #if !ASSERT_DISABLED 3321 block->isLinked = true;3322 #endif 3595 if (verbose) 3596 dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n"); 3597 block->didLink(); 3323 3598 } 3324 3599 … … 3326 3601 { 3327 3602 for (size_t i = 0; i < unlinkedBlocks.size(); ++i) { 3603 if (verbose) 3604 dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n"); 3328 3605 if (unlinkedBlocks[i].m_needsNormalLinking) { 3606 if (verbose) 3607 dataLog(" Does need normal linking.\n"); 3329 3608 linkBlock(unlinkedBlocks[i].m_block, possibleTargets); 3330 3609 unlinkedBlocks[i].m_needsNormalLinking = false; … … 3493 3772 void ByteCodeParser::parseCodeBlock() 3494 3773 { 3495 prepareToParseBlock();3774 clearCaches(); 3496 3775 3497 3776 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock; … … 3559 3838 // a peephole coalescing of this block in the if statement above. So, we're 3560 3839 // generating suboptimal code and leaving more work for the CFG simplifier. 3561 ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex); 3840 if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) { 3841 unsigned lastBegin = 3842 m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin; 3843 ASSERT_UNUSED( 3844 lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex); 3845 } 3562 3846 m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); 3563 3847 m_inlineStackTop->m_blockLinkingTargets.append(block.get());
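For orientation, a small JavaScript example of the kind of call site this changeset targets follows; it is illustrative only and is not part of the commit, and the function and variable names are invented for the example. Before this change, handleInlining() gave up as soon as a call site had more than one observed callee. With the new code, when the call link status reports several CallVariants, the parser can emit a Switch over the callee cell (or its executable, for closure calls), inline each observed target in its own basic block, and route unexpected callees to a generic-call slow path, with all paths jumping to a common continuation block.

// Illustrative only: a polymorphic call site with two observed callees.
function area(shape) {
    // shape.compute is sometimes circleCompute and sometimes squareCompute,
    // so the profiling for this one call site records two distinct callees.
    return shape.compute();
}

function circleCompute() { return Math.PI * this.r * this.r; }
function squareCompute() { return this.side * this.side; }

var shapes = [
    { r: 2, compute: circleCompute },
    { side: 3, compute: squareCompute }
];

var total = 0;
for (var i = 0; i < 1000000; ++i)
    total += area(shapes[i & 1]);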