Changeset 173069 in webkit for trunk/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
- Timestamp:
- Aug 28, 2014, 12:09:48 PM (11 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
r172961 r173069 33 33 #include "JSCInlines.h" 34 34 #include <wtf/CommaPrinter.h> 35 #include <wtf/ListDump.h> 35 36 36 37 namespace JSC { … … 39 40 40 41 CallLinkStatus::CallLinkStatus(JSValue value) 41 : m_callTarget(value) 42 , m_executable(0) 43 , m_couldTakeSlowPath(false) 42 : m_couldTakeSlowPath(false) 44 43 , m_isProved(false) 45 44 { 46 if (!value || !value.isCell()) 45 if (!value || !value.isCell()) { 46 m_couldTakeSlowPath = true; 47 47 return; 48 49 if (!value.asCell()->inherits(JSFunction::info())) 50 return; 51 52 m_executable = jsCast<JSFunction*>(value.asCell())->executable(); 53 } 54 55 JSFunction* CallLinkStatus::function() const 56 { 57 if (!m_callTarget || !m_callTarget.isCell()) 58 return 0; 59 60 if (!m_callTarget.asCell()->inherits(JSFunction::info())) 61 return 0; 62 63 return jsCast<JSFunction*>(m_callTarget.asCell()); 64 } 65 66 InternalFunction* CallLinkStatus::internalFunction() const 67 { 68 if (!m_callTarget || !m_callTarget.isCell()) 69 return 0; 70 71 if (!m_callTarget.asCell()->inherits(InternalFunction::info())) 72 return 0; 73 74 return jsCast<InternalFunction*>(m_callTarget.asCell()); 75 } 76 77 Intrinsic CallLinkStatus::intrinsicFor(CodeSpecializationKind kind) const 78 { 79 if (!m_executable) 80 return NoIntrinsic; 81 82 return m_executable->intrinsicFor(kind); 48 } 49 50 m_edges.append(CallEdge(CallVariant(value.asCell()), 1)); 83 51 } 84 52 … … 88 56 UNUSED_PARAM(bytecodeIndex); 89 57 #if ENABLE(DFG_JIT) 90 if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, Bad Function))) {58 if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell))) { 91 59 // We could force this to be a closure call, but instead we'll just assume that it 92 60 // takes slow path. 
… … 126 94 return computeFromLLInt(locker, profiledBlock, bytecodeIndex); 127 95 128 return computeFor(locker, *callLinkInfo, exitSiteData);96 return computeFor(locker, profiledBlock, *callLinkInfo, exitSiteData); 129 97 #else 130 98 return CallLinkStatus(); … … 140 108 #if ENABLE(DFG_JIT) 141 109 exitSiteData.m_takesSlowPath = 142 profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, Bad Cache, exitingJITType))110 profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadType, exitingJITType)) 143 111 || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable, exitingJITType)); 144 112 exitSiteData.m_badFunction = 145 profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, Bad Function, exitingJITType));113 profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCell, exitingJITType)); 146 114 #else 147 115 UNUSED_PARAM(locker); … … 155 123 156 124 #if ENABLE(JIT) 157 CallLinkStatus CallLinkStatus::computeFor(const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo) 125 CallLinkStatus CallLinkStatus::computeFor( 126 const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo) 127 { 128 // We don't really need this, but anytime we have to debug this code, it becomes indispensable. 129 UNUSED_PARAM(profiledBlock); 130 131 if (Options::callStatusShouldUseCallEdgeProfile()) { 132 // Always trust the call edge profile over anything else since this has precise counts. 133 // It can make the best possible decision because it never "forgets" what happened for any 134 // call, with the exception of fading out the counts of old calls (for example if the 135 // counter type is 16-bit then calls that happened more than 2^16 calls ago are given half 136 // weight, and this compounds for every 2^15 [sic] calls after that). 
The combination of 137 // high fidelity for recent calls and fading for older calls makes this the most useful 138 // mechanism of choosing how to optimize future calls. 139 CallEdgeProfile* edgeProfile = callLinkInfo.callEdgeProfile.get(); 140 WTF::loadLoadFence(); 141 if (edgeProfile) { 142 CallLinkStatus result = computeFromCallEdgeProfile(edgeProfile); 143 if (!!result) 144 return result; 145 } 146 } 147 148 return computeFromCallLinkInfo(locker, callLinkInfo); 149 } 150 151 CallLinkStatus CallLinkStatus::computeFromCallLinkInfo( 152 const ConcurrentJITLocker&, CallLinkInfo& callLinkInfo) 158 153 { 159 154 // Note that despite requiring that the locker is held, this code is racy with respect … … 178 173 JSFunction* target = callLinkInfo.lastSeenCallee.get(); 179 174 if (!target) 180 return CallLinkStatus();175 return takesSlowPath(); 181 176 182 177 if (callLinkInfo.hasSeenClosure) 186 181 } 187 182 183 CallLinkStatus CallLinkStatus::computeFromCallEdgeProfile(CallEdgeProfile* edgeProfile) 184 { 185 // In cases where the call edge profile saw nothing, use the CallLinkInfo instead. 186 if (!edgeProfile->totalCalls()) 187 return CallLinkStatus(); 188 189 // To do anything meaningful, we require that the majority of calls are to something we 190 // know how to handle. 191 unsigned numCallsToKnown = edgeProfile->numCallsToKnownCells(); 192 unsigned numCallsToUnknown = edgeProfile->numCallsToNotCell() + edgeProfile->numCallsToUnknownCell(); 193 194 // We require that the majority of calls were to something that we could possibly inline. 195 if (numCallsToKnown <= numCallsToUnknown) 196 return takesSlowPath(); 197 198 // We require that the number of such calls is greater than some minimal threshold, so that we 199 // avoid inlining completely cold calls. 
200 if (numCallsToKnown < Options::frequentCallThreshold()) 201 return takesSlowPath(); 202 203 CallLinkStatus result; 204 result.m_edges = edgeProfile->callEdges(); 205 result.m_couldTakeSlowPath = !!numCallsToUnknown; 206 result.m_canTrustCounts = true; 207 208 return result; 209 } 210 188 211 CallLinkStatus CallLinkStatus::computeFor( 189 const ConcurrentJITLocker& locker, CallLinkInfo& callLinkInfo, ExitSiteData exitSiteData) 190 { 191 if (exitSiteData.m_takesSlowPath) 192 return takesSlowPath(); 193 194 CallLinkStatus result = computeFor(locker, callLinkInfo); 212 const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, CallLinkInfo& callLinkInfo, 213 ExitSiteData exitSiteData) 214 { 215 CallLinkStatus result = computeFor(locker, profiledBlock, callLinkInfo); 195 216 if (exitSiteData.m_badFunction) 196 217 result.makeClosureCall(); 218 if (exitSiteData.m_takesSlowPath) 219 result.m_couldTakeSlowPath = true; 197 220 198 221 return result; … … 228 251 { 229 252 ConcurrentJITLocker locker(dfgCodeBlock->m_lock); 230 map.add(info.codeOrigin, computeFor(locker, info, exitSiteData));253 map.add(info.codeOrigin, computeFor(locker, dfgCodeBlock, info, exitSiteData)); 231 254 } 232 255 } … … 257 280 } 258 281 282 bool CallLinkStatus::isClosureCall() const 283 { 284 for (unsigned i = m_edges.size(); i--;) { 285 if (m_edges[i].callee().isClosureCall()) 286 return true; 287 } 288 return false; 289 } 290 291 void CallLinkStatus::makeClosureCall() 292 { 293 ASSERT(!m_isProved); 294 for (unsigned i = m_edges.size(); i--;) 295 m_edges[i] = m_edges[i].despecifiedClosure(); 296 297 if (!ASSERT_DISABLED) { 298 // Doing this should not have created duplicates, because the CallEdgeProfile 299 // should despecify closures if doing so would reduce the number of known callees. 
300 for (unsigned i = 0; i < m_edges.size(); ++i) { 301 for (unsigned j = i + 1; j < m_edges.size(); ++j) 302 ASSERT(m_edges[i].callee() != m_edges[j].callee()); 303 } 304 } 305 } 306 259 307 void CallLinkStatus::dump(PrintStream& out) const 260 308 { … … 272 320 out.print(comma, "Could Take Slow Path"); 273 321 274 if (m_callTarget) 275 out.print(comma, "Known target: ", m_callTarget); 276 277 if (m_executable) { 278 out.print(comma, "Executable/CallHash: ", RawPointer(m_executable)); 279 if (!isCompilationThread()) 280 out.print("/", m_executable->hashFor(CodeForCall)); 281 } 322 out.print(listDump(m_edges)); 282 323 } 283 324
Note:
See TracChangeset
for help on using the changeset viewer.