Changeset 120149 in webkit for trunk/Source/JavaScriptCore/heap/MarkStack.cpp
- Timestamp: Jun 12, 2012, 7:06:50 PM
- File: 1 edited
Legend:
- Unmodified lines carry no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
trunk/Source/JavaScriptCore/heap/MarkStack.cpp
--- trunk/Source/JavaScriptCore/heap/MarkStack.cpp (r119633)
+++ trunk/Source/JavaScriptCore/heap/MarkStack.cpp (r120149)

     : m_nextFreeSegment(0)
 {
+    m_lock.Init();
 }

…

 {
     {
-        MutexLocker locker(m_lock);
+        SpinLockHolder locker(&m_lock);
         if (m_nextFreeSegment) {
             MarkStackSegment* result = m_nextFreeSegment;

…

 void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
 {
-    MutexLocker locker(m_lock);
+    SpinLockHolder locker(&m_lock);
     segment->m_previous = m_nextFreeSegment;
     m_nextFreeSegment = segment;

…

     MarkStackSegment* segments;
     {
-        MutexLocker locker(m_lock);
+        SpinLockHolder locker(&m_lock);
         segments = m_nextFreeSegment;
         m_nextFreeSegment = 0;

…

 }

-bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
-{
+void MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
+{
+    // Try to donate about 1 / 2 of our cells. To reduce copying costs,
+    // we prefer donating whole segments over donating individual cells,
+    // even if this skews away from our 1 / 2 target.
+
     ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+
+    size_t segmentsToDonate = (m_numberOfPreviousSegments + 2 - 1) / 2; // Round up to donate 1 / 1 previous segments.
+
+    if (!segmentsToDonate) {
+        size_t cellsToDonate = m_top / 2; // Round down to donate 0 / 1 cells.
+        while (cellsToDonate--) {
+            ASSERT(m_top);
+            other.append(removeLast());
+        }
+        return;
+    }
+
     validatePrevious();
     other.validatePrevious();
-
-    // Fast check: see if the other mark stack already has enough segments.
-    if (other.m_numberOfPreviousSegments + 1 >= Options::maximumNumberOfSharedSegments)
-        return false;
-
-    size_t numberOfCellsToKeep = Options::minimumNumberOfCellsToKeep;
-    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);
-
-    // Looks like we should donate! Give the other mark stack all of our
-    // previous segments, and then top it off.
+
     MarkStackSegment* previous = m_topSegment->m_previous;
-    while (previous) {
+    while (segmentsToDonate--) {
+        ASSERT(previous);
         ASSERT(m_numberOfPreviousSegments);

…

         other.m_numberOfPreviousSegments++;
     }
-    ASSERT(!m_numberOfPreviousSegments);
-    m_topSegment->m_previous = 0;
+    m_topSegment->m_previous = previous;
+
     validatePrevious();
     other.validatePrevious();
-
-    // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
-    // we really have a lot of work, we give up half.
-    if (m_top > numberOfCellsToKeep * 2)
-        numberOfCellsToKeep = m_top / 2;
-    while (m_top > numberOfCellsToKeep)
-        other.append(removeLast());
-
-    return true;
-}
-
-void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
-{
+}
+
+void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThreadCount)
+{
+    // Try to steal 1 / Nth of the shared array, where N is the number of idle threads.
+    // To reduce copying costs, we prefer stealing a whole segment over stealing
+    // individual cells, even if this skews away from our 1 / N target.
+
     ASSERT(m_segmentCapacity == other.m_segmentCapacity);
     validatePrevious();

…

         return;
     }
-
-    // Otherwise drain 1/Nth of the shared array where N is the number of
-    // workers, or Options::minimumNumberOfCellsToKeep, whichever is bigger.
-    size_t numberOfCellsToSteal = std::max((size_t)Options::minimumNumberOfCellsToKeep, other.size() / Options::numberOfGCMarkers);
+
+    size_t numberOfCellsToSteal = (other.size() + idleThreadCount - 1) / idleThreadCount; // Round up to steal 1 / 1.
     while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
         append(other.removeLast());

…

 }

-void SlotVisitor::donateSlow()
-{
-    // Refuse to donate if shared has more entries than I do.
-    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
-        return;
-    MutexLocker locker(m_shared.m_markingLock);
-    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
-        // Only wake up threads if the shared stack is big enough; otherwise assume that
-        // it's more profitable for us to just scan this ourselves later.
-        if (m_shared.m_sharedMarkStack.size() >= Options::sharedStackWakeupThreshold)
-            m_shared.m_markingCondition.broadcast();
-    }
+void SlotVisitor::donateKnownParallel()
+{
+    // NOTE: Because we re-try often, we can afford to be conservative, and
+    // assume that donating is not profitable.
+
+    // Avoid locking when a thread reaches a dead end in the object graph.
+    if (m_stack.size() < 2)
+        return;
+
+    // If there's already some shared work queued up, be conservative and assume
+    // that donating more is not profitable.
+    if (m_shared.m_sharedMarkStack.size())
+        return;
+
+    // If we're contending on the lock, be conservative and assume that another
+    // thread is already donating.
+    MutexTryLocker locker(m_shared.m_markingLock);
+    if (!locker.locked())
+        return;
+
+    // Otherwise, assume that a thread will go idle soon, and donate.
+    m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
+
+    if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers)
+        m_shared.m_markingCondition.broadcast();
 }

…

     }

-    m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
+    size_t idleThreadCount = Options::numberOfGCMarkers - m_shared.m_numberOfActiveParallelMarkers;
+    m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount);
     m_shared.m_numberOfActiveParallelMarkers++;
 }
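The load-balancing arithmetic in this change leans on the integer ceiling-division idiom (n + d - 1) / d: donateSomeCellsTo() rounds up so that a stack holding even a single previous segment still donates it, and stealSomeCellsFrom() rounds up so that a lone idle thread drains the whole shared array. Below is a minimal standalone sketch of that behavior, not WebKit code; the harness, divideRoundingUp, and the printed scenarios are invented for illustration, and only the two rounding expressions come from the patch.

// Standalone sketch of the donate/steal rounding arithmetic (C++11).
#include <cstddef>
#include <cstdio>

// ceil(n / d) without floating point: the (n + d - 1) / d idiom used above.
static std::size_t divideRoundingUp(std::size_t n, std::size_t d)
{
    return (n + d - 1) / d;
}

int main()
{
    // Donation: give away half of the previous segments, rounding up, so a
    // stack with exactly one previous segment still donates it.
    const std::size_t segmentCounts[] = { 0, 1, 2, 5 };
    for (std::size_t segments : segmentCounts)
        std::printf("%zu previous segments -> donate %zu\n", segments, divideRoundingUp(segments, 2));

    // Stealing: take 1/Nth of the shared array, rounding up, where N is the
    // idle thread count, so a single idle thread takes everything.
    const std::size_t sharedSize = 100;
    const std::size_t idleCounts[] = { 1, 2, 4 };
    for (std::size_t idle : idleCounts)
        std::printf("%zu idle threads -> steal %zu of %zu cells\n", idle, divideRoundingUp(sharedSize, idle), sharedSize);

    return 0;
}

Rounding up rather than down is what guarantees forward progress: a donor with any previous segments always parts with at least one, and a steal from a non-empty shared array always yields at least one cell.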
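The rewritten donateKnownParallel() is built around a try-lock: a marking thread never blocks just to donate, because contention on m_markingLock is itself taken as evidence that another thread is already donating. The sketch below shows the same pattern in portable C++11, substituting std::mutex and std::condition_variable for WTF's Mutex and MutexTryLocker; SharedMarkingState and the vector standing in for MarkStackArray are invented here for illustration and are not WebKit types.

// Sketch of the conservative try-lock donation pattern (not WebKit code).
#include <condition_variable>
#include <cstddef>
#include <mutex>
#include <vector>

struct SharedMarkingState {
    std::mutex markingLock;
    std::condition_variable markingCondition;
    std::vector<void*> sharedMarkStack;  // stand-in for the shared MarkStackArray
    unsigned numberOfActiveParallelMarkers = 0;
    unsigned numberOfGCMarkers = 4;
};

void donateKnownParallel(std::vector<void*>& localStack, SharedMarkingState& shared)
{
    // A thread with fewer than two cells has nothing to spare.
    if (localStack.size() < 2)
        return;

    // Cheap, unlocked peek mirroring the original's early-out: if shared work
    // is already queued, assume donating more is not profitable.
    if (!shared.sharedMarkStack.empty())
        return;

    // Try-lock: if another thread holds the lock, assume it is donating.
    std::unique_lock<std::mutex> locker(shared.markingLock, std::try_to_lock);
    if (!locker.owns_lock())
        return;

    // Donate roughly half of the local work, then wake any idle markers.
    std::size_t cellsToDonate = localStack.size() / 2;
    while (cellsToDonate--) {
        shared.sharedMarkStack.push_back(localStack.back());
        localStack.pop_back();
    }
    if (shared.numberOfActiveParallelMarkers < shared.numberOfGCMarkers)
        shared.markingCondition.notify_all();
}

All three early-outs are conservative: at worst a donation is skipped, and because the draining loop calls donate repeatedly, a missed opportunity is retried soon after.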