Changeset 98937 in webkit for trunk/Source/JavaScriptCore/heap
- Timestamp: Oct 31, 2011, 11:43:37 PM
- Location: trunk/Source/JavaScriptCore/heap
- Files: 7 edited
trunk/Source/JavaScriptCore/heap/Heap.cpp
(diff from r98365 to r98937)

Includes: add <wtf/CurrentTime.h>.

     #include "Tracing.h"
     #include <algorithm>
+    #include <wtf/CurrentTime.h>

Heap constructor: construct the shared marking data before the slot visitor, and pass it to the visitor.

     , m_activityCallback(DefaultGCActivityCallback::create(this))
     , m_machineThreads(this)
-    , m_slotVisitor(globalData->jsArrayVPtr, globalData->jsFinalObjectVPtr, globalData->jsStringVPtr)
+    , m_sharedData(globalData)
+    , m_slotVisitor(m_sharedData, globalData->jsArrayVPtr, globalData->jsFinalObjectVPtr, globalData->jsStringVPtr)
     , m_handleHeap(globalData)
     , m_isSafeToCollect(false)

Heap destructor: clarify the comment.

-    // destroy our thread
+    // Destroy our block freeing thread.

Heap::markRoots(): the entire root-visiting sequence is wrapped in a ParallelModeEnabler scope, every visitor.drain() call becomes visitor.donateAndDrain(), and a convergence phase is added at the end.

     HeapRootVisitor heapRootVisitor(visitor);

+    {
+        ParallelModeEnabler enabler(visitor);
 #if ENABLE(GGC)
         {
             size_t dirtyCellCount = dirtyCells.size();
             GCPHASE(VisitDirtyCells);
             GCCOUNTER(DirtyCellCount, dirtyCellCount);
             for (size_t i = 0; i < dirtyCellCount; i++) {
                 heapRootVisitor.visitChildren(dirtyCells[i]);
-                visitor.drain();
+                visitor.donateAndDrain();
             }
         }
 #endif

     [The existing GCPHASE blocks (VisitActiveCodeBlock, VisitMachineRoots,
     VisitRegisterFileRoots, VisitProtectedObjects, VisitTempSortVectors,
     MarkingArgumentBuffers, MarkingException, VisitStrongHandles, HandleStack,
     and TraceCodeBlocks) are re-indented into this scope, with each
     visitor.drain() likewise replaced by visitor.donateAndDrain().]

+#if ENABLE(PARALLEL_GC)
+        {
+            GCPHASE(Convergence);
+            visitor.drainFromShared(SlotVisitor::MasterDrain);
+        }
+#endif
+    }

Weak-handle fixpoint loop: drain in parallel mode as well.

         lastOpaqueRootCount = visitor.opaqueRootCount();
         m_handleHeap.visitWeakHandles(heapRootVisitor);
-        visitor.drain();
+        {
+            ParallelModeEnabler enabler(visitor);
+            visitor.donateAndDrain();
+#if ENABLE(PARALLEL_GC)
+            visitor.drainFromShared(SlotVisitor::MasterDrain);
+#endif
+        }
         // If the set of opaque roots has grown, more weak handles may have become reachable.
     } while (lastOpaqueRootCount != visitor.opaqueRootCount());

End of marking: also reset the shared data.

     GCCOUNTER(VisitedValueCount, visitor.visitCount());
     visitor.reset();
+    m_sharedData.reset();

     m_operationInProgress = NoOperation;
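The reworked markRoots() follows a simple pattern: each phase pushes roots and calls donateAndDrain(), and a final convergence phase drains whatever ended up in the shared pool. A standalone toy sketch of that flow (illustrative names only, not WebKit code):

    #include <deque>
    #include <iostream>

    struct ToyVisitor {
        std::deque<int> local;   // stands in for the per-thread mark stack
        std::deque<int> shared;  // stands in for the shared mark stack
        bool inParallelMode = false;

        void append(int cell) { local.push_back(cell); }

        void donateAndDrain()
        {
            // Donate the excess of an oversized local stack, then drain the rest.
            while (local.size() > 4) {
                shared.push_back(local.back());
                local.pop_back();
            }
            while (!local.empty()) {
                std::cout << "visit " << local.back() << '\n';
                local.pop_back();
            }
        }

        void drainFromShared()
        {
            // "MasterDrain": keep pulling from the shared pool until it is empty.
            while (!shared.empty()) {
                local.push_back(shared.back());
                shared.pop_back();
                donateAndDrain();
            }
        }
    };

    int main()
    {
        ToyVisitor visitor;
        visitor.inParallelMode = true;          // what ParallelModeEnabler does in the real code
        for (int root = 0; root < 10; ++root)   // one "GC phase" worth of roots
            visitor.append(root);
        visitor.donateAndDrain();
        visitor.drainFromShared();              // the added Convergence phase
    }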
trunk/Source/JavaScriptCore/heap/Heap.h
(diff from r96760 to r98937)

Give SlotVisitor access to the Heap, and add the shared marking data as a member.

     friend class MarkedBlock;
     friend class AllocationSpace;
+    friend class SlotVisitor;

     static const size_t minExtraCost = 256;
 ...
     MachineThreads m_machineThreads;
+
+    MarkStackThreadSharedData m_sharedData;
     SlotVisitor m_slotVisitor;
+
     HandleHeap m_handleHeap;
     HandleStack m_handleStack;
trunk/Source/JavaScriptCore/heap/MarkStack.cpp
(diff from r97827 to r98937)

Includes:

     #include "ConservativeRoots.h"
     #include "Heap.h"
+    #include "Heuristics.h"
     #include "JSArray.h"
     #include "JSCell.h"
 ...
     #include "Structure.h"
     #include "WriteBarrier.h"
+    #include <wtf/MainThread.h>

     namespace JSC {

The old MarkStackArray (a single flat buffer allocated with MarkStack::allocateStack(), doubled in expand(), and trimmed with shrinkAllocation()) is removed. It is replaced by a segmented stack backed by a MarkStackSegmentAllocator that keeps a lock-protected free list of segments:

+MarkStackSegmentAllocator::MarkStackSegmentAllocator()
+    : m_nextFreeSegment(0)
+{
+}
+
+MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
+{
+    shrinkReserve();
+}
+
+MarkStackSegment* MarkStackSegmentAllocator::allocate()
+{
+    {
+        MutexLocker locker(m_lock);
+        if (m_nextFreeSegment) {
+            MarkStackSegment* result = m_nextFreeSegment;
+            m_nextFreeSegment = result->m_previous;
+            return result;
+        }
+    }
+
+    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Heuristics::gcMarkStackSegmentSize));
+}
+
+void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
+{
+    MutexLocker locker(m_lock);
+    segment->m_previous = m_nextFreeSegment;
+    m_nextFreeSegment = segment;
+}
+
+void MarkStackSegmentAllocator::shrinkReserve()
+{
+    MarkStackSegment* segments;
+    {
+        MutexLocker locker(m_lock);
+        segments = m_nextFreeSegment;
+        m_nextFreeSegment = 0;
+    }
+    while (segments) {
+        MarkStackSegment* toFree = segments;
+        segments = segments->m_previous;
+        OSAllocator::decommitAndRelease(toFree, Heuristics::gcMarkStackSegmentSize);
+    }
+}

MarkStackArray now holds a chain of segments; expand() pushes a fresh segment and refill() pops back to a full one:

+MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
+    : m_allocator(allocator)
+    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Heuristics::gcMarkStackSegmentSize))
+    , m_top(0)
+    , m_numberOfPreviousSegments(0)
+{
+    m_topSegment = m_allocator.allocate();
+#if !ASSERT_DISABLED
+    m_topSegment->m_top = 0;
+#endif
+    m_topSegment->m_previous = 0;
+}
+
+MarkStackArray::~MarkStackArray()
+{
+    ASSERT(!m_topSegment->m_previous);
+    m_allocator.release(m_topSegment);
+}
+
+void MarkStackArray::expand()
+{
+    ASSERT(m_topSegment->m_top == m_segmentCapacity);
+
+    m_numberOfPreviousSegments++;
+
+    MarkStackSegment* nextSegment = m_allocator.allocate();
+#if !ASSERT_DISABLED
+    nextSegment->m_top = 0;
+#endif
+    nextSegment->m_previous = m_topSegment;
+    m_topSegment = nextSegment;
+    setTopForEmptySegment();
+    validatePrevious();
+}
+
+bool MarkStackArray::refill()
+{
+    validatePrevious();
+    if (top())
+        return true;
+    MarkStackSegment* toFree = m_topSegment;
+    MarkStackSegment* previous = m_topSegment->m_previous;
+    if (!previous)
+        return false;
+    ASSERT(m_numberOfPreviousSegments);
+    m_numberOfPreviousSegments--;
+    m_topSegment = previous;
+    m_allocator.release(toFree);
+    setTopForFullSegment();
+    validatePrevious();
+    return true;
+}

Work balancing between mark stacks:

+bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
+{
+    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+    validatePrevious();
+    other.validatePrevious();
+
+    // Fast check: see if the other mark stack already has enough segments.
+    if (other.m_numberOfPreviousSegments + 1 >= Heuristics::maximumNumberOfSharedSegments)
+        return false;
+
+    size_t numberOfCellsToKeep = Heuristics::minimumNumberOfCellsToKeep;
+    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);
+
+    // Looks like we should donate! Give the other mark stack all of our
+    // previous segments, and then top it off.
+    MarkStackSegment* previous = m_topSegment->m_previous;
+    while (previous) {
+        ASSERT(m_numberOfPreviousSegments);
+
+        MarkStackSegment* current = previous;
+        previous = current->m_previous;
+
+        current->m_previous = other.m_topSegment->m_previous;
+        other.m_topSegment->m_previous = current;
+
+        m_numberOfPreviousSegments--;
+        other.m_numberOfPreviousSegments++;
+    }
+    ASSERT(!m_numberOfPreviousSegments);
+    m_topSegment->m_previous = 0;
+    validatePrevious();
+    other.validatePrevious();
+
+    // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
+    // we really have a lot of work, we give up half.
+    if (m_top > numberOfCellsToKeep * 2)
+        numberOfCellsToKeep = m_top / 2;
+    while (m_top > numberOfCellsToKeep)
+        other.append(removeLast());
+
+    return true;
+}
+
+void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
+{
+    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
+    validatePrevious();
+    other.validatePrevious();
+
+    // If other has an entire segment, steal it and return.
+    if (other.m_topSegment->m_previous) {
+        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);
+
+        // First remove a segment from other.
+        MarkStackSegment* current = other.m_topSegment->m_previous;
+        other.m_topSegment->m_previous = current->m_previous;
+        other.m_numberOfPreviousSegments--;
+
+        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);
+
+        // Now add it to this.
+        current->m_previous = m_topSegment->m_previous;
+        m_topSegment->m_previous = current;
+        m_numberOfPreviousSegments++;
+
+        validatePrevious();
+        other.validatePrevious();
+        return;
+    }
+
+    // Otherwise drain 1/Nth of the shared array where N is the number of
+    // workers, or Heuristics::minimumNumberOfCellsToKeep, whichever is bigger.
+    size_t numberOfCellsToSteal = std::max((size_t)Heuristics::minimumNumberOfCellsToKeep, other.size() / Heuristics::numberOfGCMarkers);
+    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
+        append(other.removeLast());
+}

The new MarkStackThreadSharedData owns the shared mark stack and the marking threads:

+#if ENABLE(PARALLEL_GC)
+void MarkStackThreadSharedData::markingThreadMain()
+{
+    WTF::registerGCThread();
+    SlotVisitor slotVisitor(*this, m_globalData->jsArrayVPtr, m_globalData->jsFinalObjectVPtr, m_globalData->jsStringVPtr);
+    ParallelModeEnabler enabler(slotVisitor);
+    slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
+}
+
+void* MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
+{
+    static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
+    return 0;
+}
+#endif
+
+MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
+    : m_globalData(globalData)
+    , m_sharedMarkStack(m_segmentAllocator)
+    , m_numberOfActiveParallelMarkers(0)
+    , m_parallelMarkersShouldExit(false)
+    , m_firstWeakReferenceHarvester(0)
+{
+#if ENABLE(PARALLEL_GC)
+    for (unsigned i = 1; i < Heuristics::numberOfGCMarkers; ++i) {
+        m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
+        ASSERT(m_markingThreads.last());
+    }
+#endif
+}
+
+MarkStackThreadSharedData::~MarkStackThreadSharedData()
+{
+#if ENABLE(PARALLEL_GC)
+    // Destroy our marking threads.
+    {
+        MutexLocker locker(m_markingLock);
+        m_parallelMarkersShouldExit = true;
+        m_markingCondition.broadcast();
+    }
+    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
+        waitForThreadCompletion(m_markingThreads[i], 0);
+#endif
+}
+
+void MarkStackThreadSharedData::reset()
+{
+    ASSERT(!m_numberOfActiveParallelMarkers);
+    ASSERT(!m_parallelMarkersShouldExit);
+    ASSERT(m_sharedMarkStack.isEmpty());
+
+#if ENABLE(PARALLEL_GC)
+    m_segmentAllocator.shrinkReserve();
+    m_opaqueRoots.clear();
+#else
+    ASSERT(m_opaqueRoots.isEmpty());
+#endif
+}

MarkStack::reset() no longer shrinks the (now segmented) stack and only clears opaque roots in the non-parallel build:

     m_visitCount = 0;
-    m_stack.shrinkAllocation(pageSize());
+    ASSERT(m_stack.isEmpty());
+#if ENABLE(PARALLEL_GC)
+    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
+#else
     m_opaqueRoots.clear();
+#endif

SlotVisitor donation slow path:

+void SlotVisitor::donateSlow()
+{
+    // Refuse to donate if shared has more entries than I do.
+    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
+        return;
+    MutexLocker locker(m_shared.m_markingLock);
+    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
+        // Only wake up threads if the shared stack is big enough; otherwise assume that
+        // it's more profitable for us to just scan this ourselves later.
+        if (m_shared.m_sharedMarkStack.size() >= Heuristics::sharedStackWakeupThreshold)
+            m_shared.m_markingCondition.broadcast();
+    }
+}

SlotVisitor::drain() periodically rebalances when running with more than one marker:

 void SlotVisitor::drain()
 {
+    ASSERT(m_isInParallelMode);
+
     void* jsFinalObjectVPtr = m_jsFinalObjectVPtr;
     void* jsArrayVPtr = m_jsArrayVPtr;
     void* jsStringVPtr = m_jsStringVPtr;

-    while (!m_stack.isEmpty())
-        visitChildren(*this, m_stack.removeLast(), jsFinalObjectVPtr, jsArrayVPtr, jsStringVPtr);
+#if ENABLE(PARALLEL_GC)
+    if (Heuristics::numberOfGCMarkers > 1) {
+        while (!m_stack.isEmpty()) {
+            m_stack.refill();
+            for (unsigned countdown = Heuristics::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
+                visitChildren(*this, m_stack.removeLast(), jsFinalObjectVPtr, jsArrayVPtr, jsStringVPtr);
+            donateKnownParallel();
+        }
+
+        mergeOpaqueRootsIfNecessary();
+        return;
+    }
+#endif
+
+    while (!m_stack.isEmpty()) {
+        m_stack.refill();
+        while (m_stack.canRemoveLast())
+            visitChildren(*this, m_stack.removeLast(), jsFinalObjectVPtr, jsArrayVPtr, jsStringVPtr);
+    }
 }

SlotVisitor::drainFromShared() implements the master/slave termination protocol:

+void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
+{
+    ASSERT(m_isInParallelMode);
+
+    ASSERT(Heuristics::numberOfGCMarkers);
+
+    bool shouldBeParallel;
+
+#if ENABLE(PARALLEL_GC)
+    shouldBeParallel = Heuristics::numberOfGCMarkers > 1;
+#else
+    ASSERT(Heuristics::numberOfGCMarkers == 1);
+    shouldBeParallel = false;
+#endif
+
+    if (!shouldBeParallel) {
+        // This call should be a no-op.
+        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
+        ASSERT(m_stack.isEmpty());
+        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
+        return;
+    }
+
+#if ENABLE(PARALLEL_GC)
+    {
+        MutexLocker locker(m_shared.m_markingLock);
+        m_shared.m_numberOfActiveParallelMarkers++;
+    }
+    while (true) {
+        {
+            MutexLocker locker(m_shared.m_markingLock);
+            m_shared.m_numberOfActiveParallelMarkers--;
+
+            // How we wait differs depending on drain mode.
+            if (sharedDrainMode == MasterDrain) {
+                // Wait until either termination is reached, or until there is some work
+                // for us to do.
+                while (true) {
+                    // Did we reach termination?
+                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
+                        return;
+
+                    // Is there work to be done?
+                    if (!m_shared.m_sharedMarkStack.isEmpty())
+                        break;
+
+                    // Otherwise wait.
+                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+                }
+            } else {
+                ASSERT(sharedDrainMode == SlaveDrain);
+
+                // Did we detect termination? If so, let the master know.
+                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
+                    m_shared.m_markingCondition.broadcast();
+
+                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
+                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+
+                // Is the VM exiting? If so, exit this thread.
+                if (m_shared.m_parallelMarkersShouldExit)
+                    return;
+            }
+
+            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
+            m_shared.m_numberOfActiveParallelMarkers++;
+        }
+
+        drain();
+    }
+#endif
+}

Opaque roots collected locally are merged into the shared set:

+void MarkStack::mergeOpaqueRoots()
+{
+    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
+    {
+        MutexLocker locker(m_shared.m_opaqueRootsLock);
+        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
+        HashSet<void*>::iterator end = m_opaqueRoots.end();
+        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
+            m_shared.m_opaqueRoots.add(*iter);
+    }
+    m_opaqueRoots.clear();
+}

SlotVisitor::harvestWeakReferences() now walks the shared list:

-    while (m_firstWeakReferenceHarvester) {
-        WeakReferenceHarvester* current = m_firstWeakReferenceHarvester;
+    while (m_shared.m_firstWeakReferenceHarvester) {
+        WeakReferenceHarvester* current = m_shared.m_firstWeakReferenceHarvester;
         WeakReferenceHarvester* next = reinterpret_cast<WeakReferenceHarvester*>(current->m_nextAndFlag & ~1);
         current->m_nextAndFlag = 0;
-        m_firstWeakReferenceHarvester = next;
+        m_shared.m_firstWeakReferenceHarvester = next;
         current->visitWeakReferences(*this);
     }
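The donation and stealing rules above are easier to see in isolation. Here is a toy model (plain std::vector stand-ins, not the WebKit classes; the constants mirror the Heuristics names but the values are invented): a donor keeps minimumNumberOfCellsToKeep cells, or half of its stack if it has a lot of work, and a thief takes 1/N of the shared stack or the minimum, whichever is larger.

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <vector>

    static const size_t minimumNumberOfCellsToKeep = 10; // assumed example value
    static const size_t numberOfGCMarkers = 4;           // assumed example value

    // Busy worker parks its excess cells in the shared stack.
    static void donateSomeCellsTo(std::vector<int>& mine, std::vector<int>& shared)
    {
        size_t keep = minimumNumberOfCellsToKeep;
        if (mine.size() > keep * 2)
            keep = mine.size() / 2; // lots of work: give away half
        while (mine.size() > keep) {
            shared.push_back(mine.back());
            mine.pop_back();
        }
    }

    // Idle worker takes a fair share (1/N) of the shared stack, but at least the minimum.
    static void stealSomeCellsFrom(std::vector<int>& mine, std::vector<int>& shared)
    {
        size_t toSteal = std::max(minimumNumberOfCellsToKeep, shared.size() / numberOfGCMarkers);
        while (toSteal-- && !shared.empty()) {
            mine.push_back(shared.back());
            shared.pop_back();
        }
    }

    int main()
    {
        std::vector<int> worker(100), shared, idle;
        for (int i = 0; i < 100; ++i)
            worker[i] = i;

        donateSomeCellsTo(worker, shared);
        stealSomeCellsFrom(idle, shared);
        std::cout << worker.size() << " kept, " << shared.size() << " shared, "
                  << idle.size() << " stolen\n";
    }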
trunk/Source/JavaScriptCore/heap/MarkStack.h
(diff from r97642 to r98937)

Includes and forward declarations:

     #include "HandleTypes.h"
+    #include "Heuristics.h"
     #include "JSValue.h"
     #include "Register.h"
 ...
     class JSGlobalData;
     class MarkStack;
+    class ParallelModeEnabler;
     class Register;
+    class SlotVisitor;
     template<typename T> class WriteBarrierBase;
     template<typename T> class JITWriteBarrier;

New types for the segmented, shareable mark stack:

+struct MarkStackSegment {
+    MarkStackSegment* m_previous;
+#if !ASSERT_DISABLED
+    size_t m_top;
+#endif
+
+    const JSCell** data()
+    {
+        return bitwise_cast<const JSCell**>(this + 1);
+    }
+
+    static size_t capacityFromSize(size_t size)
+    {
+        return (size - sizeof(MarkStackSegment)) / sizeof(const JSCell*);
+    }
+
+    static size_t sizeFromCapacity(size_t capacity)
+    {
+        return sizeof(MarkStackSegment) + capacity * sizeof(const JSCell*);
+    }
+};
+
+class MarkStackSegmentAllocator {
+public:
+    MarkStackSegmentAllocator();
+    ~MarkStackSegmentAllocator();
+
+    MarkStackSegment* allocate();
+    void release(MarkStackSegment*);
+
+    void shrinkReserve();
+
+private:
+    Mutex m_lock;
+    MarkStackSegment* m_nextFreeSegment;
+};

MarkStackArray is re-declared around the segment chain. Its constructor now takes a MarkStackSegmentAllocator, and it gains canRemoveLast(), refill(), canDonateSomeCells(), donateSomeCellsTo(), stealSomeCellsFrom(), and size(). The flat-buffer members (m_data, m_capacity, m_allocated) and shrinkAllocation() are gone, replaced by m_topSegment, m_allocator, m_segmentCapacity, m_top, and m_numberOfPreviousSegments, plus small private helpers (postIncTop(), preDecTop(), setTopForFullSegment(), setTopForEmptySegment(), top(), validatePrevious()) that keep a debug-only per-segment top and the previous-segment count consistent when asserts are enabled.

+class MarkStackThreadSharedData {
+public:
+    MarkStackThreadSharedData(JSGlobalData*);
+    ~MarkStackThreadSharedData();
+
+    void reset();
+
+private:
+    friend class MarkStack;
+    friend class SlotVisitor;
+
+#if ENABLE(PARALLEL_GC)
+    void markingThreadMain();
+    static void* markingThreadStartFunc(void* heap);
+#endif
+
+    JSGlobalData* m_globalData;
+
+    MarkStackSegmentAllocator m_segmentAllocator;
+
+    Vector<ThreadIdentifier> m_markingThreads;
+
+    Mutex m_markingLock;
+    ThreadCondition m_markingCondition;
+    MarkStackArray m_sharedMarkStack;
+    unsigned m_numberOfActiveParallelMarkers;
+    bool m_parallelMarkersShouldExit;
+
+    Mutex m_opaqueRootsLock;
+    HashSet<void*> m_opaqueRoots;
+
+    Mutex m_weakReferenceHarvesterLock;
+    WeakReferenceHarvester* m_firstWeakReferenceHarvester;
+};

MarkStack itself now takes the shared data in its constructor, initializes m_stack from the shared segment allocator, and tracks m_isInParallelMode and m_shared instead of owning m_firstWeakReferenceHarvester; the static allocateStack()/releaseStack() helpers are removed. addWeakReferenceHarvester() now takes m_shared.m_weakReferenceHarvesterLock and links into the shared list. addOpaqueRoot() returns void instead of bool, and new mergeOpaqueRoots(), mergeOpaqueRootsIfNecessary(), and mergeOpaqueRootsIfProfitable() (gated on Heuristics::opaqueRootMergeThreshold) move locally collected opaque roots into the shared set; containsOpaqueRoot() and opaqueRootCount() consult the shared set and assert that the local set has already been merged.

     inline void MarkStack::addOpaqueRoot(void* root)
     {
+#if ENABLE(PARALLEL_GC)
+        if (Heuristics::numberOfGCMarkers == 1) {
+            // Put directly into the shared HashSet.
+            m_shared.m_opaqueRoots.add(root);
+            return;
+        }
+        // Put into the local set, but merge with the shared one every once in
+        // a while to make sure that the local sets don't grow too large.
+        mergeOpaqueRootsIfProfitable();
+        m_opaqueRoots.add(root);
+#else
         m_opaqueRoots.add(root);
+#endif
     }

The MarkStackArray inlines are rewritten for the segmented layout:

+inline void MarkStackArray::append(const JSCell* cell)
+{
+    if (m_top == m_segmentCapacity)
+        expand();
+    m_topSegment->data()[postIncTop()] = cell;
+}
+
+inline bool MarkStackArray::canRemoveLast()
+{
+    return !!m_top;
+}
+
+inline const JSCell* MarkStackArray::removeLast()
+{
+    return m_topSegment->data()[preDecTop()];
+}
+
+inline bool MarkStackArray::isEmpty()
+{
+    if (m_top)
+        return false;
+    if (m_topSegment->m_previous) {
+        ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
+        return false;
+    }
+    return true;
+}
+
+inline bool MarkStackArray::canDonateSomeCells()
+{
+    size_t numberOfCellsToKeep = Heuristics::minimumNumberOfCellsToKeep;
+    // Another check: see if we have enough cells to warrant donation.
+    if (m_top <= numberOfCellsToKeep) {
+        // This indicates that we might not want to donate anything; check if we have
+        // another full segment. If not, then don't donate.
+        if (!m_topSegment->m_previous)
+            return false;
+
+        ASSERT(m_topSegment->m_previous->m_top == m_segmentCapacity);
+    }
+
+    return true;
+}
+
+inline size_t MarkStackArray::size()
+{
+    return m_top + m_segmentCapacity * m_numberOfPreviousSegments;
+}

Finally, the stray forward declaration of SlotVisitor at the end of the file is replaced by the new ParallelModeEnabler, an RAII helper that flips m_isInParallelMode for the duration of a scope:

-class SlotVisitor;
+class ParallelModeEnabler {
+public:
+    ParallelModeEnabler(MarkStack& stack)
+        : m_stack(stack)
+    {
+        ASSERT(!m_stack.m_isInParallelMode);
+        m_stack.m_isInParallelMode = true;
+    }
+
+    ~ParallelModeEnabler()
+    {
+        ASSERT(m_stack.m_isInParallelMode);
+        m_stack.m_isInParallelMode = false;
+    }
+
+private:
+    MarkStack& m_stack;
+};
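The MarkStackSegment header shares its allocation with the cell array that follows it, so capacityFromSize() and sizeFromCapacity() simply account for the header. A small standalone check of that arithmetic, using a stand-in struct (the 16 KB segment size is an assumed example value, not the actual Heuristics::gcMarkStackSegmentSize):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    struct Segment {
        Segment* previous; // chain link, unused in this size check

        static size_t capacityFromSize(size_t size)
        {
            return (size - sizeof(Segment)) / sizeof(const void*);
        }

        static size_t sizeFromCapacity(size_t capacity)
        {
            return sizeof(Segment) + capacity * sizeof(const void*);
        }
    };

    int main()
    {
        const size_t segmentSize = 16 * 1024; // assumed segment size for the example
        size_t capacity = Segment::capacityFromSize(segmentSize);
        // Round-tripping can lose at most one cell's worth of padding.
        assert(Segment::sizeFromCapacity(capacity) <= segmentSize);
        std::printf("a %zu-byte segment holds %zu cells\n", segmentSize, capacity);
    }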
trunk/Source/JavaScriptCore/heap/MarkedBlock.h
(diff from r97203 to r98937)

The per-block mark bitmap becomes atomic when parallel marking is enabled:

     size_t m_atomsPerCell;
     size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
-    WTF::Bitmap<atomsPerBlock> m_marks;
+#if ENABLE(PARALLEL_GC)
+    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
+#else
+    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
+#endif
     BlockState m_state;
     PageAllocationAligned m_allocation;
 ...
 inline bool MarkedBlock::testAndSetMarked(const void* p)
 {
-    return m_marks.testAndSet(atomNumber(p));
+    return m_marks.concurrentTestAndSet(atomNumber(p));
 }
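Under PARALLEL_GC several markers can race to set mark bits that live in the same word, which is why testAndSetMarked() switches to concurrentTestAndSet(). A minimal illustration of the idea using std::atomic (this is not the WTF::Bitmap implementation, just the underlying pattern):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    static std::atomic<uint64_t> markWord(0);

    // Returns true if the bit was already set, i.e. some thread already marked the cell.
    static bool concurrentTestAndSet(unsigned bit)
    {
        uint64_t mask = uint64_t(1) << bit;
        // fetch_or returns the previous value, so the check-and-set is a single atomic step.
        return markWord.fetch_or(mask, std::memory_order_relaxed) & mask;
    }

    int main()
    {
        bool first = concurrentTestAndSet(5);
        bool second = concurrentTestAndSet(5);
        std::printf("first: %d, second: %d\n", first, second); // prints "first: 0, second: 1"
    }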
trunk/Source/JavaScriptCore/heap/SlotVisitor.h
(diff from r97642 to r98937)

SlotVisitor takes the shared data and grows the donate/drain interface:

 namespace JSC {

+class Heap;
+
 class SlotVisitor : public MarkStack {
     friend class HeapRootVisitor;
 public:
-    SlotVisitor(void* jsArrayVPtr, void* jsFinalObjectVPtr, void* jsStringVPtr);
+    SlotVisitor(MarkStackThreadSharedData&, void* jsArrayVPtr, void* jsFinalObjectVPtr, void* jsStringVPtr);

+    void donate()
+    {
+        ASSERT(m_isInParallelMode);
+        if (Heuristics::numberOfGCMarkers == 1)
+            return;
+
+        donateKnownParallel();
+    }
+
     void drain();
+
+    void donateAndDrain()
+    {
+        donate();
+        drain();
+    }
+
+    enum SharedDrainMode { SlaveDrain, MasterDrain };
+    void drainFromShared(SharedDrainMode);
+
     void harvestWeakReferences();
+
+private:
+    void donateSlow();
+
+    void donateKnownParallel()
+    {
+        if (!m_stack.canDonateSomeCells())
+            return;
+        donateSlow();
+    }
 };

-inline SlotVisitor::SlotVisitor(void* jsArrayVPtr, void* jsFinalObjectVPtr, void* jsStringVPtr)
-    : MarkStack(jsArrayVPtr, jsFinalObjectVPtr, jsStringVPtr)
+inline SlotVisitor::SlotVisitor(MarkStackThreadSharedData& shared, void* jsArrayVPtr, void* jsFinalObjectVPtr, void* jsStringVPtr)
+    : MarkStack(shared, jsArrayVPtr, jsFinalObjectVPtr, jsStringVPtr)
 {
 }
trunk/Source/JavaScriptCore/heap/WeakReferenceHarvester.h
(diff from r95901 to r98937)

 class MarkStack;
+class MarkStackSharedData;
 class SlotVisitor;
 ...
 private:
     friend class MarkStack;
+    friend class MarkStackSharedData;
     friend class SlotVisitor;