Changeset 46511 in webkit for trunk/JavaScriptCore/wtf/FastMalloc.cpp
- Timestamp:
- Jul 28, 2009, 6:09:02 PM (16 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/JavaScriptCore/wtf/FastMalloc.cpp
r46387 r46511 96 96 #endif 97 97 98 99 // Use a background thread to periodically scavenge memory to release back to the system 100 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1 101 98 102 #ifndef NDEBUG 99 103 namespace WTF { … … 1188 1192 // ------------------------------------------------------------------------- 1189 1193 1194 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1195 // The central page heap collects spans of memory that have been deleted but are still committed until they are released 1196 // back to the system. We use a background thread to periodically scan the list of free spans and release some back to the 1197 // system. Every 5 seconds, the background thread wakes up and does the following: 1198 // - Check if we needed to commit memory in the last 5 seconds. If so, skip this scavenge because it's a sign that we are short 1199 // of free committed pages and so we should not release them back to the system yet. 1200 // - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages 1201 // back to the system. 1202 // - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the 1203 // scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount. 1204 1205 // Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system. 1206 static const int kScavengeTimerDelayInSeconds = 5; 1207 1208 // Number of free committed pages that we want to keep around. 1209 static const size_t kMinimumFreeCommittedPageCount = 512; 1210 1211 // During a scavenge, we'll release up to a fraction of the free committed pages. 1212 #if PLATFORM(WIN) 1213 // We are slightly less aggressive in releasing memory on Windows due to performance reasons. 
1214 static const int kMaxScavengeAmountFactor = 3; 1215 #else 1216 static const int kMaxScavengeAmountFactor = 2; 1217 #endif 1218 #endif 1219 1190 1220 class TCMalloc_PageHeap { 1191 1221 public: … … 1287 1317 uint64_t system_bytes_; 1288 1318 1319 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1320 // Number of pages kept in free lists that are still committed. 1321 Length free_committed_pages_; 1322 1323 // Number of pages that we committed in the last scavenge wait interval. 1324 Length pages_committed_since_last_scavenge_; 1325 #endif 1326 1289 1327 bool GrowHeap(Length n); 1290 1328 … … 1309 1347 Span* AllocLarge(Length n); 1310 1348 1349 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1311 1350 // Incrementally release some memory to the system. 1312 1351 // IncrementalScavenge(n) is called whenever n pages are freed. 1313 1352 void IncrementalScavenge(Length n); 1353 #endif 1314 1354 1315 1355 // Number of pages to deallocate before doing more scavenging … … 1322 1362 friend class FastMallocZone; 1323 1363 #endif 1364 1365 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1366 static NO_RETURN void* runScavengerThread(void*); 1367 1368 NO_RETURN void scavengerThread(); 1369 1370 void scavenge(); 1371 1372 inline bool shouldContinueScavenging() const; 1373 1374 pthread_mutex_t m_scavengeMutex; 1375 1376 pthread_cond_t m_scavengeCondition; 1377 1378 // Keeps track of whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds, or 1379 // it's blocked waiting for more pages to be deleted. 
1380 bool m_scavengeThreadActive; 1381 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1324 1382 }; 1325 1383 … … 1330 1388 free_pages_ = 0; 1331 1389 system_bytes_ = 0; 1390 1391 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1392 free_committed_pages_ = 0; 1393 pages_committed_since_last_scavenge_ = 0; 1394 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1395 1332 1396 scavenge_counter_ = 0; 1333 1397 // Start scavenging at kMaxPages list … … 1340 1404 DLL_Init(&free_[i].returned); 1341 1405 } 1342 } 1406 1407 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1408 pthread_mutex_init(&m_scavengeMutex, 0); 1409 pthread_cond_init(&m_scavengeCondition, 0); 1410 m_scavengeThreadActive = true; 1411 pthread_t thread; 1412 pthread_create(&thread, 0, runScavengerThread, this); 1413 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1414 } 1415 1416 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1417 void* TCMalloc_PageHeap::runScavengerThread(void* context) 1418 { 1419 reinterpret_cast<TCMalloc_PageHeap*>(context)->scavengerThread(); 1420 } 1421 1422 void TCMalloc_PageHeap::scavenge() 1423 { 1424 // If we had to commit memory in the last 5 seconds, it means we don't have enough free committed pages 1425 // for the amount of allocations that we do. So hold off on releasing memory back to the system. 1426 if (pages_committed_since_last_scavenge_ > 0) { 1427 pages_committed_since_last_scavenge_ = 0; 1428 return; 1429 } 1430 Length pagesDecommitted = 0; 1431 for (int i = kMaxPages; i >= 0; i--) { 1432 SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i]; 1433 if (!DLL_IsEmpty(&slist->normal)) { 1434 // Release the last span on the normal portion of this list 1435 Span* s = slist->normal.prev; 1436 // Only decommit up to a fraction of the free committed pages if pages_committed_since_last_scavenge_ > 0. 
1437 if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_) 1438 continue; 1439 DLL_Remove(s); 1440 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), 1441 static_cast<size_t>(s->length << kPageShift)); 1442 if (!s->decommitted) { 1443 pagesDecommitted += s->length; 1444 s->decommitted = true; 1445 } 1446 DLL_Prepend(&slist->returned, s); 1447 // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around. 1448 if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted) 1449 break; 1450 } 1451 } 1452 pages_committed_since_last_scavenge_ = 0; 1453 ASSERT(free_committed_pages_ >= pagesDecommitted); 1454 free_committed_pages_ -= pagesDecommitted; 1455 } 1456 1457 inline bool TCMalloc_PageHeap::shouldContinueScavenging() const 1458 { 1459 return free_committed_pages_ > kMinimumFreeCommittedPageCount; 1460 } 1461 1462 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1343 1463 1344 1464 inline Span* TCMalloc_PageHeap::New(Length n) { … … 1367 1487 TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift)); 1368 1488 result->decommitted = false; 1369 } 1489 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1490 pages_committed_since_last_scavenge_ += n; 1491 #endif 1492 } 1493 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1494 else { 1495 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the 1496 // free committed pages count. 
1497 ASSERT(free_committed_pages_ >= n); 1498 free_committed_pages_ -= n; 1499 } 1500 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1370 1501 ASSERT(Check()); 1371 1502 free_pages_ -= n; … … 1427 1558 TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift)); 1428 1559 best->decommitted = false; 1429 } 1560 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1561 pages_committed_since_last_scavenge_ += n; 1562 #endif 1563 } 1564 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1565 else { 1566 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the 1567 // free committed pages count. 1568 ASSERT(free_committed_pages_ >= n); 1569 free_committed_pages_ -= n; 1570 } 1571 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1430 1572 ASSERT(Check()); 1431 1573 free_pages_ -= n; … … 1509 1651 // entries for the pieces we are merging together because we only 1510 1652 // care about the pagemap entries for the boundaries. 1653 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1654 // Track the total size of the neighboring free spans that are committed. 
1655 Length neighboringCommittedSpansLength = 0; 1656 #endif 1511 1657 const PageID p = span->start; 1512 1658 const Length n = span->length; … … 1516 1662 ASSERT(prev->start + prev->length == p); 1517 1663 const Length len = prev->length; 1664 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1665 if (!prev->decommitted) 1666 neighboringCommittedSpansLength += len; 1667 #endif 1518 1668 mergeDecommittedStates(span, prev); 1519 1669 DLL_Remove(prev); … … 1529 1679 ASSERT(next->start == p+n); 1530 1680 const Length len = next->length; 1681 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1682 if (!next->decommitted) 1683 neighboringCommittedSpansLength += len; 1684 #endif 1531 1685 mergeDecommittedStates(span, next); 1532 1686 DLL_Remove(next); … … 1552 1706 free_pages_ += n; 1553 1707 1708 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1709 if (span->decommitted) { 1710 // If the merged span is decommitted, that means we decommitted any neighboring spans that were 1711 // committed. Update the free committed pages count. 1712 free_committed_pages_ -= neighboringCommittedSpansLength; 1713 } else { 1714 // If the merged span remains committed, add the deleted span's size to the free committed pages count. 1715 free_committed_pages_ += n; 1716 } 1717 1718 // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system. 
1719 if (!m_scavengeThreadActive && shouldContinueScavenging()) 1720 pthread_cond_signal(&m_scavengeCondition); 1721 #else 1554 1722 IncrementalScavenge(n); 1723 #endif 1724 1555 1725 ASSERT(Check()); 1556 1726 } 1557 1727 1728 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1558 1729 void TCMalloc_PageHeap::IncrementalScavenge(Length n) { 1559 1730 // Fast path; not yet time to release memory … … 1593 1764 scavenge_counter_ = kDefaultReleaseDelay; 1594 1765 } 1766 #endif 1595 1767 1596 1768 void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) { … … 1704 1876 ask = actual_size >> kPageShift; 1705 1877 1878 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1879 pages_committed_since_last_scavenge_ += ask; 1880 #endif 1881 1706 1882 uint64_t old_system_bytes = system_bytes_; 1707 1883 system_bytes_ += (ask << kPageShift); … … 2083 2259 2084 2260 #define pageheap getPageHeap() 2261 2262 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 2263 #if PLATFORM(WIN) 2264 static void sleep(unsigned seconds) 2265 { 2266 ::Sleep(seconds * 1000); 2267 } 2268 #endif 2269 2270 void TCMalloc_PageHeap::scavengerThread() 2271 { 2272 while (1) { 2273 if (!shouldContinueScavenging()) { 2274 pthread_mutex_lock(&m_scavengeMutex); 2275 m_scavengeThreadActive = false; 2276 // Block until there are enough freed pages to release back to the system. 2277 pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex); 2278 m_scavengeThreadActive = true; 2279 pthread_mutex_unlock(&m_scavengeMutex); 2280 } 2281 sleep(kScavengeTimerDelayInSeconds); 2282 { 2283 SpinLockHolder h(&pageheap_lock); 2284 pageheap->scavenge(); 2285 } 2286 } 2287 } 2288 #endif 2085 2289 2086 2290 // If TLS is available, we also store a copy
Note:
See TracChangeset
for help on using the changeset viewer.