Changeset 58346 in webkit for trunk/JavaScriptCore/wtf


Ignore:
Timestamp:
Apr 27, 2010, 3:55:22 PM (15 years ago)
Author:
Stephanie Lewis
Message:

https://bugs.webkit.org/show_bug.cgi?id=38154 FastMalloc calls madvise too often.
<rdar://problem/7834433> REGRESSION: 1.5% PLT regression due to 56028 (return memory quicker).
To save on madvise calls when recommitting memory, recommit the entire span and then carve it,
instead of carving the span up and only committing the part that will be used immediately.

Reviewed by Geoff Garen.

  • wtf/FastMalloc.cpp:

(WTF::TCMalloc_PageHeap::New):
(WTF::TCMalloc_PageHeap::AllocLarge):
(WTF::TCMalloc_PageHeap::Carve):

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/JavaScriptCore/wtf/FastMalloc.cpp

    r57457 r58346  
    15841584    Span* result = ll->next;
    15851585    Carve(result, n, released);
    1586     if (result->decommitted) {
    1587         TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
    1588         result->decommitted = false;
    1589     }
    15901586#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    1591     else {
    1592         // The newly allocated memory is from a span that's in the normal span list (already committed).  Update the
    1593         // free committed pages count.
    1594         ASSERT(free_committed_pages_ >= n);
    1595         free_committed_pages_ -= n;
    1596         if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
    1597             min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
    1598     }
     1587    // The newly allocated memory is from a span that's in the normal span list (already committed).  Update the
     1588    // free committed pages count.
     1589    ASSERT(free_committed_pages_ >= n);
     1590    free_committed_pages_ -= n;
     1591    if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
     1592      min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
    15991593#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    16001594    ASSERT(Check());
     
    16541648  if (best != NULL) {
    16551649    Carve(best, n, from_released);
    1656     if (best->decommitted) {
    1657         TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
    1658         best->decommitted = false;
    1659     }
    16601650#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    1661     else {
    1662         // The newly allocated memory is from a span that's in the normal span list (already committed).  Update the
    1663         // free committed pages count.
    1664         ASSERT(free_committed_pages_ >= n);
    1665         free_committed_pages_ -= n;
    1666         if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
    1667             min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
    1668     }
     1651    // The newly allocated memory is from a span that's in the normal span list (already committed).  Update the
     1652    // free committed pages count.
     1653    ASSERT(free_committed_pages_ >= n);
     1654    free_committed_pages_ -= n;
     1655    if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
     1656      min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
    16691657#endif  // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
    16701658    ASSERT(Check());
     
    16921680}
    16931681
    1694 static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
    1695 {
    1696     destination->decommitted = source->decommitted;
    1697 }
    1698 
    16991682inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
    17001683  ASSERT(n > 0);
     
    17031686  Event(span, 'A', n);
    17041687
     1688  if (released) {
     1689    // If the span chosen to carve from is decommited, commit the entire span at once to avoid committing spans 1 page at a time.
     1690    ASSERT(span->decommitted);
     1691    TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
     1692    span->decommitted = false;
     1693    free_committed_pages_ += span->length;
     1694  }
     1695 
    17051696  const int extra = static_cast<int>(span->length - n);
    17061697  ASSERT(extra >= 0);
     
    17081699    Span* leftover = NewSpan(span->start + n, extra);
    17091700    leftover->free = 1;
    1710     propagateDecommittedState(leftover, span);
     1701    leftover->decommitted = false;
    17111702    Event(leftover, 'S', extra);
    17121703    RecordSpan(leftover);
     
    17141705    // Place leftover span on appropriate free list
    17151706    SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
    1716     Span* dst = released ? &listpair->returned : &listpair->normal;
     1707    Span* dst = &listpair->normal;
    17171708    DLL_Prepend(dst, leftover);
    17181709
Note: See TracChangeset for help on using the changeset viewer.