Changeset 77145 in webkit for trunk/Source/JavaScriptCore
- Timestamp: Jan 31, 2011, 11:02:31 AM
- Location: trunk/Source/JavaScriptCore
- Files: 3 edited
trunk/Source/JavaScriptCore/ChangeLog
(r77125 → r77145: new entry added at the head of the ChangeLog)

2011-01-31  Gavin Barraclough  <[email protected]>

        Reviewed by Geoff Garen.

        https://bugs.webkit.org/show_bug.cgi?id=53352
        Heavy external fragmentation in FixedVMPoolAllocator can lead to a CRASH().

        The FixedVMPoolAllocator currently uses a best fit policy -
        switch to first fit, which is less prone to external fragmentation.

        * jit/ExecutableAllocatorFixedVMPool.cpp:
        (JSC::AllocationTableSizeClass::AllocationTableSizeClass):
        (JSC::AllocationTableSizeClass::blockSize):
        (JSC::AllocationTableSizeClass::blockCount):
        (JSC::AllocationTableSizeClass::blockAlignment):
        (JSC::AllocationTableSizeClass::size):
        (JSC::AllocationTableLeaf::AllocationTableLeaf):
        (JSC::AllocationTableLeaf::~AllocationTableLeaf):
        (JSC::AllocationTableLeaf::allocate):
        (JSC::AllocationTableLeaf::free):
        (JSC::AllocationTableLeaf::isEmpty):
        (JSC::AllocationTableLeaf::isFull):
        (JSC::AllocationTableLeaf::size):
        (JSC::AllocationTableLeaf::classForSize):
        (JSC::AllocationTableLeaf::dump):
        (JSC::LazyAllocationTable::LazyAllocationTable):
        (JSC::LazyAllocationTable::~LazyAllocationTable):
        (JSC::LazyAllocationTable::allocate):
        (JSC::LazyAllocationTable::free):
        (JSC::LazyAllocationTable::isEmpty):
        (JSC::LazyAllocationTable::isFull):
        (JSC::LazyAllocationTable::size):
        (JSC::LazyAllocationTable::dump):
        (JSC::LazyAllocationTable::classForSize):
        (JSC::AllocationTableDirectory::AllocationTableDirectory):
        (JSC::AllocationTableDirectory::~AllocationTableDirectory):
        (JSC::AllocationTableDirectory::allocate):
        (JSC::AllocationTableDirectory::free):
        (JSC::AllocationTableDirectory::isEmpty):
        (JSC::AllocationTableDirectory::isFull):
        (JSC::AllocationTableDirectory::size):
        (JSC::AllocationTableDirectory::classForSize):
        (JSC::AllocationTableDirectory::dump):
        (JSC::FixedVMPoolAllocator::FixedVMPoolAllocator):
        (JSC::FixedVMPoolAllocator::alloc):
        (JSC::FixedVMPoolAllocator::free):
        (JSC::FixedVMPoolAllocator::allocated):
        (JSC::FixedVMPoolAllocator::isValid):
        (JSC::FixedVMPoolAllocator::classForSize):
        (JSC::FixedVMPoolAllocator::offsetToPointer):
        (JSC::FixedVMPoolAllocator::pointerToOffset):
        (JSC::ExecutableAllocator::committedByteCount):
        (JSC::ExecutableAllocator::isValid):
        (JSC::ExecutableAllocator::underMemoryPressure):
        (JSC::ExecutablePool::systemAlloc):
        (JSC::ExecutablePool::systemRelease):
        * wtf/PageReservation.h:
        (WTF::PageReservation::PageReservation):
        (WTF::PageReservation::commit):
        (WTF::PageReservation::decommit):
        (WTF::PageReservation::committed):

(existing entry below, unchanged context)

2011-01-31  Sheriff Bot  <[email protected]>
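The one-line rationale above is the whole design change: instead of searching the free space for the smallest chunk that fits (best fit), the allocator now takes the first chunk that is large enough (first fit), which tends to leave larger, more reusable remainders behind and so suffers less external fragmentation. The sketch below is a minimal, self-contained illustration of the two policies over a plain vector of free chunks; it is not WebKit code, and FreeChunk, pickBestFit and pickFirstFit are invented names for the example.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Hypothetical free-list entry, just for illustration.
    struct FreeChunk {
        size_t offset;
        size_t size;
    };

    // Best fit: scan everything, keep the smallest chunk that still fits.
    // Over time this tends to leave many tiny, unusable remainders.
    int pickBestFit(const std::vector<FreeChunk>& freeList, size_t request)
    {
        int best = -1;
        for (size_t i = 0; i < freeList.size(); ++i) {
            if (freeList[i].size < request)
                continue;
            if (best == -1 || freeList[i].size < freeList[best].size)
                best = static_cast<int>(i);
        }
        return best;
    }

    // First fit: take the first chunk that is large enough.
    // Leftovers stay larger on average, so later requests are more likely to fit.
    int pickFirstFit(const std::vector<FreeChunk>& freeList, size_t request)
    {
        for (size_t i = 0; i < freeList.size(); ++i) {
            if (freeList[i].size >= request)
                return static_cast<int>(i);
        }
        return -1;
    }

    int main()
    {
        std::vector<FreeChunk> freeList = { { 0, 64 }, { 128, 20 }, { 256, 48 } };
        printf("best fit for 16:  index %d\n", pickBestFit(freeList, 16));  // picks the 20-byte chunk
        printf("first fit for 16: index %d\n", pickFirstFit(freeList, 16)); // picks the 64-byte chunk
        return 0;
    }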
trunk/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
r77090 r77145 43 43 #endif 44 44 45 static const unsigned vmPoolSizeOvercommit = 2u * 1024u * 1024u * 1024u; // 2Gb46 static const unsigned coalesceLimitOvercommit = 16u * 1024u * 1024u; // 16Mb47 48 static const unsigned vmPoolSizeNoOvercommit = 32u * 1024u * 1024u; // 32Mb49 static const unsigned coalesceLimitNoOvercommit = 4u * 1024u * 1024u; // 4Mb50 51 static const unsigned vmPoolSizeEmbedded = 16u * 1024u * 1024u; // 16Mb52 static const unsigned coalesceLimitEmbedded = 4u * 1024u * 1024u; // 4Mb53 54 #if CPU(X86_64) && !OS(LINUX)55 // These limits suitable on 64-bit platforms (particularly x86-64,56 // where we require all jumps to have a 2Gb max range). We don't57 // enable this by default on Linux, since it needs overcommit and58 // distros commonly disable that feature. We'll check the value59 // for the overcommit feature at runtime and re-assign the Generic60 // values if it's enabled.61 static unsigned vmPoolSize = vmPoolSizeOvercommit;62 static unsigned coalesceLimit = coalesceLimitOvercommit;63 #elif CPU(ARM)64 static unsigned vmPoolSize = vmPoolSizeEmbedded;65 static unsigned coalesceLimit = coalesceLimitEmbedded;66 #else67 static unsigned vmPoolSize = vmPoolSizeNoOvercommit;68 static unsigned coalesceLimit = coalesceLimitNoOvercommit;69 #endif70 71 45 using namespace WTF; 72 46 73 47 namespace JSC { 74 48 75 static size_t committedBytesCount = 0; 76 static SpinLock spinlock = SPINLOCK_INITIALIZER; 77 78 // FreeListEntry describes a free chunk of memory, stored in the freeList. 79 struct FreeListEntry { 80 FreeListEntry(void* pointer, size_t size) 81 : pointer(pointer) 82 , size(size) 83 , nextEntry(0) 84 , less(0) 85 , greater(0) 86 , balanceFactor(0) 87 { 88 } 89 90 // All entries of the same size share a single entry 91 // in the AVLTree, and are linked together in a linked 92 // list, using nextEntry. 93 void* pointer; 94 size_t size; 95 FreeListEntry* nextEntry; 96 97 // These fields are used by AVLTree. 98 FreeListEntry* less; 99 FreeListEntry* greater; 100 int balanceFactor; 49 #define TwoPow(n) (1ull << n) 50 51 class AllocationTableSizeClass { 52 public: 53 AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize) 54 : m_blockSize(blockSize) 55 { 56 ASSERT(blockSize == TwoPow(log2BlockSize)); 57 58 // Calculate the number of blocks needed to hold size. 59 size_t blockMask = blockSize - 1; 60 m_blockCount = (size + blockMask) >> log2BlockSize; 61 62 // Align to the smallest power of two >= m_blockCount. 63 m_blockAlignment = 1; 64 while (m_blockAlignment < m_blockCount) 65 m_blockAlignment += m_blockAlignment; 66 } 67 68 size_t blockSize() const { return m_blockSize; } 69 size_t blockCount() const { return m_blockCount; } 70 size_t blockAlignment() const { return m_blockAlignment; } 71 72 size_t size() 73 { 74 return m_blockSize * m_blockCount; 75 } 76 77 private: 78 size_t m_blockSize; 79 size_t m_blockCount; 80 size_t m_blockAlignment; 101 81 }; 102 82 103 // Abstractor class for use in AVLTree. 104 // Nodes in the AVLTree are of type FreeListEntry, keyed on 105 // (and thus sorted by) their size. 
106 struct AVLTreeAbstractorForFreeList { 107 typedef FreeListEntry* handle; 108 typedef int32_t size; 109 typedef size_t key; 110 111 handle get_less(handle h) { return h->less; } 112 void set_less(handle h, handle lh) { h->less = lh; } 113 handle get_greater(handle h) { return h->greater; } 114 void set_greater(handle h, handle gh) { h->greater = gh; } 115 int get_balance_factor(handle h) { return h->balanceFactor; } 116 void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; } 117 118 static handle null() { return 0; } 119 120 int compare_key_key(key va, key vb) { return va - vb; } 121 int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); } 122 int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); } 83 template<unsigned log2Entries> 84 class AllocationTableLeaf { 85 typedef uint64_t BitField; 86 87 public: 88 static const unsigned log2SubregionSize = 12; // 2^12 == pagesize 89 static const unsigned log2RegionSize = log2SubregionSize + log2Entries; 90 91 static const size_t subregionSize = TwoPow(log2SubregionSize); 92 static const size_t regionSize = TwoPow(log2RegionSize); 93 static const unsigned entries = TwoPow(log2Entries); 94 COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField); 95 96 AllocationTableLeaf() 97 : m_allocated(0) 98 { 99 } 100 101 ~AllocationTableLeaf() 102 { 103 ASSERT(isEmpty()); 104 } 105 106 size_t allocate(AllocationTableSizeClass& sizeClass) 107 { 108 ASSERT(sizeClass.blockSize() == subregionSize); 109 ASSERT(!isFull()); 110 111 size_t alignment = sizeClass.blockAlignment(); 112 size_t count = sizeClass.blockCount(); 113 // Use this mask to check for spans of free blocks. 114 BitField mask = ((1ull << count) - 1) << (alignment - count); 115 116 // Step in units of alignment size. 117 for (unsigned i = 0; i < entries; i += alignment) { 118 if (!(m_allocated & mask)) { 119 m_allocated |= mask; 120 return (i + (alignment - count)) << log2SubregionSize; 121 } 122 mask <<= alignment; 123 } 124 return notFound; 125 } 126 127 void free(size_t location, AllocationTableSizeClass& sizeClass) 128 { 129 ASSERT(sizeClass.blockSize() == subregionSize); 130 131 size_t entry = location >> log2SubregionSize; 132 size_t count = sizeClass.blockCount(); 133 BitField mask = ((1ull << count) - 1) << entry; 134 135 ASSERT((m_allocated & mask) == mask); 136 m_allocated &= ~mask; 137 } 138 139 bool isEmpty() 140 { 141 return !m_allocated; 142 } 143 144 bool isFull() 145 { 146 return !~m_allocated; 147 } 148 149 static size_t size() 150 { 151 return regionSize; 152 } 153 154 static AllocationTableSizeClass classForSize(size_t size) 155 { 156 return AllocationTableSizeClass(size, subregionSize, log2SubregionSize); 157 } 158 159 #ifndef NDEBUG 160 void dump(size_t parentOffset = 0, unsigned indent = 0) 161 { 162 for (unsigned i = 0; i < indent; ++i) 163 fprintf(stderr, " "); 164 fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, m_allocated); 165 } 166 #endif 167 168 private: 169 BitField m_allocated; 123 170 }; 124 171 125 // Used to reverse sort an array of FreeListEntry pointers. 126 static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr) 127 { 128 FreeListEntry* left = *(FreeListEntry**)leftPtr; 129 FreeListEntry* right = *(FreeListEntry**)rightPtr; 130 131 return (intptr_t)(right->pointer) - (intptr_t)(left->pointer); 132 } 133 134 // Used to reverse sort an array of pointers. 
135 static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr) 136 { 137 void* left = *(void**)leftPtr; 138 void* right = *(void**)rightPtr; 139 140 return (intptr_t)right - (intptr_t)left; 141 } 172 173 template<class NextLevel> 174 class LazyAllocationTable { 175 public: 176 static const unsigned log2RegionSize = NextLevel::log2RegionSize; 177 static const unsigned entries = NextLevel::entries; 178 179 LazyAllocationTable() 180 : m_ptr(0) 181 { 182 } 183 184 ~LazyAllocationTable() 185 { 186 ASSERT(isEmpty()); 187 } 188 189 size_t allocate(AllocationTableSizeClass& sizeClass) 190 { 191 if (!m_ptr) 192 m_ptr = new NextLevel(); 193 return m_ptr->allocate(sizeClass); 194 } 195 196 void free(size_t location, AllocationTableSizeClass& sizeClass) 197 { 198 ASSERT(m_ptr); 199 m_ptr->free(location, sizeClass); 200 if (m_ptr->isEmpty()) { 201 delete m_ptr; 202 m_ptr = 0; 203 } 204 } 205 206 bool isEmpty() 207 { 208 return !m_ptr; 209 } 210 211 bool isFull() 212 { 213 return m_ptr && m_ptr->isFull(); 214 } 215 216 static size_t size() 217 { 218 return NextLevel::size(); 219 } 220 221 #ifndef NDEBUG 222 void dump(size_t parentOffset = 0, unsigned indent = 0) 223 { 224 ASSERT(m_ptr); 225 m_ptr->dump(parentOffset, indent); 226 } 227 #endif 228 229 static AllocationTableSizeClass classForSize(size_t size) 230 { 231 return NextLevel::classForSize(size); 232 } 233 234 private: 235 NextLevel* m_ptr; 236 }; 237 238 template<class NextLevel, unsigned log2Entries> 239 class AllocationTableDirectory { 240 typedef uint64_t BitField; 241 242 public: 243 static const unsigned log2SubregionSize = NextLevel::log2RegionSize; 244 static const unsigned log2RegionSize = log2SubregionSize + log2Entries; 245 246 static const size_t subregionSize = TwoPow(log2SubregionSize); 247 static const size_t regionSize = TwoPow(log2RegionSize); 248 static const unsigned entries = TwoPow(log2Entries); 249 COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField); 250 251 AllocationTableDirectory() 252 : m_full(0) 253 , m_hasSuballocation(0) 254 { 255 } 256 257 ~AllocationTableDirectory() 258 { 259 ASSERT(isEmpty()); 260 } 261 262 size_t allocate(AllocationTableSizeClass& sizeClass) 263 { 264 ASSERT(sizeClass.blockSize() <= subregionSize); 265 ASSERT(!isFull()); 266 267 if (sizeClass.blockSize() < subregionSize) { 268 BitField bit = 1; 269 for (unsigned i = 0; i < entries; ++i, bit += bit) { 270 if (m_full & bit) 271 continue; 272 size_t location = m_suballocations[i].allocate(sizeClass); 273 if (location != notFound) { 274 // If this didn't already have a subregion, it does now! 275 m_hasSuballocation |= bit; 276 // Mirror the suballocation's full bit. 277 if (m_suballocations[i].isFull()) 278 m_full |= bit; 279 return (i * subregionSize) | location; 280 } 281 } 282 return notFound; 283 } 284 285 // A block is allocated if either it is fully allocated or contains suballocations. 286 BitField allocated = m_full | m_hasSuballocation; 287 288 size_t alignment = sizeClass.blockAlignment(); 289 size_t count = sizeClass.blockCount(); 290 // Use this mask to check for spans of free blocks. 291 BitField mask = ((1ull << count) - 1) << (alignment - count); 292 293 // Step in units of alignment size. 
294 for (unsigned i = 0; i < entries; i += alignment) { 295 if (!(allocated & mask)) { 296 m_full |= mask; 297 return (i + (alignment - count)) << log2SubregionSize; 298 } 299 mask <<= alignment; 300 } 301 return notFound; 302 } 303 304 void free(size_t location, AllocationTableSizeClass& sizeClass) 305 { 306 ASSERT(sizeClass.blockSize() <= subregionSize); 307 308 size_t entry = location >> log2SubregionSize; 309 310 if (sizeClass.blockSize() < subregionSize) { 311 BitField bit = 1ull << entry; 312 m_suballocations[entry].free(location & (subregionSize - 1), sizeClass); 313 // Check if the suballocation is now empty. 314 if (m_suballocations[entry].isEmpty()) 315 m_hasSuballocation &= ~bit; 316 // No need to check, it clearly isn't full any more! 317 m_full &= ~bit; 318 } else { 319 size_t count = sizeClass.blockCount(); 320 BitField mask = ((1ull << count) - 1) << entry; 321 ASSERT((m_full & mask) == mask); 322 ASSERT(!(m_hasSuballocation & mask)); 323 m_full &= ~mask; 324 } 325 } 326 327 bool isEmpty() 328 { 329 return !(m_full | m_hasSuballocation); 330 } 331 332 bool isFull() 333 { 334 return !~m_full; 335 } 336 337 static size_t size() 338 { 339 return regionSize; 340 } 341 342 static AllocationTableSizeClass classForSize(size_t size) 343 { 344 if (size < subregionSize) { 345 AllocationTableSizeClass sizeClass = NextLevel::classForSize(size); 346 if (sizeClass.size() < NextLevel::size()) 347 return sizeClass; 348 } 349 return AllocationTableSizeClass(size, subregionSize, log2SubregionSize); 350 } 351 352 #ifndef NDEBUG 353 void dump(size_t parentOffset = 0, unsigned indent = 0) 354 { 355 for (unsigned i = 0; i < indent; ++i) 356 fprintf(stderr, " "); 357 fprintf(stderr, "%08x: [", (int)parentOffset); 358 for (unsigned i = 0; i < entries; ++i) { 359 BitField bit = 1ull << i; 360 char c = m_hasSuballocation & bit 361 ? (m_full & bit ? 'N' : 'n') 362 : (m_full & bit ? 
'F' : '-'); 363 fprintf(stderr, "%c", c); 364 } 365 fprintf(stderr, "]\n"); 366 367 for (unsigned i = 0; i < entries; ++i) { 368 BitField bit = 1ull << i; 369 size_t offset = parentOffset | (subregionSize * i); 370 if (m_hasSuballocation & bit) 371 m_suballocations[i].dump(offset, indent + 1); 372 } 373 } 374 #endif 375 376 private: 377 NextLevel m_suballocations[entries]; 378 // Subregions exist in one of four states: 379 // (1) empty (both bits clear) 380 // (2) fully allocated as a single allocation (m_full set) 381 // (3) partially allocated through suballocations (m_hasSuballocation set) 382 // (4) fully allocated through suballocations (both bits set) 383 BitField m_full; 384 BitField m_hasSuballocation; 385 }; 386 387 388 typedef AllocationTableLeaf<6> PageTables256KB; 389 typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB; 390 typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB; 391 typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB; 392 393 #if CPU(ARM) 394 typedef PageTables16MB FixedVMPoolPageTables; 395 #elif CPU(X86_64) 396 typedef PageTables1GB FixedVMPoolPageTables; 397 #else 398 typedef PageTables32MB FixedVMPoolPageTables; 399 #endif 400 142 401 143 402 class FixedVMPoolAllocator 144 403 { 145 // The free list is stored in a sorted tree.146 typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;147 148 void release(void* position, size_t size)149 {150 m_allocation.decommit(position, size);151 addToCommittedByteCount(-static_cast<long>(size));152 }153 154 void reuse(void* position, size_t size)155 {156 m_allocation.commit(position, size);157 addToCommittedByteCount(static_cast<long>(size));158 }159 160 // All addition to the free list should go through this method, rather than161 // calling insert directly, to avoid multiple entries being added with the162 // same key. All nodes being added should be singletons, they should not163 // already be a part of a chain.164 void addToFreeList(FreeListEntry* entry)165 {166 ASSERT(!entry->nextEntry);167 168 if (entry->size == m_commonSize) {169 m_commonSizedAllocations.append(entry->pointer);170 delete entry;171 } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {172 // m_freeList already contain an entry for this size - insert this node into the chain.173 entry->nextEntry = entryInFreeList->nextEntry;174 entryInFreeList->nextEntry = entry;175 } else176 m_freeList.insert(entry);177 }178 179 // We do not attempt to coalesce addition, which may lead to fragmentation;180 // instead we periodically perform a sweep to try to coalesce neighboring181 // entries in m_freeList. Presently this is triggered at the point 16MB182 // of memory has been released.183 void coalesceFreeSpace()184 {185 Vector<FreeListEntry*> freeListEntries;186 SizeSortedFreeTree::Iterator iter;187 iter.start_iter_least(m_freeList);188 189 // Empty m_freeList into a Vector.190 for (FreeListEntry* entry; (entry = *iter); ++iter) {191 // Each entry in m_freeList might correspond to multiple192 // free chunks of memory (of the same size). Walk the chain193 // (this is likely of course only be one entry long!) 
adding194 // each entry to the Vector (at reseting the next in chain195 // pointer to separate each node out).196 FreeListEntry* next;197 do {198 next = entry->nextEntry;199 entry->nextEntry = 0;200 freeListEntries.append(entry);201 } while ((entry = next));202 }203 // All entries are now in the Vector; purge the tree.204 m_freeList.purge();205 206 // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.207 // We reverse-sort so that we can logically work forwards through memory,208 // whilst popping items off the end of the Vectors using last() and removeLast().209 qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);210 qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);211 212 // The entries from m_commonSizedAllocations that cannot be213 // coalesced into larger chunks will be temporarily stored here.214 Vector<void*> newCommonSizedAllocations;215 216 // Keep processing so long as entries remain in either of the vectors.217 while (freeListEntries.size() || m_commonSizedAllocations.size()) {218 // We're going to try to find a FreeListEntry node that we can coalesce onto.219 FreeListEntry* coalescionEntry = 0;220 221 // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?222 if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {223 // Pop an item from the m_commonSizedAllocations vector - this is the lowest224 // addressed free chunk. Find out the begin and end addresses of the memory chunk.225 void* begin = m_commonSizedAllocations.last();226 void* end = (void*)((intptr_t)begin + m_commonSize);227 m_commonSizedAllocations.removeLast();228 229 // Try to find another free chunk abutting onto the end of the one we have already found.230 if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {231 // There is an existing FreeListEntry for the next chunk of memory!232 // we can reuse this. Pop it off the end of m_freeList.233 coalescionEntry = freeListEntries.last();234 freeListEntries.removeLast();235 // Update the existing node to include the common-sized chunk that we also found.236 coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);237 coalescionEntry->size += m_commonSize;238 } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {239 // There is a second common-sized chunk that can be coalesced.240 // Allocate a new node.241 m_commonSizedAllocations.removeLast();242 coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);243 } else {244 // Nope - this poor little guy is all on his own. 
:-(245 // Add him into the newCommonSizedAllocations vector for now, we're246 // going to end up adding him back into the m_commonSizedAllocations247 // list when we're done.248 newCommonSizedAllocations.append(begin);249 continue;250 }251 } else {252 ASSERT(freeListEntries.size());253 ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));254 // The lowest addressed item is from m_freeList; pop it from the Vector.255 coalescionEntry = freeListEntries.last();256 freeListEntries.removeLast();257 }258 259 // Right, we have a FreeListEntry, we just need check if there is anything else260 // to coalesce onto the end.261 ASSERT(coalescionEntry);262 while (true) {263 // Calculate the end address of the chunk we have found so far.264 void* end = (void*)((intptr_t)coalescionEntry->pointer - coalescionEntry->size);265 266 // Is there another chunk adjacent to the one we already have?267 if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {268 // Yes - another FreeListEntry -pop it from the list.269 FreeListEntry* coalescee = freeListEntries.last();270 freeListEntries.removeLast();271 // Add it's size onto our existing node.272 coalescionEntry->size += coalescee->size;273 delete coalescee;274 } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {275 // We can coalesce the next common-sized chunk.276 m_commonSizedAllocations.removeLast();277 coalescionEntry->size += m_commonSize;278 } else279 break; // Nope, nothing to be added - stop here.280 }281 282 // We've coalesced everything we can onto the current chunk.283 // Add it back into m_freeList.284 addToFreeList(coalescionEntry);285 }286 287 // All chunks of free memory larger than m_commonSize should be288 // back in m_freeList by now. 
All that remains to be done is to289 // copy the contents on the newCommonSizedAllocations back into290 // the m_commonSizedAllocations Vector.291 ASSERT(m_commonSizedAllocations.size() == 0);292 m_commonSizedAllocations.append(newCommonSizedAllocations);293 }294 295 404 public: 296 297 FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize) 298 : m_commonSize(commonSize) 299 , m_countFreedSinceLastCoalesce(0) 300 { 301 m_allocation = PageReservation::reserve(totalHeapSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); 302 303 if (!!m_allocation) 304 m_freeList.insert(new FreeListEntry(m_allocation.base(), m_allocation.size())); 405 FixedVMPoolAllocator() 406 { 407 ASSERT(PageTables256KB::size() == 256 * 1024); 408 ASSERT(PageTables16MB::size() == 16 * 1024 * 1024); 409 ASSERT(PageTables32MB::size() == 32 * 1024 * 1024); 410 ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024); 411 412 m_reservation = PageReservation::reserve(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); 305 413 #if !ENABLE(INTERPRETER) 306 else414 if (!isValid()) 307 415 CRASH(); 308 416 #endif 309 417 } 310 311 ExecutablePool::Allocation alloc(size_t size) 312 { 313 return ExecutablePool::Allocation(allocInternal(size), size); 418 419 ExecutablePool::Allocation alloc(size_t requestedSize) 420 { 421 ASSERT(requestedSize); 422 AllocationTableSizeClass sizeClass = classForSize(requestedSize); 423 size_t size = sizeClass.size(); 424 ASSERT(size); 425 426 if (size >= FixedVMPoolPageTables::size()) 427 CRASH(); 428 if (m_pages.isFull()) 429 CRASH(); 430 431 size_t offset = m_pages.allocate(sizeClass); 432 if (offset == notFound) 433 CRASH(); 434 435 void* pointer = offsetToPointer(offset); 436 m_reservation.commit(pointer, size); 437 return ExecutablePool::Allocation(pointer, size); 314 438 } 315 439 … … 318 442 void* pointer = allocation.base(); 319 443 size_t size = allocation.size(); 320 321 ASSERT(!!m_allocation); 322 // Call release to report to the operating system that this 323 // memory is no longer in use, and need not be paged out. 324 ASSERT(isWithinVMPool(pointer, size)); 325 release(pointer, size); 326 327 // Common-sized allocations are stored in the m_commonSizedAllocations 328 // vector; all other freed chunks are added to m_freeList. 329 if (size == m_commonSize) 330 m_commonSizedAllocations.append(pointer); 331 else 332 addToFreeList(new FreeListEntry(pointer, size)); 333 334 // Do some housekeeping. Every time we reach a point that 335 // 16MB of allocations have been freed, sweep m_freeList 336 // coalescing any neighboring fragments. 
337 m_countFreedSinceLastCoalesce += size; 338 if (m_countFreedSinceLastCoalesce >= coalesceLimit) { 339 m_countFreedSinceLastCoalesce = 0; 340 coalesceFreeSpace(); 341 } 342 } 343 344 bool isValid() const { return !!m_allocation; } 444 ASSERT(size); 445 446 m_reservation.decommit(pointer, size); 447 448 AllocationTableSizeClass sizeClass = classForSize(size); 449 ASSERT(sizeClass.size() == size); 450 m_pages.free(pointerToOffset(pointer), sizeClass); 451 } 452 453 size_t allocated() 454 { 455 return m_reservation.committed(); 456 } 457 458 bool isValid() const 459 { 460 return !!m_reservation; 461 } 345 462 346 463 private: 347 void* allocInternal(size_t size) 348 { 349 #if ENABLE(INTERPRETER) 350 if (!m_allocation) 351 return 0; 352 #else 353 ASSERT(!!m_allocation); 354 #endif 355 void* result; 356 357 // Freed allocations of the common size are not stored back into the main 358 // m_freeList, but are instead stored in a separate vector. If the request 359 // is for a common sized allocation, check this list. 360 if ((size == m_commonSize) && m_commonSizedAllocations.size()) { 361 result = m_commonSizedAllocations.last(); 362 m_commonSizedAllocations.removeLast(); 363 } else { 364 // Search m_freeList for a suitable sized chunk to allocate memory from. 365 FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL); 366 367 // This would be bad news. 368 if (!entry) { 369 // Errk! Lets take a last-ditch desperation attempt at defragmentation... 370 coalesceFreeSpace(); 371 // Did that free up a large enough chunk? 372 entry = m_freeList.search(size, m_freeList.GREATER_EQUAL); 373 // No?... *BOOM!* 374 if (!entry) 375 CRASH(); 376 } 377 ASSERT(entry->size != m_commonSize); 378 379 // Remove the entry from m_freeList. But! - 380 // Each entry in the tree may represent a chain of multiple chunks of the 381 // same size, and we only want to remove one on them. So, if this entry 382 // does have a chain, just remove the first-but-one item from the chain. 383 if (FreeListEntry* next = entry->nextEntry) { 384 // We're going to leave 'entry' in the tree; remove 'next' from its chain. 385 entry->nextEntry = next->nextEntry; 386 next->nextEntry = 0; 387 entry = next; 388 } else 389 m_freeList.remove(entry->size); 390 391 // Whoo!, we have a result! 392 ASSERT(entry->size >= size); 393 result = entry->pointer; 394 395 // If the allocation exactly fits the chunk we found in the, 396 // m_freeList then the FreeListEntry node is no longer needed. 397 if (entry->size == size) 398 delete entry; 399 else { 400 // There is memory left over, and it is not of the common size. 401 // We can reuse the existing FreeListEntry node to add this back 402 // into m_freeList. 403 entry->pointer = (void*)((intptr_t)entry->pointer + size); 404 entry->size -= size; 405 addToFreeList(entry); 406 } 407 } 408 409 // Call reuse to report to the operating system that this memory is in use. 
410 ASSERT(isWithinVMPool(result, size)); 411 reuse(result, size); 412 return result; 413 } 414 415 #ifndef NDEBUG 416 bool isWithinVMPool(void* pointer, size_t size) 417 { 418 return pointer >= m_allocation.base() && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_allocation.base()) + m_allocation.size()); 419 } 420 #endif 421 422 void addToCommittedByteCount(long byteCount) 423 { 424 ASSERT(spinlock.IsHeld()); 425 ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1); 426 committedBytesCount += byteCount; 427 } 428 429 // Freed space from the most common sized allocations will be held in this list, ... 430 const size_t m_commonSize; 431 Vector<void*> m_commonSizedAllocations; 432 433 // ... and all other freed allocations are held in m_freeList. 434 SizeSortedFreeTree m_freeList; 435 436 // This is used for housekeeping, to trigger defragmentation of the freed lists. 437 size_t m_countFreedSinceLastCoalesce; 438 439 PageReservation m_allocation; 464 AllocationTableSizeClass classForSize(size_t size) 465 { 466 return FixedVMPoolPageTables::classForSize(size); 467 } 468 469 void* offsetToPointer(size_t offset) 470 { 471 return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset); 472 } 473 474 size_t pointerToOffset(void* pointer) 475 { 476 return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base()); 477 } 478 479 PageReservation m_reservation; 480 FixedVMPoolPageTables m_pages; 440 481 }; 441 482 483 484 static SpinLock spinlock = SPINLOCK_INITIALIZER; 485 static FixedVMPoolAllocator* allocator = 0; 486 487 442 488 size_t ExecutableAllocator::committedByteCount() 443 489 { 444 490 SpinLockHolder lockHolder(&spinlock); 445 return committedBytesCount;491 return allocator ? allocator->allocated() : 0; 446 492 } 447 493 … … 451 497 } 452 498 453 static FixedVMPoolAllocator* allocator = 0;454 static size_t allocatedCount = 0;455 456 #if OS(LINUX)457 static void maybeModifyVMPoolSize()458 {459 FILE* fp = fopen("/proc/sys/vm/overcommit_memory", "r");460 if (!fp)461 return;462 463 unsigned overcommit = 0;464 if (fscanf(fp, "%u", &overcommit) == 1) {465 if (overcommit == 1) {466 vmPoolSize = vmPoolSizeOvercommit;467 coalesceLimit = coalesceLimitOvercommit;468 }469 }470 471 fclose(fp);472 }473 #endif474 475 499 bool ExecutableAllocator::isValid() const 476 500 { 477 501 SpinLockHolder lock_holder(&spinlock); 478 if (!allocator) { 479 #if OS(LINUX) 480 maybeModifyVMPoolSize(); 481 #endif 482 allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, vmPoolSize); 483 } 502 if (!allocator) 503 allocator = new FixedVMPoolAllocator(); 484 504 return allocator->isValid(); 485 505 } … … 489 509 // Technically we should take the spin lock here, but we don't care if we get stale data. 490 510 // This is only really a heuristic anyway. 491 return allocat edCount > (vmPoolSize / 2);511 return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2)); 492 512 } 493 513 … … 496 516 SpinLockHolder lock_holder(&spinlock); 497 517 ASSERT(allocator); 498 allocatedCount += size;499 518 return allocator->alloc(size); 500 519 } … … 504 523 SpinLockHolder lock_holder(&spinlock); 505 524 ASSERT(allocator); 506 allocatedCount -= allocation.size();507 525 allocator->free(allocation); 508 526 } -
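The replacement allocator drops the AVL-tree free list entirely. A request is first rounded up to a size class (a block count plus the smallest power-of-two alignment that covers it, see AllocationTableSizeClass), and a leaf table then performs the first-fit search by sliding a bit mask of that many bits across a single 64-bit occupancy word in steps of the alignment. Below is a simplified, standalone sketch of that scan, assuming 4KB blocks and a 64-entry leaf as in the patch's PageTables256KB; the scan loop mirrors AllocationTableLeaf::allocate from the diff above, while the wrapper class, the classForSize helper and main are illustration-only.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Simplified sketch of the patch's leaf-level bitmap allocator.
    // One leaf tracks 64 blocks of 4KB (a 256KB region) in a single uint64_t.
    class LeafSketch {
    public:
        static const unsigned log2BlockSize = 12;  // 4KB blocks
        static const size_t entries = 64;          // one bit per block
        static const size_t notFound = static_cast<size_t>(-1);

        LeafSketch() : m_allocated(0) { }

        // First-fit scan: slide a `count`-bit mask across the word in steps of
        // `alignment` (the smallest power of two >= count), as in the patch's
        // AllocationTableLeaf::allocate.
        size_t allocate(size_t count, size_t alignment)
        {
            uint64_t mask = ((1ull << count) - 1) << (alignment - count);
            for (size_t i = 0; i < entries; i += alignment) {
                if (!(m_allocated & mask)) {
                    m_allocated |= mask;
                    // Return a byte offset within the 256KB region.
                    return (i + (alignment - count)) << log2BlockSize;
                }
                mask <<= alignment;
            }
            return notFound;
        }

        void free(size_t location, size_t count)
        {
            size_t entry = location >> log2BlockSize;
            uint64_t mask = ((1ull << count) - 1) << entry;
            m_allocated &= ~mask;
        }

        uint64_t bits() const { return m_allocated; }

    private:
        uint64_t m_allocated;
    };

    // Round a byte size up to (blockCount, blockAlignment), mirroring
    // AllocationTableSizeClass: alignment is the next power of two >= count.
    static void classForSize(size_t bytes, size_t& count, size_t& alignment)
    {
        const size_t blockSize = 1u << 12;
        count = (bytes + blockSize - 1) >> 12;
        alignment = 1;
        while (alignment < count)
            alignment += alignment;
    }

    int main()
    {
        LeafSketch leaf;
        size_t count, alignment;
        classForSize(3 * 4096 + 100, count, alignment);  // needs 4 blocks, aligned to 4
        size_t a = leaf.allocate(count, alignment);
        size_t b = leaf.allocate(count, alignment);
        printf("a=%zx b=%zx bits=%016llx\n", a, b, (unsigned long long)leaf.bits());
        leaf.free(a, count);
        printf("after free(a): bits=%016llx\n", (unsigned long long)leaf.bits());
        return 0;
    }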
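One level up, AllocationTableDirectory keeps two bit fields per directory, m_full and m_hasSuballocation, so each subregion sits in one of four states: free, fully allocated as a single block, partially allocated through a subtable, or full through suballocations. Small requests descend into the per-entry subtable; requests of a whole subregion or more are carved directly out of the directory's own bitmap, treating a subregion as occupied when it is either full or holds suballocations (m_full | m_hasSuballocation). The toy sketch below shows only that state bookkeeping, with a plain counter standing in for the real leaf and lazy-table types; DirectorySketch and ToySubtable are invented names.

    #include <cstdint>
    #include <cstdio>

    // Toy illustration of the directory's two-bitfield bookkeeping (not the real
    // AllocationTableDirectory; the subtable here is just a block counter).
    struct ToySubtable {
        unsigned used = 0;                 // blocks used out of 4
        bool isEmpty() const { return !used; }
        bool isFull() const { return used == 4; }
    };

    class DirectorySketch {
    public:
        static const unsigned entries = 8; // keep the example small

        // Record that subregion `i` gained `blocks` blocks via its subtable.
        void noteSuballocation(unsigned i, unsigned blocks)
        {
            m_sub[i].used += blocks;
            uint64_t bit = 1ull << i;
            m_hasSuballocation |= bit;     // it now contains a suballocation
            if (m_sub[i].isFull())
                m_full |= bit;             // mirror the subtable's "full" state
        }

        // Record that subregion `i` released `blocks` blocks.
        void noteFree(unsigned i, unsigned blocks)
        {
            m_sub[i].used -= blocks;
            uint64_t bit = 1ull << i;
            m_full &= ~bit;                // clearly not full any more
            if (m_sub[i].isEmpty())
                m_hasSuballocation &= ~bit;
        }

        // A subregion is unavailable for a whole-subregion allocation if it is
        // either fully allocated or partially suballocated.
        uint64_t occupied() const { return m_full | m_hasSuballocation; }

        void dump() const
        {
            for (unsigned i = 0; i < entries; ++i) {
                uint64_t bit = 1ull << i;
                // Same legend as the patch's dump(): 'N'/'n' = suballocated
                // (full/partial), 'F' = full single allocation, '-' = free.
                char c = (m_hasSuballocation & bit)
                    ? ((m_full & bit) ? 'N' : 'n')
                    : ((m_full & bit) ? 'F' : '-');
                fputc(c, stderr);
            }
            fputc('\n', stderr);
        }

    private:
        ToySubtable m_sub[entries];
        uint64_t m_full = 0;
        uint64_t m_hasSuballocation = 0;
    };

    int main()
    {
        DirectorySketch dir;
        dir.noteSuballocation(0, 2);  // partial: 'n'
        dir.noteSuballocation(1, 4);  // full via suballocation: 'N'
        dir.dump();                   // prints "nN------"
        dir.noteFree(1, 4);
        dir.dump();                   // prints "n-------"
        return 0;
    }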
trunk/Source/JavaScriptCore/wtf/PageReservation.h
(r77090 → r77145)

@@ -58,9 +58,7 @@
     public:
         PageReservation()
-            : m_writable(false)
+            : m_committed(0)
+            , m_writable(false)
             , m_executable(false)
-#ifndef NDEBUG
-            , m_committed(0)
-#endif
         {
         }

@@ -84,7 +82,5 @@
             ASSERT(contains(start, size));

-#ifndef NDEBUG
             m_committed += size;
-#endif
             OSAllocator::commit(start, size, m_writable, m_executable);
         }

@@ -97,8 +93,11 @@
             ASSERT(contains(start, size));

-#ifndef NDEBUG
             m_committed -= size;
-#endif
             OSAllocator::decommit(start, size);
+        }
+
+        size_t committed()
+        {
+            return m_committed;
         }

@@ -127,17 +126,13 @@
         PageReservation(void* base, size_t size, bool writable, bool executable)
             : PageBlock(base, size)
+            , m_committed(0)
             , m_writable(writable)
             , m_executable(executable)
-#ifndef NDEBUG
-            , m_committed(0)
-#endif
         {
         }

+        size_t m_committed;
         bool m_writable;
         bool m_executable;
-#ifndef NDEBUG
-        size_t m_committed;
-#endif
     };
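The PageReservation change exists to support the new accounting path: m_committed moves out of the NDEBUG-only section so committed bytes are tracked in release builds too, the new committed() accessor exposes the count, and ExecutableAllocator::committedByteCount() simply reports allocator->allocated(), which forwards to it. The following is a stripped-down, POSIX-only sketch of the same reserve/commit/decommit pattern with a byte counter; ReservationSketch is an invented name, and mmap/mprotect/madvise stand in for WTF's OSAllocator, which the sketch does not use.

    #include <cstddef>
    #include <cstdio>
    #include <sys/mman.h>

    // Minimal POSIX sketch of the PageReservation pattern: reserve a large range
    // of address space up front, then commit/decommit pages on demand while
    // keeping a byte count that can be reported cheaply (as the patch's
    // PageReservation::committed() now does in release builds as well).
    class ReservationSketch {
    public:
        explicit ReservationSketch(size_t size)
            : m_base(mmap(0, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0))
            , m_size(size)
            , m_committed(0)
        {
        }

        ~ReservationSketch() { munmap(m_base, m_size); }

        void* base() const { return m_base; }

        void commit(void* start, size_t size)
        {
            m_committed += size;
            mprotect(start, size, PROT_READ | PROT_WRITE);
        }

        void decommit(void* start, size_t size)
        {
            m_committed -= size;
            madvise(start, size, MADV_DONTNEED);   // let the kernel drop the pages
            mprotect(start, size, PROT_NONE);
        }

        size_t committed() const { return m_committed; }

    private:
        void* m_base;
        size_t m_size;
        size_t m_committed;
    };

    int main()
    {
        const size_t page = 4096;
        ReservationSketch reservation(64 * page);  // reserve 256KB, commit nothing yet
        char* p = static_cast<char*>(reservation.base());

        reservation.commit(p, 4 * page);
        p[0] = 1;                                  // safe: these pages are committed
        printf("committed: %zu bytes\n", reservation.committed());

        reservation.decommit(p, 4 * page);
        printf("committed: %zu bytes\n", reservation.committed());
        return 0;
    }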