Changeset 34360 in webkit for trunk/JavaScriptCore/kjs/collector.cpp
- Timestamp: Jun 4, 2008, 9:29:49 AM
- File: trunk/JavaScriptCore/kjs/collector.cpp (1 edited)
trunk/JavaScriptCore/kjs/collector.cpp
r34088 → r34360

The only line whose content is dropped is the Emacs mode line at the top of the file:

// -*- mode: c++; c-basic-offset: 4 -*-

Every other change in the diff reformats existing code in place (indentation and whitespace, pointer placement, and initializer-list layout); the affected regions read as follows in r34360.

Lines 1-2:

/*
 * Copyright (C) 2003, 2004, 2005, 2006, 2007 Apple Inc. All rights reserved.

Lines 181-292:

template <Collector::HeapType heapType> void* Collector::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(s <= HeapConstants<heapType>::cellSize);
    UNUSED_PARAM(s); // s is now only used for the above assert

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could abort() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;

#if COLLECT_ON_EVERY_ALLOCATION
    collect();
#endif

    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    // free cells left.
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif

scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = (Block*)heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = (Block*)heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {

    collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool collected = collect();
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (collected) {
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
                goto scan;
            }
        }

        // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
        }

        targetBlock = (Block*)allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = (CollectorBlock*)targetBlock;
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    }

    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}

Lines 408-421:

class Collector::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

Lines 427-486:

static void destroyRegisteredThread(void* data)
{
    Collector::Thread* thread = (Collector::Thread*)data;

    // Can't use JSLock convenience object here because we don't want to re-register
    // an exiting thread.
    JSLock::lock();

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        Collector::Thread* last = registeredThreads;
        Collector::Thread* t;
        for (t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
    }

    JSLock::unlock();

    delete thread;
}

static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}

void Collector::registerThread()
{
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
#if PLATFORM(DARWIN)
        if (onMainThread())
            CollectorHeapIntrospector::init(&primaryHeap, &numberHeap);
#endif

        Collector::Thread* thread = new Collector::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)

Lines 488-540:

void Collector::markStackObjectsConservatively(void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT(((char*)end - (char*)start) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = (char**)start;
    char** e = (char**)end;

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            // Mark the the number heap, we can mark these Cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Collector::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }

            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (((CollectorCell*)xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}

Lines 569-575 (excerpt):

{
#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform

Lines 580-586 (excerpt):

{
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform

Lines 616-646 (excerpt):

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform

Lines 656-663 (excerpt):

#if PLATFORM(X86)
    return (void*)regs.__esp;
#elif PLATFORM(X86_64)
    return (void*)regs.__rsp;
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return (void*)regs.__r1;
#else
#error Unknown Architecture

Lines 668-675 (excerpt):

#if PLATFORM(X86)
    return (void*)regs.esp;
#elif PLATFORM(X86_64)
    return (void*)regs.rsp;
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return (void*)regs.r1;
#else
#error Unknown Architecture

Lines 680-684 (excerpt):

// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return (void*)(uintptr_t)regs.Esp;
#else
#error Need a way to get the stack pointer for another thread on this platform

Lines 688-702:

void Collector::markOtherThreadConservatively(Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markStackObjectsConservatively((void*)&regs, (void*)((char*)&regs + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markStackObjectsConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

Lines 706-717:

void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread* thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (!pthread_equal(thread->posixThread, pthread_self())) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}

Lines 727-774 (excerpts):

void Collector::protect(JSValue* k)
{
    ASSERT(k);
…
    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue* k)
{
    ASSERT(k);
…
    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().remove(k->asCell());
…
    if (JSImmediate::isImmediate(value))
        return;

    JSCell* cell = value->asCell();
…
void Collector::markProtectedObjects()
{
    ProtectCountSet& protectedValues = KJS::protectedValues();
    ProtectCountSet::iterator end = protectedValues.end();
    for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }
}

Lines 868-878 and 911-915 (excerpts):

        size_t minimumCellsToProcess = usedCells;
        for (size_t i = 0; (i < minimumCellsToProcess) & (i < HeapConstants<heapType>::cellsPerBlock); i++) {
            Cell* cell = curBlock->cells + i;
            if (cell->u.freeCell.zeroIfFree == 0) {
                ++minimumCellsToProcess;
…
                if (!curBlock->marked.get(i >> HeapConstants<heapType>::bitmapShift)) {
                    if (heapType != Collector::NumberHeap) {
                        JSCell* imp = reinterpret_cast<JSCell*>(cell);
                        ASSERT(currentThreadIsMainThread || !curBlock->collectOnMainThreadOnly.get(i));
                        if (curBlock->collectOnMainThreadOnly.get(i)) {
…
    if (heap.numBlocks > MIN_ARRAY_SIZE && heap.usedBlocks < heap.numBlocks / LOW_WATER_FACTOR) {
        heap.numBlocks = heap.numBlocks / GROWTH_FACTOR;
        heap.blocks = (CollectorBlock**)fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*));
    }
}

Lines 928-1041:

bool Collector::collect()
{
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        abort();

    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    bool currentThreadIsMainThread = onMainThread();

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

#ifndef NDEBUG
    // Forbid malloc during the mark phase. Marking a thread suspends it, so
    // a malloc inside mark() would risk a deadlock with a thread that had been
    // suspended while holding the malloc lock.
    fastMallocForbid();
#endif

    markStackObjectsConservatively();
    markProtectedObjects();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread)
        markMainThreadOnlyObjects();
#endif

#ifndef NDEBUG
    fastMallocAllow();
#endif

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>(currentThreadIsMainThread);
    numLiveObjects += sweep<NumberHeap>(currentThreadIsMainThread);

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;

    return numLiveObjects < originalLiveObjects;
}

size_t Collector::size()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
}

size_t Collector::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject::head()) {
        JSGlobalObject* o = JSGlobalObject::head();
        do {
            ++count;
            o = o->next();
        } while (o != JSGlobalObject::head());
    }
    return count;
}

size_t Collector::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject::head()) {
        JSGlobalObject* o = JSGlobalObject::head();
        do {
            if (protectedValues().contains(o))
                ++count;
            o = o->next();
        } while (o != JSGlobalObject::head());
    }
    return count;
}

size_t Collector::protectedObjectCount()
{
    return protectedValues().size();
}

static const char* typeName(JSCell* val)
{
    const char* name = "???";
    switch (val->type()) {
    case UnspecifiedType:
        break;
    case UndefinedType:
        name = "undefined";
        break;
    case NullType:
        name = "null";
        break;
    case BooleanType:
        name = "boolean";
        break;
    case StringType:
        name = "string";
        break;
    case NumberType:
        name = "number";
        break;
    case ObjectType: {
        const ClassInfo* info = static_cast<JSObject*>(val)->classInfo();
        name = info ? info->className : "Object";
        break;
    }
    case GetterSetterType:
        name = "gettersetter";
        break;
    }
    return name;
}