Changeset 34360 in webkit for trunk/JavaScriptCore/kjs
- Timestamp: Jun 4, 2008, 9:29:49 AM
- Location: trunk/JavaScriptCore/kjs
- Files: 2 edited
trunk/JavaScriptCore/kjs/collector.cpp
r34088 → r34360. The change is a style cleanup: the Emacs mode line is removed from the top of the file, and the touched code is reformatted to WebKit style — uniform four-space indentation, the * attached to the type in pointer declarations, and constructor initializer lists written one entry per line. The changed regions read as follows after r34360.

Removed from the license header:

// -*- mode: c++; c-basic-offset: 4 -*-

Collector::heapAllocate, reindented throughout:

template <Collector::HeapType heapType> void* Collector::heapAllocate(size_t s)
{
    typedef typename HeapConstants<heapType>::Block Block;
    typedef typename HeapConstants<heapType>::Cell Cell;

    CollectorHeap& heap = heapType == PrimaryHeap ? primaryHeap : numberHeap;
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(s <= HeapConstants<heapType>::cellSize);
    UNUSED_PARAM(s); // s is now only used for the above assert

    ASSERT(heap.operationInProgress == NoOperation);
    ASSERT(heapType == PrimaryHeap || heap.extraCost == 0);
    // FIXME: If another global variable access here doesn't hurt performance
    // too much, we could abort() in NDEBUG builds, which could help ensure we
    // don't spend any time debugging cases where we allocate inside an object's
    // deallocation code.

    size_t numLiveObjects = heap.numLiveObjects;
    size_t usedBlocks = heap.usedBlocks;
    size_t i = heap.firstBlockWithPossibleSpace;

#if COLLECT_ON_EVERY_ALLOCATION
    collect();
#endif

    // if we have a huge amount of extra cost, we'll try to collect even if we still have
    // free cells left.
    if (heapType == PrimaryHeap && heap.extraCost > ALLOCATIONS_PER_COLLECTION) {
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;
        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect)
            goto collect;
    }

    ASSERT(heap.operationInProgress == NoOperation);
#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = Allocation;
#endif

scan:
    Block* targetBlock;
    size_t targetBlockUsedCells;
    if (i != usedBlocks) {
        targetBlock = (Block*)heap.blocks[i];
        targetBlockUsedCells = targetBlock->usedCells;
        ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        while (targetBlockUsedCells == HeapConstants<heapType>::cellsPerBlock) {
            if (++i == usedBlocks)
                goto collect;
            targetBlock = (Block*)heap.blocks[i];
            targetBlockUsedCells = targetBlock->usedCells;
            ASSERT(targetBlockUsedCells <= HeapConstants<heapType>::cellsPerBlock);
        }
        heap.firstBlockWithPossibleSpace = i;
    } else {

collect:
        size_t numLiveObjectsAtLastCollect = heap.numLiveObjectsAtLastCollect;
        size_t numNewObjects = numLiveObjects - numLiveObjectsAtLastCollect;
        const size_t newCost = numNewObjects + heap.extraCost;

        if (newCost >= ALLOCATIONS_PER_COLLECTION && newCost >= numLiveObjectsAtLastCollect) {
#ifndef NDEBUG
            heap.operationInProgress = NoOperation;
#endif
            bool collected = collect();
#ifndef NDEBUG
            heap.operationInProgress = Allocation;
#endif
            if (collected) {
                numLiveObjects = heap.numLiveObjects;
                usedBlocks = heap.usedBlocks;
                i = heap.firstBlockWithPossibleSpace;
                goto scan;
            }
        }

        // didn't find a block, and GC didn't reclaim anything, need to allocate a new block
        size_t numBlocks = heap.numBlocks;
        if (usedBlocks == numBlocks) {
            numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
            heap.numBlocks = numBlocks;
            heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*)));
        }

        targetBlock = (Block*)allocateBlock();
        targetBlock->freeList = targetBlock->cells;
        targetBlockUsedCells = 0;
        heap.blocks[usedBlocks] = (CollectorBlock*)targetBlock;
        heap.usedBlocks = usedBlocks + 1;
        heap.firstBlockWithPossibleSpace = usedBlocks;
    }

    // find a free spot in the block and detach it from the free list
    Cell* newCell = targetBlock->freeList;

    // "next" field is a cell offset -- 0 means next cell, so a zeroed block is already initialized
    targetBlock->freeList = (newCell + 1) + newCell->u.freeCell.next;

    targetBlock->usedCells = static_cast<uint32_t>(targetBlockUsedCells + 1);
    heap.numLiveObjects = numLiveObjects + 1;

#ifndef NDEBUG
    // FIXME: Consider doing this in NDEBUG builds too (see comment above).
    heap.operationInProgress = NoOperation;
#endif

    return newCell;
}
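The free-list detach near the end of heapAllocate relies on the encoding its comment describes: a free cell's next field holds an offset relative to the cell just past it, so a freshly zeroed block is already a valid free list of consecutive cells. Below is a minimal standalone sketch of that encoding — the Cell layout mirrors CollectorCell but is a simplified stand-in, not WebKit code:

#include <cassert>
#include <cstddef>
#include <cstring>

struct Cell {
    union {
        double memory[4]; // payload placeholder; real cells hold a JSCell
        struct {
            void* zeroIfFree;    // zero marks the cell as free
            std::ptrdiff_t next; // offset from (this + 1) to the next free cell
        } freeCell;
    } u;
};

int main()
{
    Cell block[8];
    std::memset(block, 0, sizeof(block)); // zeroed block: the free list needs no setup

    Cell* freeList = block;
    for (int i = 0; i < 8; ++i) {
        Cell* cell = freeList;
        assert(cell == block + i); // a zero "next" simply walks consecutive cells
        freeList = (cell + 1) + cell->u.freeCell.next; // the detach step from heapAllocate
    }
}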
Collector::Thread, with the initializer list reformatted:

class Collector::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

Thread registration, reindented:

static void destroyRegisteredThread(void* data)
{
    Collector::Thread* thread = (Collector::Thread*)data;

    // Can't use JSLock convenience object here because we don't want to re-register
    // an exiting thread.
    JSLock::lock();

    if (registeredThreads == thread) {
        registeredThreads = registeredThreads->next;
    } else {
        Collector::Thread* last = registeredThreads;
        Collector::Thread* t;
        for (t = registeredThreads->next; t != NULL; t = t->next) {
            if (t == thread) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
    }

    JSLock::unlock();

    delete thread;
}

static void initializeRegisteredThreadKey()
{
    pthread_key_create(&registeredThreadKey, destroyRegisteredThread);
}

void Collector::registerThread()
{
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    pthread_once(&registeredThreadKeyOnce, initializeRegisteredThreadKey);

    if (!pthread_getspecific(registeredThreadKey)) {
#if PLATFORM(DARWIN)
        if (onMainThread())
            CollectorHeapIntrospector::init(&primaryHeap, &numberHeap);
#endif

        Collector::Thread* thread = new Collector::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

        thread->next = registeredThreads;
        registeredThreads = thread;
        pthread_setspecific(registeredThreadKey, thread);
    }
}

#endif

#define IS_POINTER_ALIGNED(p) (((intptr_t)(p) & (sizeof(char*) - 1)) == 0)

// cell size needs to be a power of two for this to be valid
#define IS_HALF_CELL_ALIGNED(p) (((intptr_t)(p) & (CELL_MASK >> 1)) == 0)

Conservative stack scanning, reindented:

void Collector::markStackObjectsConservatively(void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT(((char*)end - (char*)start) < 0x1000000);
    ASSERT(IS_POINTER_ALIGNED(start));
    ASSERT(IS_POINTER_ALIGNED(end));

    char** p = (char**)start;
    char** e = (char**)end;

    size_t usedPrimaryBlocks = primaryHeap.usedBlocks;
    size_t usedNumberBlocks = numberHeap.usedBlocks;
    CollectorBlock** primaryBlocks = primaryHeap.blocks;
    CollectorBlock** numberBlocks = numberHeap.blocks;

    const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);

    while (p != e) {
        char* x = *p++;
        if (IS_HALF_CELL_ALIGNED(x) && x) {
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;
            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            // Mark the number heap; we can mark these Cells directly to avoid the virtual call cost
            for (size_t block = 0; block < usedNumberBlocks; block++) {
                if ((numberBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    Collector::markCell(reinterpret_cast<JSCell*>(xAsBits));
                    goto endMarkLoop;
                }
            }

            // Mark the primary heap
            for (size_t block = 0; block < usedPrimaryBlocks; block++) {
                if ((primaryBlocks[block] == blockAddr) & (offset <= lastCellOffset)) {
                    if (((CollectorCell*)xAsBits)->u.freeCell.zeroIfFree != 0) {
                        JSCell* imp = reinterpret_cast<JSCell*>(xAsBits);
                        if (!imp->marked())
                            imp->mark();
                    }
                    break;
                }
            }
        endMarkLoop:
            ;
        }
    }
}

In suspendThread and resumeThread, the platform branches are reindented:

#if PLATFORM(DARWIN)
    thread_suspend(platformThread);
#elif PLATFORM(WIN_OS)
    SuspendThread(platformThread.handle);
#else
#error Need a way to suspend threads on this platform
#endif
…
#if PLATFORM(DARWIN)
    thread_resume(platformThread);
#elif PLATFORM(WIN_OS)
    ResumeThread(platformThread.handle);
#else
#error Need a way to resume threads on this platform
#endif

In getPlatformThreadRegisters, likewise:

#if PLATFORM(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif PLATFORM(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif PLATFORM(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif PLATFORM(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION,
            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end PLATFORM(DARWIN)

#elif PLATFORM(WIN_OS) && PLATFORM(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread.handle, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform

In otherThreadStackPointer:

#if PLATFORM(X86)
    return (void*)regs.__esp;
#elif PLATFORM(X86_64)
    return (void*)regs.__rsp;
#elif PLATFORM(PPC) || PLATFORM(PPC64)
    return (void*)regs.__r1;
#else
#error Unknown Architecture
…
#if PLATFORM(X86)
    return (void*)regs.esp;
#elif PLATFORM(X86_64)
    return (void*)regs.rsp;
#elif (PLATFORM(PPC) || PLATFORM(PPC64))
    return (void*)regs.r1;
#else
#error Unknown Architecture
…
// end PLATFORM(DARWIN)
#elif PLATFORM(X86) && PLATFORM(WIN_OS)
    return (void*)(uintptr_t)regs.Esp;
#else
#error Need a way to get the stack pointer for another thread on this platform

markOtherThreadConservatively and the top-level scan, reindented:

void Collector::markOtherThreadConservatively(Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markStackObjectsConservatively((void*)&regs, (void*)((char*)&regs + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markStackObjectsConservatively(stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

void Collector::markStackObjectsConservatively()
{
    markCurrentThreadConservatively();

#if USE(MULTIPLE_THREADS)
    for (Thread* thread = registeredThreads; thread != NULL; thread = thread->next) {
        if (!pthread_equal(thread->posixThread, pthread_self())) {
            markOtherThreadConservatively(thread);
        }
    }
#endif
}
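markStackObjectsConservatively treats every pointer-sized word on the stack as a possible cell pointer and filters with cheap arithmetic before touching the heap: the value must be non-zero and at least half-cell aligned, and, after rounding down to a cell boundary, it must fall inside a registered block at an offset no greater than the last cell. A standalone sketch of that filter, with simplified constants and a hypothetical block address — not WebKit code:

#include <cstddef>
#include <cstdint>
#include <cstdio>

const size_t BLOCK_SIZE = 16 * 4096;
const size_t BLOCK_OFFSET_MASK = BLOCK_SIZE - 1;
const size_t CELL_SIZE = 64;         // 64-bit cell size from collector.h
const size_t CELL_MASK = CELL_SIZE - 1;
const size_t CELL_ALIGN_MASK = ~CELL_MASK;
const size_t CELLS_PER_BLOCK = 1000; // stand-in; the real value is derived from the block layout

static bool looksLikeCell(uintptr_t x, uintptr_t knownBlock)
{
    if (!x || (x & (CELL_MASK >> 1)))        // the IS_HALF_CELL_ALIGNED(x) && x test
        return false;
    uintptr_t xAsBits = x & CELL_ALIGN_MASK; // round down to a cell boundary
    uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
    uintptr_t blockAddr = xAsBits - offset;
    const size_t lastCellOffset = CELL_SIZE * (CELLS_PER_BLOCK - 1);
    // the real code loops over heap.blocks instead of comparing one known block
    return blockAddr == knownBlock && offset <= lastCellOffset;
}

int main()
{
    uintptr_t block = 0x230000; // pretend this 64k-aligned block address is registered
    std::printf("%d\n", looksLikeCell(block + 3 * CELL_SIZE, block)); // 1: plausible cell
    std::printf("%d\n", looksLikeCell(block + 7, block));             // 0: misaligned word
}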
protect and unprotect pick up the new pointer style:

void Collector::protect(JSValue* k)
{
    ASSERT(k);
…
    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().add(k->asCell());
}

void Collector::unprotect(JSValue* k)
{
    ASSERT(k);
…
    if (JSImmediate::isImmediate(k))
        return;

    protectedValues().remove(k->asCell());
}

In collectOnMainThreadOnly:

    if (JSImmediate::isImmediate(value))
        return;

    JSCell* cell = value->asCell();

markProtectedObjects, reindented:

void Collector::markProtectedObjects()
{
    ProtectCountSet& protectedValues = KJS::protectedValues();
    ProtectCountSet::iterator end = protectedValues.end();
    for (ProtectCountSet::iterator it = protectedValues.begin(); it != end; ++it) {
        JSCell* val = it->first;
        if (!val->marked())
            val->mark();
    }
}

In sweep, pointer declarations and casts lose the space before the *:

    Cell* cell = curBlock->cells + i;
…
    JSCell* imp = reinterpret_cast<JSCell*>(cell);
…
    heap.blocks = (CollectorBlock**)fastRealloc(heap.blocks, heap.numBlocks * sizeof(CollectorBlock*));

collect, reindented:

bool Collector::collect()
{
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());

    ASSERT((primaryHeap.operationInProgress == NoOperation) | (numberHeap.operationInProgress == NoOperation));
    if ((primaryHeap.operationInProgress != NoOperation) | (numberHeap.operationInProgress != NoOperation))
        abort();

    primaryHeap.operationInProgress = Collection;
    numberHeap.operationInProgress = Collection;

    bool currentThreadIsMainThread = onMainThread();

    // MARK: first mark all referenced objects recursively starting out from the set of root objects

#ifndef NDEBUG
    // Forbid malloc during the mark phase. Marking a thread suspends it, so
    // a malloc inside mark() would risk a deadlock with a thread that had been
    // suspended while holding the malloc lock.
    fastMallocForbid();
#endif

    markStackObjectsConservatively();
    markProtectedObjects();
    List::markProtectedLists();
#if USE(MULTIPLE_THREADS)
    if (!currentThreadIsMainThread)
        markMainThreadOnlyObjects();
#endif

#ifndef NDEBUG
    fastMallocAllow();
#endif

    size_t originalLiveObjects = primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
    size_t numLiveObjects = sweep<PrimaryHeap>(currentThreadIsMainThread);
    numLiveObjects += sweep<NumberHeap>(currentThreadIsMainThread);

    primaryHeap.operationInProgress = NoOperation;
    numberHeap.operationInProgress = NoOperation;

    return numLiveObjects < originalLiveObjects;
}

The counting helpers and typeName, reindented:

size_t Collector::size()
{
    return primaryHeap.numLiveObjects + numberHeap.numLiveObjects;
}

size_t Collector::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject::head()) {
        JSGlobalObject* o = JSGlobalObject::head();
        do {
            ++count;
            o = o->next();
        } while (o != JSGlobalObject::head());
    }
    return count;
}

size_t Collector::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject::head()) {
        JSGlobalObject* o = JSGlobalObject::head();
        do {
            if (protectedValues().contains(o))
                ++count;
            o = o->next();
        } while (o != JSGlobalObject::head());
    }
    return count;
}

size_t Collector::protectedObjectCount()
{
    return protectedValues().size();
}

static const char* typeName(JSCell* val)
{
    const char* name = "???";
    switch (val->type()) {
        case UnspecifiedType:
            break;
        case UndefinedType:
            name = "undefined";
            break;
        case NullType:
            name = "null";
            break;
        case BooleanType:
            name = "boolean";
            break;
        case StringType:
            name = "string";
            break;
        case NumberType:
            name = "number";
            break;
        case ObjectType: {
            const ClassInfo* info = static_cast<JSObject*>(val)->classInfo();
            name = info ? info->className : "Object";
            break;
        }
        case GetterSetterType:
            name = "gettersetter";
            break;
    }
    return name;
}
trunk/JavaScriptCore/kjs/collector.h
r34088 → r34360. The same style cleanup as collector.cpp: two lines are removed from the license header, and everything inside namespace KJS is reindented to four-space WebKit style with the * attached to the type in pointer declarations. Removed:

// -*- c-basic-offset: 2 -*-
 * This file is part of the KDE libraries

The body of the file reads as follows after r34360:

namespace KJS {

    class JSCell;
    class JSValue;
    class CollectorBlock;

    class Collector {
    public:
        class Thread;
        enum HeapType { PrimaryHeap, NumberHeap };

#ifdef JAVASCRIPTCORE_BUILDING_ALL_IN_ONE_FILE
        // We can inline these functions because everything is compiled as
        // one file, so the heapAllocate template definitions are available.
        // However, allocateNumber is used via jsNumberCell outside JavaScriptCore.
        // Thus allocateNumber needs to provide a non-inline version too.
        static void* allocate(size_t s) { return heapAllocate<PrimaryHeap>(s); }
        static void* inlineAllocateNumber(size_t s) { return heapAllocate<NumberHeap>(s); }
#else
        static void* allocate(size_t);
#endif
        static void* allocateNumber(size_t s);

        static bool collect();
        static bool isBusy(); // true if an allocation or collection is in progress

        static const size_t minExtraCostSize = 256;

        static void reportExtraMemoryCost(size_t cost);

        static size_t size();

        static void protect(JSValue*);
        static void unprotect(JSValue*);

        static void collectOnMainThreadOnly(JSValue*);

        static size_t globalObjectCount();
        static size_t protectedObjectCount();
        static size_t protectedGlobalObjectCount();
        static HashCountedSet<const char*>* protectedObjectTypeCounts();

        static void registerThread();

        static void registerAsMainThread();

        static bool isCellMarked(const JSCell*);
        static void markCell(JSCell*);

        static void markStackObjectsConservatively(void* start, void* end);

    private:
        template <Collector::HeapType heapType> static void* heapAllocate(size_t s);
        template <Collector::HeapType heapType> static size_t sweep(bool);
        static const CollectorBlock* cellBlock(const JSCell*);
        static CollectorBlock* cellBlock(JSCell*);
        static size_t cellOffset(const JSCell*);

        Collector();

        static void recordExtraCost(size_t);
        static void markProtectedObjects();
        static void markMainThreadOnlyObjects();
        static void markCurrentThreadConservatively();
        static void markCurrentThreadConservativelyInternal();
        static void markOtherThreadConservatively(Thread*);
        static void markStackObjectsConservatively();

        static size_t mainThreadOnlyObjectCount;
        static bool memoryFull;
    };

    // tunable parameters
    template<size_t bytesPerWord> struct CellSize;

    // cell size needs to be a power of two for certain optimizations in collector.cpp
    template<> struct CellSize<sizeof(uint32_t)> { static const size_t m_value = 32; }; // 32-bit
    template<> struct CellSize<sizeof(uint64_t)> { static const size_t m_value = 64; }; // 64-bit
    const size_t BLOCK_SIZE = 16 * 4096; // 64k

    // derived constants
    const size_t BLOCK_OFFSET_MASK = BLOCK_SIZE - 1;
    const size_t BLOCK_MASK = ~BLOCK_OFFSET_MASK;
    const size_t MINIMUM_CELL_SIZE = CellSize<sizeof(void*)>::m_value;
    const size_t CELL_ARRAY_LENGTH = (MINIMUM_CELL_SIZE / sizeof(double)) + (MINIMUM_CELL_SIZE % sizeof(double) != 0 ? sizeof(double) : 0);
    const size_t CELL_SIZE = CELL_ARRAY_LENGTH * sizeof(double);
    const size_t SMALL_CELL_SIZE = CELL_SIZE / 2;
    const size_t CELL_MASK = CELL_SIZE - 1;
    const size_t CELL_ALIGN_MASK = ~CELL_MASK;
    const size_t CELLS_PER_BLOCK = (BLOCK_SIZE * 8 - sizeof(uint32_t) * 8 - sizeof(void *) * 8 - 2 * (7 + 3 * 8)) / (CELL_SIZE * 8 + 2);
    const size_t SMALL_CELLS_PER_BLOCK = 2 * CELLS_PER_BLOCK;
    const size_t BITMAP_SIZE = (CELLS_PER_BLOCK + 7) / 8;
    const size_t BITMAP_WORDS = (BITMAP_SIZE + 3) / sizeof(uint32_t);

    struct CollectorBitmap {
        uint32_t bits[BITMAP_WORDS];
        bool get(size_t n) const { return !!(bits[n >> 5] & (1 << (n & 0x1F))); }
        void set(size_t n) { bits[n >> 5] |= (1 << (n & 0x1F)); }
        void clear(size_t n) { bits[n >> 5] &= ~(1 << (n & 0x1F)); }
        void clearAll() { memset(bits, 0, sizeof(bits)); }
    };

    struct CollectorCell {
        union {
            double memory[CELL_ARRAY_LENGTH];
            struct {
                void* zeroIfFree;
                ptrdiff_t next;
            } freeCell;
        } u;
    };

    struct SmallCollectorCell {
        union {
            double memory[CELL_ARRAY_LENGTH / 2];
            struct {
                void* zeroIfFree;
                ptrdiff_t next;
            } freeCell;
        } u;
    };

    class CollectorBlock {
    public:
        CollectorCell cells[CELLS_PER_BLOCK];
        uint32_t usedCells;
        CollectorCell* freeList;
        CollectorBitmap marked;
        CollectorBitmap collectOnMainThreadOnly;
    };

    class SmallCellCollectorBlock {
    public:
        SmallCollectorCell cells[SMALL_CELLS_PER_BLOCK];
        uint32_t usedCells;
        SmallCollectorCell* freeList;
        CollectorBitmap marked;
        CollectorBitmap collectOnMainThreadOnly;
    };

    enum OperationInProgress { NoOperation, Allocation, Collection };

    struct CollectorHeap {
        CollectorBlock** blocks;
        size_t numBlocks;
        size_t usedBlocks;
        size_t firstBlockWithPossibleSpace;

        size_t numLiveObjects;
        size_t numLiveObjectsAtLastCollect;
        size_t extraCost;

        OperationInProgress operationInProgress;
    };

    inline const CollectorBlock* Collector::cellBlock(const JSCell* cell)
    {
        return reinterpret_cast<const CollectorBlock*>(reinterpret_cast<uintptr_t>(cell) & BLOCK_MASK);
    }

    inline CollectorBlock* Collector::cellBlock(JSCell* cell)
    {
        return const_cast<CollectorBlock*>(cellBlock(const_cast<const JSCell*>(cell)));
    }

    inline size_t Collector::cellOffset(const JSCell* cell)
    {
        return (reinterpret_cast<uintptr_t>(cell) & BLOCK_OFFSET_MASK) / CELL_SIZE;
    }

    inline bool Collector::isCellMarked(const JSCell* cell)
    {
        return cellBlock(cell)->marked.get(cellOffset(cell));
    }

    inline void Collector::markCell(JSCell* cell)
    {
        cellBlock(cell)->marked.set(cellOffset(cell));
    }

    inline void Collector::reportExtraMemoryCost(size_t cost)
    {
        if (cost > minExtraCostSize)
            recordExtraCost(cost / (CELL_SIZE * 2));
    }

} // namespace KJS
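The inline helpers at the bottom of the header are pure mask arithmetic: blocks live on BLOCK_SIZE boundaries, so a cell pointer's block is recovered by masking off the low bits, and its bitmap index by dividing the in-block offset by the cell size. A small self-contained sketch with a made-up, suitably aligned address — the constant values below are the 64-bit ones, and this is an illustration rather than WebKit code:

#include <cstddef>
#include <cstdint>
#include <cstdio>

const size_t BLOCK_SIZE = 16 * 4096; // 64k, as in collector.h
const size_t BLOCK_OFFSET_MASK = BLOCK_SIZE - 1;
const size_t BLOCK_MASK = ~BLOCK_OFFSET_MASK;
const size_t CELL_SIZE = 64;         // the derived value on a 64-bit build

int main()
{
    uintptr_t cell = 0x230000 + 5 * CELL_SIZE; // hypothetical cell in a 64k-aligned block
    uintptr_t block = cell & BLOCK_MASK;                    // cellBlock: yields 0x230000
    size_t offset = (cell & BLOCK_OFFSET_MASK) / CELL_SIZE; // cellOffset: yields 5
    std::printf("block %#lx, cell offset %zu\n", (unsigned long)block, offset);
}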