Ignore:
Timestamp:
Dec 22, 2008, 11:08:59 PM (16 years ago)
Author:
[email protected]
Message:

2008-12-22 Gavin Barraclough <[email protected]>

Reviewed by Oliver Hunt.

Fix rounding / bounds / signed comparison bug in ExecutableAllocator.

ExecutableAllocator::alloc assumed that m_freePtr would be aligned. This was
not always true, since the first allocation from an additional pool would not
be rounded up. Subsequent allocations would be unaligned, and too much memory
could be erroneously allocated from the pool, when the size requested was
available, but the size rounded up to word granularity was not available in the
pool. This may result in the value of m_freePtr being greater than m_end.

Under these circumstances, the unsigned check for space will always pass,
resulting in pointers to memory outside of the arena being returned, and
ultimately segfaulty goodness when attempting to memcpy the hot freshly jitted
code from the AssemblerBuffer.

https://bugs.webkit.org/show_bug.cgi?id=22974
... and probably many, many more.

  • jit/ExecutableAllocator.h: (JSC::ExecutablePool::alloc): (JSC::ExecutablePool::roundUpAllocationSize): (JSC::ExecutablePool::ExecutablePool): (JSC::ExecutablePool::poolAllocate):
File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/JavaScriptCore/jit/ExecutableAllocator.h

(diff below reconstructed in unified format; in the original rendering the old/new
line-number columns were fused into the text and the Added/Removed legend colors
were lost)

--- trunk/JavaScriptCore/jit/ExecutableAllocator.h	(revision 39083)
+++ trunk/JavaScriptCore/jit/ExecutableAllocator.h	(revision 39450)
@@ -36,5 +36,5 @@
 #include <limits>
 
-#define JIT_ALLOCATOR_PAGE_MASK (ExecutableAllocator::pageSize - 1)
+#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
 #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
 
@@ -57,8 +57,13 @@
     void* alloc(size_t n)
     {
-        if (n < static_cast<size_t>(m_end - m_freePtr)) {
-            char* result = m_freePtr;
-            // ensure m_freePtr is word aligned.
-            m_freePtr += n + (sizeof(void*) - n & (sizeof(void*) - 1));
+        ASSERT(m_freePtr <= m_end);
+
+        // Round 'n' up to a multiple of word size; if all allocations are of
+        // word sized quantities, then all subsequent allocations will be aligned.
+        n = roundUpAllocationSize(n, sizeof(void*));
+
+        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
+            void* result = m_freePtr;
+            m_freePtr += n;
             return result;
         }
@@ -82,35 +87,19 @@
     static void systemRelease(const Allocation& alloc);
 
-    ExecutablePool(size_t n)
+    inline size_t roundUpAllocationSize(size_t request, size_t granularity)
     {
-        size_t allocSize = sizeForAllocation(n);
-        Allocation mem = systemAlloc(allocSize);
-        m_pools.append(mem);
-        m_freePtr = mem.pages;
-        if (!m_freePtr)
-            CRASH(); // Failed to allocate
-        m_end = m_freePtr + allocSize;
+        if ((std::numeric_limits<size_t>::max() - granularity) <= request)
+            CRASH(); // Allocation is too large
+
+        // Round up to next page boundary
+        size_t size = request + (granularity - 1);
+        size = size & ~(granularity - 1);
+        ASSERT(size >= request);
+        return size;
     }
 
-    static inline size_t sizeForAllocation(size_t request);
+    ExecutablePool(size_t n);
 
-    void* poolAllocate(size_t n)
-    {
-        size_t allocSize = sizeForAllocation(n);
-
-        Allocation result = systemAlloc(allocSize);
-        if (!result.pages)
-            CRASH(); // Failed to allocate
-
-        ASSERT(m_end >= m_freePtr);
-        if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
-            // Replace allocation pool
-            m_freePtr = result.pages + n;
-            m_end = result.pages + allocSize;
-        }
-
-        m_pools.append(result);
-        return result.pages;
-    }
+    void* poolAllocate(size_t n);
 
     char* m_freePtr;
@@ -154,14 +143,32 @@
 };
 
-inline size_t ExecutablePool::sizeForAllocation(size_t request)
+inline ExecutablePool::ExecutablePool(size_t n)
 {
-    if ((std::numeric_limits<size_t>::max() - ExecutableAllocator::pageSize) <= request)
-        CRASH(); // Allocation is too large
+    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
+    Allocation mem = systemAlloc(allocSize);
+    m_pools.append(mem);
+    m_freePtr = mem.pages;
+    if (!m_freePtr)
+        CRASH(); // Failed to allocate
+    m_end = m_freePtr + allocSize;
+}
+
+inline void* ExecutablePool::poolAllocate(size_t n)
+{
+    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
 
-    // Round up to next page boundary
-    size_t size = request + JIT_ALLOCATOR_PAGE_MASK;
-    size = size & ~JIT_ALLOCATOR_PAGE_MASK;
-    ASSERT(size >= request);
-    return size;
+    Allocation result = systemAlloc(allocSize);
+    if (!result.pages)
+        CRASH(); // Failed to allocate
+
+    ASSERT(m_end >= m_freePtr);
+    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
+        // Replace allocation pool
+        m_freePtr = result.pages + n;
+        m_end = result.pages + allocSize;
+    }
+
+    m_pools.append(result);
+    return result.pages;
 }
 
Note: See TracChangeset for help on using the changeset viewer.