Ignore:
Timestamp:
Sep 14, 2017, 4:08:54 PM (8 years ago)
Author:
[email protected]
Message:

AddressSanitizer: stack-buffer-underflow in JSC::Probe::Page::Page
https://bugs.webkit.org/show_bug.cgi?id=176874
<rdar://problem/34436415>

Reviewed by Saam Barati.

Source/JavaScriptCore:

  1. Make Probe::Stack play nice with ASan by:
     a. using a local memcpy implementation that suppresses ASan on ASan builds. We don't want to use std::memcpy() which validates stack memory because we are intentionally copying stack memory beyond the current frame.
     b. changing Stack::s_chunkSize to equal sizeof(uintptr_t) on ASan builds. This ensures that Page::flushWrites() only writes stack memory that was modified by a probe. The probes should only modify stack memory that belongs to JSC stack data structures. We don't want to inadvertently modify adjacent words that may belong to ASan (which may happen if s_chunkSize is larger than sizeof(uintptr_t)).
     c. fixing a bug in Page dirtyBits management for when the size of the value to write is greater than s_chunkSize. The fix is generic, but in practice, this currently only manifests on 32-bit ASan builds because sizeof(uintptr_t) and s_chunkSize are 32-bit, and we may write 64-bit values.
     d. making Page::m_dirtyBits 64 bits always. This maximizes the number of s_chunksPerPage we can have even on ASan builds.
  2. Fixed the bottom most Probe::Context and Probe::Stack get/set methods to use std::memcpy to avoid strict aliasing issues.
  3. Optimized the implementation of Page::physicalAddressFor().
  4. Optimized the implementation of Stack::set() in the recording of the low watermark. We just record the lowest raw pointer now, and only compute the alignment to its chunk boundary later when the low watermark is requested.
  5. Changed a value in testmasm to make the test less vulnerable to rounding issues.

No new test needed because this is already covered by testmasm with ASan enabled.

  • assembler/ProbeContext.h:

(JSC::Probe::CPUState::gpr const):
(JSC::Probe::CPUState::spr const):
(JSC::Probe::Context::gpr):
(JSC::Probe::Context::spr):
(JSC::Probe::Context::fpr):
(JSC::Probe::Context::gprName):
(JSC::Probe::Context::sprName):
(JSC::Probe::Context::fprName):
(JSC::Probe::Context::gpr const):
(JSC::Probe::Context::spr const):
(JSC::Probe::Context::fpr const):
(JSC::Probe::Context::pc):
(JSC::Probe::Context::fp):
(JSC::Probe::Context::sp):
(JSC::Probe:: const): Deleted.

  • assembler/ProbeStack.cpp:

(JSC::Probe::copyStackPage):
(JSC::Probe::Page::Page):
(JSC::Probe::Page::flushWrites):

  • assembler/ProbeStack.h:

(JSC::Probe::Page::get):
(JSC::Probe::Page::set):
(JSC::Probe::Page::dirtyBitFor):
(JSC::Probe::Page::physicalAddressFor):
(JSC::Probe::Stack::lowWatermark):
(JSC::Probe::Stack::get):
(JSC::Probe::Stack::set):

  • assembler/testmasm.cpp:

(JSC::testProbeModifiesStackValues):

Source/WTF:

Added a convenience version of roundUpToMultipleOf() so that it can be applied to
pointers without the client having to cast explicitly.

  • wtf/StdLibExtras.h:

(WTF::roundUpToMultipleOf):

File:
1 edited

Legend:

Unmodified
Added
Removed
  • trunk/Source/JavaScriptCore/assembler/ProbeStack.h

    r222009 r222058  
    2626#pragma once
    2727
     28#include "CPU.h"
    2829#include <wtf/HashMap.h>
    2930#include <wtf/StdLibExtras.h>
     
    5758    T get(void* logicalAddress)
    5859    {
    59         return *physicalAddressFor<T*>(logicalAddress);
     60        void* from = physicalAddressFor(logicalAddress);
     61        typename std::remove_const<T>::type to { };
     62        std::memcpy(&to, from, sizeof(to)); // Use std::memcpy to avoid strict aliasing issues.
     63        return to;
     64    }
     65    template<typename T>
     66    T get(void* logicalBaseAddress, ptrdiff_t offset)
     67    {
     68        return get<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset);
    6069    }
    6170
     
    6372    void set(void* logicalAddress, T value)
    6473    {
    65         m_dirtyBits |= dirtyBitFor(logicalAddress);
    66         *physicalAddressFor<T*>(logicalAddress) = value;
     74        if (sizeof(T) <= s_chunkSize)
     75            m_dirtyBits |= dirtyBitFor(logicalAddress);
     76        else {
     77            size_t numberOfChunks = roundUpToMultipleOf<sizeof(T)>(s_chunkSize) / s_chunkSize;
     78            uint8_t* dirtyAddress = reinterpret_cast<uint8_t*>(logicalAddress);
     79            for (size_t i = 0; i < numberOfChunks; ++i, dirtyAddress += s_chunkSize)
     80                m_dirtyBits |= dirtyBitFor(dirtyAddress);
     81        }
     82        void* to = physicalAddressFor(logicalAddress);
     83        std::memcpy(to, &value, sizeof(T)); // Use std::memcpy to avoid strict aliasing issues.
     84    }
     85    template<typename T>
     86    void set(void* logicalBaseAddress, ptrdiff_t offset, T value)
     87    {
     88        set<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset, value);
    6789    }
    6890
     
    7597
    7698private:
    77     uintptr_t dirtyBitFor(void* logicalAddress)
     99    uint64_t dirtyBitFor(void* logicalAddress)
    78100    {
    79101        uintptr_t offset = reinterpret_cast<uintptr_t>(logicalAddress) & s_pageMask;
    80         return static_cast<uintptr_t>(1) << (offset >> s_chunkSizeShift);
    81     }
    82 
    83     template<typename T, typename = typename std::enable_if<std::is_pointer<T>::value>::type>
    84     T physicalAddressFor(void* logicalAddress)
    85     {
    86         uintptr_t offset = reinterpret_cast<uintptr_t>(logicalAddress) & s_pageMask;
    87         void* physicalAddress = reinterpret_cast<uint8_t*>(&m_buffer) + offset;
    88         return reinterpret_cast<T>(physicalAddress);
     102        return static_cast<uint64_t>(1) << (offset >> s_chunkSizeShift);
     103    }
     104
     105    void* physicalAddressFor(void* logicalAddress)
     106    {
     107        return reinterpret_cast<uint8_t*>(logicalAddress) + m_physicalAddressOffset;
    89108    }
    90109
     
    92111
    93112    void* m_baseLogicalAddress { nullptr };
    94     uintptr_t m_dirtyBits { 0 };
    95 
     113    ptrdiff_t m_physicalAddressOffset;
     114    uint64_t m_dirtyBits { 0 };
     115
     116#if ASAN_ENABLED
     117    // The ASan stack may contain poisoned words that may be manipulated at ASan's discretion.
     118    // We would never touch those words anyway, but let's ensure that the page size is set
     119    // such that the chunk size is guaranteed to be exactly sizeof(uintptr_t) so that we won't
     120    // inadvertently overwrite one of ASan's words on the stack when we copy back the dirty
     121    // chunks.
     122    // FIXME: we should consider using the same page size for both ASan and non-ASan builds.
     123    // https://bugs.webkit.org/show_bug.cgi?id=176961
     124    static constexpr size_t s_pageSize = 64 * sizeof(uintptr_t); // because there are 64 bits in m_dirtyBits.
     125#else // not ASAN_ENABLED
    96126    static constexpr size_t s_pageSize = 1024;
     127#endif // ASAN_ENABLED
    97128    static constexpr uintptr_t s_pageMask = s_pageSize - 1;
    98     static constexpr size_t s_chunksPerPage = sizeof(uintptr_t) * 8; // sizeof(m_dirtyBits) in bits.
     129    static constexpr size_t s_chunksPerPage = sizeof(uint64_t) * 8; // number of bits in m_dirtyBits.
    99130    static constexpr size_t s_chunkSize = s_pageSize / s_chunksPerPage;
    100131    static constexpr uintptr_t s_chunkMask = s_chunkSize - 1;
    101 #if USE(JSVALUE64)
     132#if ASAN_ENABLED
     133    static_assert(s_chunkSize == sizeof(uintptr_t), "bad chunkSizeShift");
     134    static constexpr size_t s_chunkSizeShift = is64Bit() ? 3 : 2;
     135#else // no ASAN_ENABLED
    102136    static constexpr size_t s_chunkSizeShift = 4;
    103 #else
    104     static constexpr size_t s_chunkSizeShift = 5;
    105 #endif
     137#endif // ASAN_ENABLED
    106138    static_assert(s_pageSize > s_chunkSize, "bad pageSize or chunkSize");
    107139    static_assert(s_chunkSize == (1 << s_chunkSizeShift), "bad chunkSizeShift");
    108 
    109140
    110141    typedef typename std::aligned_storage<s_pageSize, std::alignment_of<uintptr_t>::value>::type Buffer;
     
    121152    Stack(Stack&& other);
    122153
    123     void* lowWatermark() { return m_lowWatermark; }
    124 
    125     template<typename T>
    126     typename std::enable_if<!std::is_same<double, typename std::remove_cv<T>::type>::value, T>::type get(void* address)
    127     {
    128         Page* page = pageFor(address);
    129         return page->get<T>(address);
    130     }
    131 
    132     template<typename T, typename = typename std::enable_if<!std::is_same<double, typename std::remove_cv<T>::type>::value>::type>
    133     void set(void* address, T value)
    134     {
    135         Page* page = pageFor(address);
    136         page->set<T>(address, value);
    137 
     154    void* lowWatermark()
     155    {
    138156        // We use the chunkAddress for the low watermark because we'll be doing write backs
    139157        // to the stack in increments of chunks. Hence, we'll treat the lowest address of
    140158        // the chunk as the low watermark of any given set address.
    141         void* chunkAddress = Page::chunkAddressFor(address);
    142         if (chunkAddress < m_lowWatermark)
    143             m_lowWatermark = chunkAddress;
    144     }
    145 
    146     template<typename T>
    147     typename std::enable_if<std::is_same<double, typename std::remove_cv<T>::type>::value, T>::type get(void* address)
     159        return Page::chunkAddressFor(m_lowWatermark);
     160    }
     161
     162    template<typename T>
     163    T get(void* address)
    148164    {
    149165        Page* page = pageFor(address);
    150         return bitwise_cast<double>(page->get<uint64_t>(address));
    151     }
    152 
    153     template<typename T, typename = typename std::enable_if<std::is_same<double, typename std::remove_cv<T>::type>::value>::type>
    154     void set(void* address, double value)
    155     {
    156         set<uint64_t>(address, bitwise_cast<uint64_t>(value));
     166        return page->get<T>(address);
     167    }
     168    template<typename T>
     169    T get(void* logicalBaseAddress, ptrdiff_t offset)
     170    {
     171        return get<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset);
     172    }
     173
     174    template<typename T>
     175    void set(void* address, T value)
     176    {
     177        Page* page = pageFor(address);
     178        page->set<T>(address, value);
     179
     180        if (address < m_lowWatermark)
     181            m_lowWatermark = address;
     182    }
     183
     184    template<typename T>
     185    void set(void* logicalBaseAddress, ptrdiff_t offset, T value)
     186    {
     187        set<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset, value);
    157188    }
    158189
Note: See TracChangeset for help on using the changeset viewer.