Timestamp:
Sep 14, 2017, 4:08:54 PM
Author:
[email protected]
Message:

AddressSanitizer: stack-buffer-underflow in JSC::Probe::Page::Page
https://bugs.webkit.org/show_bug.cgi?id=176874
<rdar://problem/34436415>

Reviewed by Saam Barati.

Source/JavaScriptCore:

  1. Make Probe::Stack play nice with ASan by:
     a. using a local memcpy implementation that suppresses ASan on ASan builds. We don't want to use std::memcpy(), which validates stack memory, because we are intentionally copying stack memory beyond the current frame.
     b. changing Stack::s_chunkSize to equal sizeof(uintptr_t) on ASan builds. This ensures that Page::flushWrites() only writes stack memory that was modified by a probe. The probes should only modify stack memory that belongs to JSC stack data structures. We don't want to inadvertently modify adjacent words that may belong to ASan (which may happen if s_chunkSize is larger than sizeof(uintptr_t)).
     c. fixing a bug in Page dirtyBits management for when the size of the value to write is greater than s_chunkSize. The fix is generic, but in practice it currently only manifests on 32-bit ASan builds because sizeof(uintptr_t) and s_chunkSize are 32-bit, and we may write 64-bit values.
     d. making Page::m_dirtyBits 64 bits always. This maximizes the number of s_chunksPerPage we can have even on ASan builds.
  2. Fixed the bottom-most Probe::Context and Probe::Stack get/set methods to use std::memcpy to avoid strict aliasing issues (a minimal sketch follows this list).
  3. Optimized the implementation of Page::physicalAddressFor().
  4. Optimized the implementation of Stack::set() in the recording of the low watermark. We now just record the lowest raw pointer, and only compute the alignment to its chunk boundary later when the low watermark is requested.
  5. Changed a value in testmasm to make the test less vulnerable to rounding issues.
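
For illustration, here is a minimal, self-contained sketch of the std::memcpy-based accessors described in item 2. The names readSlot/writeSlot are illustrative only and not part of the patch; the real accessors live on Probe::CPUState and Probe::Page, as shown in the diff below.

    #include <cstdint>
    #include <cstring>

    // Reinterpret the bits of a register-sized stack slot as T without a
    // strict-aliasing violation: copy the bytes into a local of the destination
    // type instead of casting the pointer and dereferencing it.
    template<typename T>
    T readSlot(const uintptr_t& slot)
    {
        static_assert(sizeof(T) <= sizeof(uintptr_t), "T must fit in one slot");
        T to { };
        std::memcpy(&to, &slot, sizeof(to)); // compilers typically lower this to a plain load
        return to;
    }

    template<typename T>
    void writeSlot(uintptr_t& slot, T value)
    {
        static_assert(sizeof(T) <= sizeof(uintptr_t), "T must fit in one slot");
        std::memcpy(&slot, &value, sizeof(value));
    }

    // Usage (hypothetical): store a pointer-sized integer, read it back as a pointer.
    //     uintptr_t slot = 0;
    //     writeSlot(slot, static_cast<uintptr_t>(0x1000));
    //     void* p = readSlot<void*>(slot);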

No new test needed because this is already covered by testmasm with ASan enabled.

  • assembler/ProbeContext.h:

(JSC::Probe::CPUState::gpr const):
(JSC::Probe::CPUState::spr const):
(JSC::Probe::Context::gpr):
(JSC::Probe::Context::spr):
(JSC::Probe::Context::fpr):
(JSC::Probe::Context::gprName):
(JSC::Probe::Context::sprName):
(JSC::Probe::Context::fprName):
(JSC::Probe::Context::gpr const):
(JSC::Probe::Context::spr const):
(JSC::Probe::Context::fpr const):
(JSC::Probe::Context::pc):
(JSC::Probe::Context::fp):
(JSC::Probe::Context::sp):
(JSC::Probe:: const): Deleted.

  • assembler/ProbeStack.cpp:

(JSC::Probe::copyStackPage):
(JSC::Probe::Page::Page):
(JSC::Probe::Page::flushWrites):

  • assembler/ProbeStack.h:

(JSC::Probe::Page::get):
(JSC::Probe::Page::set):
(JSC::Probe::Page::dirtyBitFor):
(JSC::Probe::Page::physicalAddressFor):
(JSC::Probe::Stack::lowWatermark):
(JSC::Probe::Stack::get):
(JSC::Probe::Stack::set):

  • assembler/testmasm.cpp:

(JSC::testProbeModifiesStackValues):

Source/WTF:

Added a convenience version of roundUpToMultipleOf() so that it can be applied to
pointers without the client having to cast explicitly.
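
For illustration, a sketch of what such a convenience overload can look like, assuming the existing integral roundUpToMultipleOf<divisor>() and a power-of-two divisor (the exact template in StdLibExtras.h may differ):

    #include <cstddef>
    #include <cstdint>

    // Integral version: round x up to the next multiple of a power-of-two divisor.
    template<size_t divisor>
    inline size_t roundUpToMultipleOf(size_t x)
    {
        static_assert(divisor && !(divisor & (divisor - 1)), "divisor must be a power of two");
        return (x + divisor - 1) & ~(divisor - 1);
    }

    // Pointer convenience version: lets a caller write
    // roundUpToMultipleOf<sizeof(uintptr_t)>(ptr) without casting to an integer first.
    template<size_t divisor, typename T>
    inline T* roundUpToMultipleOf(T* ptr)
    {
        return reinterpret_cast<T*>(roundUpToMultipleOf<divisor>(reinterpret_cast<size_t>(ptr)));
    }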

  • wtf/StdLibExtras.h:

(WTF::roundUpToMultipleOf):

Location:
trunk/Source/JavaScriptCore/assembler
Files:
4 edited

  • trunk/Source/JavaScriptCore/assembler/ProbeContext.h

    r222009 r222058  
    4646    inline double& fpr(FPRegisterID);
    4747
    48     template<typename T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
    49     T gpr(RegisterID) const;
    50     template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type* = nullptr>
    51     T gpr(RegisterID) const;
    52     template<typename T, typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
    53     T spr(SPRegisterID) const;
    54     template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type* = nullptr>
    55     T spr(SPRegisterID) const;
     48    template<typename T> T gpr(RegisterID) const;
     49    template<typename T> T spr(SPRegisterID) const;
    5650    template<typename T> T fpr(FPRegisterID) const;
    5751
     
    8680}
    8781
    88 template<typename T, typename std::enable_if<std::is_integral<T>::value>::type*>
     82template<typename T>
    8983T CPUState::gpr(RegisterID id) const
    9084{
    9185    CPUState* cpu = const_cast<CPUState*>(this);
    92     return static_cast<T>(cpu->gpr(id));
    93 }
    94 
    95 template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type*>
    96 T CPUState::gpr(RegisterID id) const
    97 {
    98     CPUState* cpu = const_cast<CPUState*>(this);
    99     return reinterpret_cast<T>(cpu->gpr(id));
    100 }
    101 
    102 template<typename T, typename std::enable_if<std::is_integral<T>::value>::type*>
     86    auto& from = cpu->gpr(id);
     87    typename std::remove_const<T>::type to { };
     88    std::memcpy(&to, &from, sizeof(to)); // Use std::memcpy to avoid strict aliasing issues.
     89    return to;
     90}
     91
     92template<typename T>
    10393T CPUState::spr(SPRegisterID id) const
    10494{
    10595    CPUState* cpu = const_cast<CPUState*>(this);
    106     return static_cast<T>(cpu->spr(id));
    107 }
    108 
    109 template<typename T, typename std::enable_if<std::is_pointer<T>::value>::type*>
    110 T CPUState::spr(SPRegisterID id) const
    111 {
    112     CPUState* cpu = const_cast<CPUState*>(this);
    113     return reinterpret_cast<T>(cpu->spr(id));
     96    auto& from = cpu->spr(id);
     97    typename std::remove_const<T>::type to { };
     98    std::memcpy(&to, &from, sizeof(to)); // Use std::memcpy to avoid strict aliasing issues.
     99    return to;
    114100}
    115101
     
    211197    { }
    212198
    213     uintptr_t& gpr(RegisterID id) { return m_state->cpu.gpr(id); }
    214     uintptr_t& spr(SPRegisterID id) { return m_state->cpu.spr(id); }
    215     double& fpr(FPRegisterID id) { return m_state->cpu.fpr(id); }
    216     const char* gprName(RegisterID id) { return m_state->cpu.gprName(id); }
    217     const char* sprName(SPRegisterID id) { return m_state->cpu.sprName(id); }
    218     const char* fprName(FPRegisterID id) { return m_state->cpu.fprName(id); }
    219 
    220     void*& pc() { return m_state->cpu.pc(); }
    221     void*& fp() { return m_state->cpu.fp(); }
    222     void*& sp() { return m_state->cpu.sp(); }
    223 
    224     template<typename T> T pc() { return m_state->cpu.pc<T>(); }
    225     template<typename T> T fp() { return m_state->cpu.fp<T>(); }
    226     template<typename T> T sp() { return m_state->cpu.sp<T>(); }
     199    uintptr_t& gpr(RegisterID id) { return cpu.gpr(id); }
     200    uintptr_t& spr(SPRegisterID id) { return cpu.spr(id); }
     201    double& fpr(FPRegisterID id) { return cpu.fpr(id); }
     202    const char* gprName(RegisterID id) { return cpu.gprName(id); }
     203    const char* sprName(SPRegisterID id) { return cpu.sprName(id); }
     204    const char* fprName(FPRegisterID id) { return cpu.fprName(id); }
     205
     206    template<typename T> T gpr(RegisterID id) const { return cpu.gpr<T>(id); }
     207    template<typename T> T spr(SPRegisterID id) const { return cpu.spr<T>(id); }
     208    template<typename T> T fpr(FPRegisterID id) const { return cpu.fpr<T>(id); }
     209
     210    void*& pc() { return cpu.pc(); }
     211    void*& fp() { return cpu.fp(); }
     212    void*& sp() { return cpu.sp(); }
     213
     214    template<typename T> T pc() { return cpu.pc<T>(); }
     215    template<typename T> T fp() { return cpu.fp<T>(); }
     216    template<typename T> T sp() { return cpu.sp<T>(); }
    227217
    228218    Stack& stack()
  • trunk/Source/JavaScriptCore/assembler/ProbeStack.cpp

    r222009 r222058  
    2828
    2929#include <memory>
     30#include <wtf/StdLibExtras.h>
    3031
    3132#if ENABLE(MASM_PROBE)
     
    3435namespace Probe {
    3536
     37#if ASAN_ENABLED
     38// FIXME: we should consider using the copy function for both ASan and non-ASan builds.
     39// https://bugs.webkit.org/show_bug.cgi?id=176961
     40SUPPRESS_ASAN
     41static void copyStackPage(void* dst, void* src, size_t size)
     42{
     43    ASSERT(roundUpToMultipleOf<sizeof(uintptr_t)>(dst) == dst);
     44    ASSERT(roundUpToMultipleOf<sizeof(uintptr_t)>(src) == src);
     45   
     46    uintptr_t* dstPointer = reinterpret_cast<uintptr_t*>(dst);
     47    uintptr_t* srcPointer = reinterpret_cast<uintptr_t*>(src);
     48    for (; size; size -= sizeof(uintptr_t))
     49        *dstPointer++ = *srcPointer++;
     50}
     51#else
     52#define copyStackPage(dst, src, size) std::memcpy(dst, src, size);
     53#endif
     54
    3655Page::Page(void* baseAddress)
    3756    : m_baseLogicalAddress(baseAddress)
     57    , m_physicalAddressOffset(reinterpret_cast<uint8_t*>(&m_buffer) - reinterpret_cast<uint8_t*>(baseAddress))
    3858{
    39     memcpy(&m_buffer, baseAddress, s_pageSize);
     59    copyStackPage(&m_buffer, baseAddress, s_pageSize);
    4060}
    4161
    4262void Page::flushWrites()
    4363{
    44     uintptr_t dirtyBits = m_dirtyBits;
     64    uint64_t dirtyBits = m_dirtyBits;
    4565    size_t offset = 0;
    4666    while (dirtyBits) {
     
    5777            uint8_t* src = reinterpret_cast<uint8_t*>(&m_buffer) + startOffset;
    5878            uint8_t* dst = reinterpret_cast<uint8_t*>(m_baseLogicalAddress) + startOffset;
    59             memcpy(dst, src, size);
     79            copyStackPage(dst, src, size);
    6080        }
    6181        dirtyBits = dirtyBits >> 1;
  • trunk/Source/JavaScriptCore/assembler/ProbeStack.h

    r222009 r222058  
    2626#pragma once
    2727
     28#include "CPU.h"
    2829#include <wtf/HashMap.h>
    2930#include <wtf/StdLibExtras.h>
     
    5758    T get(void* logicalAddress)
    5859    {
    59         return *physicalAddressFor<T*>(logicalAddress);
     60        void* from = physicalAddressFor(logicalAddress);
     61        typename std::remove_const<T>::type to { };
     62        std::memcpy(&to, from, sizeof(to)); // Use std::memcpy to avoid strict aliasing issues.
     63        return to;
     64    }
     65    template<typename T>
     66    T get(void* logicalBaseAddress, ptrdiff_t offset)
     67    {
     68        return get<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset);
    6069    }
    6170
     
    6372    void set(void* logicalAddress, T value)
    6473    {
    65         m_dirtyBits |= dirtyBitFor(logicalAddress);
    66         *physicalAddressFor<T*>(logicalAddress) = value;
     74        if (sizeof(T) <= s_chunkSize)
     75            m_dirtyBits |= dirtyBitFor(logicalAddress);
     76        else {
     77            size_t numberOfChunks = roundUpToMultipleOf<sizeof(T)>(s_chunkSize) / s_chunkSize;
     78            uint8_t* dirtyAddress = reinterpret_cast<uint8_t*>(logicalAddress);
     79            for (size_t i = 0; i < numberOfChunks; ++i, dirtyAddress += s_chunkSize)
     80                m_dirtyBits |= dirtyBitFor(dirtyAddress);
     81        }
     82        void* to = physicalAddressFor(logicalAddress);
     83        std::memcpy(to, &value, sizeof(T)); // Use std::memcpy to avoid strict aliasing issues.
     84    }
     85    template<typename T>
     86    void set(void* logicalBaseAddress, ptrdiff_t offset, T value)
     87    {
     88        set<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset, value);
    6789    }
    6890
     
    7597
    7698private:
    77     uintptr_t dirtyBitFor(void* logicalAddress)
     99    uint64_t dirtyBitFor(void* logicalAddress)
    78100    {
    79101        uintptr_t offset = reinterpret_cast<uintptr_t>(logicalAddress) & s_pageMask;
    80         return static_cast<uintptr_t>(1) << (offset >> s_chunkSizeShift);
    81     }
    82 
    83     template<typename T, typename = typename std::enable_if<std::is_pointer<T>::value>::type>
    84     T physicalAddressFor(void* logicalAddress)
    85     {
    86         uintptr_t offset = reinterpret_cast<uintptr_t>(logicalAddress) & s_pageMask;
    87         void* physicalAddress = reinterpret_cast<uint8_t*>(&m_buffer) + offset;
    88         return reinterpret_cast<T>(physicalAddress);
     102        return static_cast<uint64_t>(1) << (offset >> s_chunkSizeShift);
     103    }
     104
     105    void* physicalAddressFor(void* logicalAddress)
     106    {
     107        return reinterpret_cast<uint8_t*>(logicalAddress) + m_physicalAddressOffset;
    89108    }
    90109
     
    92111
    93112    void* m_baseLogicalAddress { nullptr };
    94     uintptr_t m_dirtyBits { 0 };
    95 
     113    ptrdiff_t m_physicalAddressOffset;
     114    uint64_t m_dirtyBits { 0 };
     115
     116#if ASAN_ENABLED
     117    // The ASan stack may contain poisoned words that may be manipulated at ASan's discretion.
     118    // We would never touch those words anyway, but let's ensure that the page size is set
     119    // such that the chunk size is guaranteed to be exactly sizeof(uintptr_t) so that we won't
     120    // inadvertently overwrite one of ASan's words on the stack when we copy back the dirty
     121    // chunks.
     122    // FIXME: we should consider using the same page size for both ASan and non-ASan builds.
     123    // https://bugs.webkit.org/show_bug.cgi?id=176961
     124    static constexpr size_t s_pageSize = 64 * sizeof(uintptr_t); // because there are 64 bits in m_dirtyBits.
     125#else // not ASAN_ENABLED
    96126    static constexpr size_t s_pageSize = 1024;
     127#endif // ASAN_ENABLED
    97128    static constexpr uintptr_t s_pageMask = s_pageSize - 1;
    98     static constexpr size_t s_chunksPerPage = sizeof(uintptr_t) * 8; // sizeof(m_dirtyBits) in bits.
     129    static constexpr size_t s_chunksPerPage = sizeof(uint64_t) * 8; // number of bits in m_dirtyBits.
    99130    static constexpr size_t s_chunkSize = s_pageSize / s_chunksPerPage;
    100131    static constexpr uintptr_t s_chunkMask = s_chunkSize - 1;
    101 #if USE(JSVALUE64)
     132#if ASAN_ENABLED
     133    static_assert(s_chunkSize == sizeof(uintptr_t), "bad chunkSizeShift");
     134    static constexpr size_t s_chunkSizeShift = is64Bit() ? 3 : 2;
     135#else // no ASAN_ENABLED
    102136    static constexpr size_t s_chunkSizeShift = 4;
    103 #else
    104     static constexpr size_t s_chunkSizeShift = 5;
    105 #endif
     137#endif // ASAN_ENABLED
    106138    static_assert(s_pageSize > s_chunkSize, "bad pageSize or chunkSize");
    107139    static_assert(s_chunkSize == (1 << s_chunkSizeShift), "bad chunkSizeShift");
    108 
    109140
    110141    typedef typename std::aligned_storage<s_pageSize, std::alignment_of<uintptr_t>::value>::type Buffer;
     
    121152    Stack(Stack&& other);
    122153
    123     void* lowWatermark() { return m_lowWatermark; }
    124 
    125     template<typename T>
    126     typename std::enable_if<!std::is_same<double, typename std::remove_cv<T>::type>::value, T>::type get(void* address)
    127     {
    128         Page* page = pageFor(address);
    129         return page->get<T>(address);
    130     }
    131 
    132     template<typename T, typename = typename std::enable_if<!std::is_same<double, typename std::remove_cv<T>::type>::value>::type>
    133     void set(void* address, T value)
    134     {
    135         Page* page = pageFor(address);
    136         page->set<T>(address, value);
    137 
     154    void* lowWatermark()
     155    {
    138156        // We use the chunkAddress for the low watermark because we'll be doing write backs
    139157        // to the stack in increments of chunks. Hence, we'll treat the lowest address of
    140158        // the chunk as the low watermark of any given set address.
    141         void* chunkAddress = Page::chunkAddressFor(address);
    142         if (chunkAddress < m_lowWatermark)
    143             m_lowWatermark = chunkAddress;
    144     }
    145 
    146     template<typename T>
    147     typename std::enable_if<std::is_same<double, typename std::remove_cv<T>::type>::value, T>::type get(void* address)
     159        return Page::chunkAddressFor(m_lowWatermark);
     160    }
     161
     162    template<typename T>
     163    T get(void* address)
    148164    {
    149165        Page* page = pageFor(address);
    150         return bitwise_cast<double>(page->get<uint64_t>(address));
    151     }
    152 
    153     template<typename T, typename = typename std::enable_if<std::is_same<double, typename std::remove_cv<T>::type>::value>::type>
    154     void set(void* address, double value)
    155     {
    156         set<uint64_t>(address, bitwise_cast<uint64_t>(value));
     166        return page->get<T>(address);
     167    }
     168    template<typename T>
     169    T get(void* logicalBaseAddress, ptrdiff_t offset)
     170    {
     171        return get<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset);
     172    }
     173
     174    template<typename T>
     175    void set(void* address, T value)
     176    {
     177        Page* page = pageFor(address);
     178        page->set<T>(address, value);
     179
     180        if (address < m_lowWatermark)
     181            m_lowWatermark = address;
     182    }
     183
     184    template<typename T>
     185    void set(void* logicalBaseAddress, ptrdiff_t offset, T value)
     186    {
     187        set<T>(reinterpret_cast<uint8_t*>(logicalBaseAddress) + offset, value);
    157188    }
    158189
  • trunk/Source/JavaScriptCore/assembler/testmasm.cpp

    r221012 r222058  
    601601            uintptr_t* p = reinterpret_cast<uintptr_t*>(newSP);
    602602            int count = 0;
    603             stack.set<double>(p++, 1.23456789);
     603            stack.set<double>(p++, 1.234567);
    604604            if (is32Bit())
    605605                p++; // On 32-bit targets, a double takes up 2 uintptr_t.
     
    632632            uintptr_t* p = reinterpret_cast<uintptr_t*>(newSP);
    633633            int count = 0;
    634             CHECK_EQ(stack.get<double>(p++), 1.23456789);
     634            CHECK_EQ(stack.get<double>(p++), 1.234567);
    635635            if (is32Bit())
    636636                p++; // On 32-bit targets, a double takes up 2 uintptr_t.