// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/renderer/v8_unwinder.h"

#include <algorithm>
#include <memory>
#include <utility>

#include "build/build_config.h"

#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
// V8 requires the embedder to establish the architecture define.
#define V8_TARGET_ARCH_ARM 1
#include "v8/include/v8-unwinder-state.h"
#endif

namespace {

class V8Module : public base::ModuleCache::Module {
 public:
  enum CodeRangeType { kEmbedded, kNonEmbedded };

  V8Module(const v8::MemoryRange& memory_range, CodeRangeType code_range_type)
      : memory_range_(memory_range), code_range_type_(code_range_type) {}

  V8Module(const V8Module&) = delete;
  V8Module& operator=(const V8Module&) = delete;

  // ModuleCache::Module:
  uintptr_t GetBaseAddress() const override {
    return reinterpret_cast<uintptr_t>(memory_range_.start);
  }

  std::string GetId() const override {
    return code_range_type_ == kEmbedded
               ? V8Unwinder::kV8EmbeddedCodeRangeBuildId
               : V8Unwinder::kV8CodeRangeBuildId;
  }

  base::FilePath GetDebugBasename() const override {
    return base::FilePath().AppendASCII(code_range_type_ == kEmbedded
                                            ? "V8 Embedded Code Range"
                                            : "V8 Code Range");
  }

  size_t GetSize() const override { return memory_range_.length_in_bytes; }

  bool IsNative() const override { return false; }

 private:
  const v8::MemoryRange memory_range_;
  const CodeRangeType code_range_type_;
};

// Heterogeneous comparator for MemoryRanges and Modules. Compares on both
// base address and size because the module sizes can be updated while the
// base address remains the same.
struct MemoryRangeModuleCompare {
  bool operator()(const v8::MemoryRange& range,
                  const base::ModuleCache::Module* module) const {
    return std::make_pair(reinterpret_cast<uintptr_t>(range.start),
                          range.length_in_bytes) <
           std::make_pair(module->GetBaseAddress(), module->GetSize());
  }

  bool operator()(const base::ModuleCache::Module* module,
                  const v8::MemoryRange& range) const {
    return std::make_pair(module->GetBaseAddress(), module->GetSize()) <
           std::make_pair(reinterpret_cast<uintptr_t>(range.start),
                          range.length_in_bytes);
  }

  bool operator()(const v8::MemoryRange& a, const v8::MemoryRange& b) const {
    return std::make_pair(a.start, a.length_in_bytes) <
           std::make_pair(b.start, b.length_in_bytes);
  }
};

v8::MemoryRange GetEmbeddedCodeRange(v8::Isolate* isolate) {
  v8::MemoryRange range;
  isolate->GetEmbeddedCodeRange(&range.start, &range.length_in_bytes);
  return range;
}
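// Illustrative sketch, comment only (not compiled): the two helpers below
// shuttle the ARM callee-saved registers r4-r10 between the sampling
// profiler's base::RegisterContext, which stores integral register values,
// and v8::CalleeSavedRegisters, which stores pointer-typed fields, one
// reinterpret_cast per register. Assuming a hypothetical |context| captured
// from a suspended thread, a round trip looks roughly like:
//
//   v8::CalleeSavedRegisters saved;
//   CopyCalleeSavedRegisterFromRegisterContext(context, &saved);
//   // ... V8 unwinds and may rewrite |saved| ...
//   CopyCalleeSavedRegisterToRegisterContext(&saved, context);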
void CopyCalleeSavedRegisterFromRegisterContext(
    const base::RegisterContext& register_context,
    v8::CalleeSavedRegisters* callee_saved_registers) {
#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
  // ARM requires callee-saved registers to be restored:
  // https://crbug.com/v8/10799.
  DCHECK(callee_saved_registers);
  callee_saved_registers->arm_r4 =
      reinterpret_cast<void*>(register_context.arm_r4);
  callee_saved_registers->arm_r5 =
      reinterpret_cast<void*>(register_context.arm_r5);
  callee_saved_registers->arm_r6 =
      reinterpret_cast<void*>(register_context.arm_r6);
  callee_saved_registers->arm_r7 =
      reinterpret_cast<void*>(register_context.arm_r7);
  callee_saved_registers->arm_r8 =
      reinterpret_cast<void*>(register_context.arm_r8);
  callee_saved_registers->arm_r9 =
      reinterpret_cast<void*>(register_context.arm_r9);
  callee_saved_registers->arm_r10 =
      reinterpret_cast<void*>(register_context.arm_r10);
#endif
}

void CopyCalleeSavedRegisterToRegisterContext(
    const v8::CalleeSavedRegisters* callee_saved_registers,
    base::RegisterContext& register_context) {
#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
  DCHECK(callee_saved_registers);
  register_context.arm_r4 =
      reinterpret_cast<uintptr_t>(callee_saved_registers->arm_r4);
  register_context.arm_r5 =
      reinterpret_cast<uintptr_t>(callee_saved_registers->arm_r5);
  register_context.arm_r6 =
      reinterpret_cast<uintptr_t>(callee_saved_registers->arm_r6);
  register_context.arm_r7 =
      reinterpret_cast<uintptr_t>(callee_saved_registers->arm_r7);
  register_context.arm_r8 =
      reinterpret_cast<uintptr_t>(callee_saved_registers->arm_r8);
  register_context.arm_r9 =
      reinterpret_cast<uintptr_t>(callee_saved_registers->arm_r9);
  register_context.arm_r10 =
      reinterpret_cast<uintptr_t>(callee_saved_registers->arm_r10);
#endif
}

}  // namespace

V8Unwinder::V8Unwinder(v8::Isolate* isolate)
    : isolate_(isolate),
      js_entry_stubs_(isolate->GetJSEntryStubs()),
      embedded_code_range_(GetEmbeddedCodeRange(isolate)) {}

V8Unwinder::~V8Unwinder() = default;

void V8Unwinder::InitializeModules() {
  // This function must be called only once.
  DCHECK(modules_.empty());

  // Add a module for the embedded code range.
  std::vector<std::unique_ptr<const base::ModuleCache::Module>> new_module;
  new_module.push_back(
      std::make_unique<V8Module>(embedded_code_range_, V8Module::kEmbedded));
  modules_.insert(new_module.front().get());
  module_cache()->UpdateNonNativeModules({}, std::move(new_module));
}

// IMPORTANT NOTE: to avoid deadlock this function must not invoke any
// non-reentrant code that is also invoked by the target thread. In
// particular, no heap allocation or deallocation is permitted, including
// indirectly via use of DCHECK/CHECK or other logging statements.
void V8Unwinder::OnStackCapture() {
  required_code_ranges_capacity_ =
      CopyCodePages(code_ranges_.capacity(), code_ranges_.buffer());
  code_ranges_.SetSize(
      std::min(required_code_ranges_capacity_, code_ranges_.capacity()));
}
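// Sketch of the module-update algorithm, comment only: UpdateModules() below
// is a two-pointer merge of sorted sequences, walking the code ranges
// captured in |code_ranges_| against the existing |modules_| set. In outline:
//
//   while (range_it != ranges_end && modules_it != modules_end) {
//     if (*range_it < *modules_it)       // range with no module: create one
//     else if (*modules_it < *range_it)  // module with no range: defunct
//     else                               // match: keep, advance both
//   }
//   // ... then drain whichever sequence has elements remaining.
//
// This keeps each update linear in the number of ranges plus modules.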
// Update the modules based on what was recorded in |code_ranges_|. The
// singular embedded code range was already added in InitializeModules(). It
// is preserved by the algorithm below, which is why kNonEmbedded is
// unconditionally passed when creating new modules.
void V8Unwinder::UpdateModules() {
  MemoryRangeModuleCompare less_than;

  const auto is_embedded_code_range_module =
      [this](const base::ModuleCache::Module* module) {
        return module->GetBaseAddress() ==
                   reinterpret_cast<uintptr_t>(embedded_code_range_.start) &&
               module->GetSize() == embedded_code_range_.length_in_bytes;
      };

  std::vector<std::unique_ptr<const base::ModuleCache::Module>> new_modules;
  std::vector<const base::ModuleCache::Module*> defunct_modules;

  // Identify defunct modules and create new modules seen since the last
  // sample. Code ranges provided by V8 are in sorted order.
  v8::MemoryRange* const code_ranges_start = code_ranges_.buffer();
  v8::MemoryRange* const code_ranges_end =
      code_ranges_start + code_ranges_.size();
  CHECK(std::is_sorted(code_ranges_start, code_ranges_end, less_than));

  v8::MemoryRange* range_it = code_ranges_start;
  auto modules_it = modules_.begin();
  while (range_it != code_ranges_end && modules_it != modules_.end()) {
    if (less_than(*range_it, *modules_it)) {
      new_modules.push_back(
          std::make_unique<V8Module>(*range_it, V8Module::kNonEmbedded));
      modules_.insert(modules_it, new_modules.back().get());
      ++range_it;
    } else if (less_than(*modules_it, *range_it)) {
      // Avoid deleting the embedded code range module if it wasn't provided
      // in |code_ranges_|. This could happen if |code_ranges_| had
      // insufficient capacity when the code pages were copied.
      if (!is_embedded_code_range_module(*modules_it)) {
        defunct_modules.push_back(*modules_it);
        modules_it = modules_.erase(modules_it);
      } else {
        ++modules_it;
      }
    } else {
      // The range already has a module, so there's nothing to do.
      ++range_it;
      ++modules_it;
    }
  }

  while (range_it != code_ranges_end) {
    new_modules.push_back(
        std::make_unique<V8Module>(*range_it, V8Module::kNonEmbedded));
    modules_.insert(modules_it, new_modules.back().get());
    ++range_it;
  }

  while (modules_it != modules_.end()) {
    if (!is_embedded_code_range_module(*modules_it)) {
      defunct_modules.push_back(*modules_it);
      modules_it = modules_.erase(modules_it);
    } else {
      ++modules_it;
    }
  }

  module_cache()->UpdateNonNativeModules(defunct_modules,
                                         std::move(new_modules));
  code_ranges_.ExpandCapacityIfNecessary(required_code_ranges_capacity_);
}

bool V8Unwinder::CanUnwindFrom(const base::Frame& current_frame) const {
  const base::ModuleCache::Module* module = current_frame.module;
  if (!module)
    return false;
  const auto loc = modules_.find(module);
  DCHECK(loc == modules_.end() || *loc == module);
  return loc != modules_.end();
}

base::UnwindResult V8Unwinder::TryUnwind(
    base::RegisterContext* thread_context,
    uintptr_t stack_top,
    std::vector<base::Frame>* stack) const {
  v8::RegisterState register_state;
  register_state.pc = reinterpret_cast<void*>(
      base::RegisterContextInstructionPointer(thread_context));
  register_state.sp = reinterpret_cast<void*>(
      base::RegisterContextStackPointer(thread_context));
  register_state.fp = reinterpret_cast<void*>(
      base::RegisterContextFramePointer(thread_context));

#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
  if (!register_state.callee_saved)
    register_state.callee_saved = std::make_unique<v8::CalleeSavedRegisters>();
#endif
  CopyCalleeSavedRegisterFromRegisterContext(
      *thread_context, register_state.callee_saved.get());

  if (!v8::Unwinder::TryUnwindV8Frames(
          js_entry_stubs_, code_ranges_.size(), code_ranges_.buffer(),
          &register_state, reinterpret_cast<const void*>(stack_top))) {
    return base::UnwindResult::ABORTED;
  }

  const uintptr_t prev_stack_pointer =
      base::RegisterContextStackPointer(thread_context);
  DCHECK_GT(reinterpret_cast<uintptr_t>(register_state.sp),
            prev_stack_pointer);
  DCHECK_LT(reinterpret_cast<uintptr_t>(register_state.sp), stack_top);

  base::RegisterContextInstructionPointer(thread_context) =
      reinterpret_cast<uintptr_t>(register_state.pc);
  base::RegisterContextStackPointer(thread_context) =
      reinterpret_cast<uintptr_t>(register_state.sp);
  base::RegisterContextFramePointer(thread_context) =
      reinterpret_cast<uintptr_t>(register_state.fp);
  CopyCalleeSavedRegisterToRegisterContext(register_state.callee_saved.get(),
                                           *thread_context);

  stack->emplace_back(
      base::RegisterContextInstructionPointer(thread_context),
      module_cache()->GetModuleForAddress(
          base::RegisterContextInstructionPointer(thread_context)));

  return base::UnwindResult::UNRECOGNIZED_FRAME;
}
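// Illustrative call sequence, comment only and simplified (the real wiring
// goes through the stack sampling profiler): a profiler would drive this
// unwinder roughly as
//
//   unwinder->InitializeModules();  // once, before sampling begins
//   // ... per sample ...
//   unwinder->OnStackCapture();     // target suspended; must not allocate
//   unwinder->UpdateModules();      // target resumed; may allocate
//   if (unwinder->CanUnwindFrom(current_frame))
//     result = unwinder->TryUnwind(&thread_context, stack_top, &stack);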
size_t V8Unwinder::CopyCodePages(size_t capacity,
                                 v8::MemoryRange* code_pages) {
  return isolate_->CopyCodePages(capacity, code_pages);
}

// Synthetic build ids to use for V8 modules. The difference is in the digit
// after the leading 5's.
const char V8Unwinder::kV8EmbeddedCodeRangeBuildId[] =
    "5555555507284E1E874EFA4EB754964B999";
const char V8Unwinder::kV8CodeRangeBuildId[] =
    "5555555517284E1E874EFA4EB754964B999";

V8Unwinder::MemoryRanges::MemoryRanges()
    : capacity_(v8::Isolate::kMinCodePagesBufferSize),
      size_(0),
      ranges_(std::make_unique<v8::MemoryRange[]>(capacity_)) {}

V8Unwinder::MemoryRanges::~MemoryRanges() = default;

void V8Unwinder::MemoryRanges::SetSize(size_t size) {
  // DCHECKing size_ <= capacity_ is deferred to size() because the DCHECK
  // may heap allocate.
  size_ = size;
}

void V8Unwinder::MemoryRanges::ExpandCapacityIfNecessary(
    size_t required_capacity) {
  if (required_capacity > capacity_) {
    while (required_capacity > capacity_)
      capacity_ *= 2;
    auto new_ranges = std::make_unique<v8::MemoryRange[]>(capacity_);
    std::copy(buffer(), buffer() + size_, new_ranges.get());
    ranges_ = std::move(new_ranges);
  }
}

bool V8Unwinder::ModuleCompare::operator()(
    const base::ModuleCache::Module* a,
    const base::ModuleCache::Module* b) const {
  return std::make_pair(a->GetBaseAddress(), a->GetSize()) <
         std::make_pair(b->GetBaseAddress(), b->GetSize());
}
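// Note on MemoryRanges growth, comment only: ExpandCapacityIfNecessary()
// doubles |capacity_| until it covers |required_capacity|. Assuming, for
// illustration, an initial capacity of 32, a required capacity of 70 grows
// the buffer 32 -> 64 -> 128. Doubling amortizes the copy cost, and the
// reallocation happens here, during UpdateModules(), never in
// OnStackCapture(), which must not heap allocate while the target thread is
// suspended.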