Merge "Fix scudo fault address processing."
commit d17cefe7e4
@@ -293,6 +293,13 @@ cc_test {
         "libdebuggerd/test/utility_test.cpp",
     ],
 
+    product_variables: {
+        malloc_not_svelte: {
+            srcs: ["libdebuggerd/test/scudo_test.cpp"],
+            header_libs: ["scudo_headers"],
+        },
+    },
+
     target: {
         android: {
             srcs: [
@@ -34,9 +34,10 @@ class Memory;
 
 class ScudoCrashData {
  public:
-  ScudoCrashData() = delete;
+  ScudoCrashData() = default;
   ~ScudoCrashData() = default;
-  ScudoCrashData(unwindstack::Memory* process_memory, const ProcessInfo& process_info);
+
+  bool SetErrorInfo(unwindstack::Memory* process_memory, const ProcessInfo& process_info);
 
   bool CrashIsMine() const;
 
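With this header change the scudo crash data is no longer gathered in the constructor: callers default-construct the object, call SetErrorInfo, and only proceed when it returns true. A minimal sketch of the new calling sequence, mirroring the tombstone_proto.cpp hunk further down; here process_memory and process_info are assumed to be the crashing process' memory handle and ProcessInfo, obtained elsewhere:

    #if defined(USE_SCUDO)
      // Sketch only: gather the scudo data first, then ask whether the crash
      // belongs to scudo. Both calls are cheap no-ops on failure.
      ScudoCrashData scudo_crash_data;
      if (scudo_crash_data.SetErrorInfo(process_memory, process_info) &&
          scudo_crash_data.CrashIsMine()) {
        // Report the scudo cause only when the data could be read and matches.
      }
    #endif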
@@ -14,6 +14,11 @@
  * limitations under the License.
  */
 
+#include <stdint.h>
+#include <unistd.h>
+
+#include <vector>
+
 #include "libdebuggerd/scudo.h"
 #include "libdebuggerd/tombstone.h"
 
@@ -25,54 +30,92 @@
 
 #include "tombstone.pb.h"
 
-std::unique_ptr<char[]> AllocAndReadFully(unwindstack::Memory* process_memory, uint64_t addr,
-                                          size_t size) {
-  auto buf = std::make_unique<char[]>(size);
-  if (!process_memory->ReadFully(addr, buf.get(), size)) {
-    return std::unique_ptr<char[]>();
-  }
-  return buf;
-}
-
-ScudoCrashData::ScudoCrashData(unwindstack::Memory* process_memory,
-                               const ProcessInfo& process_info) {
+bool ScudoCrashData::SetErrorInfo(unwindstack::Memory* process_memory,
+                                  const ProcessInfo& process_info) {
   if (!process_info.has_fault_address) {
-    return;
+    return false;
   }
 
-  auto stack_depot = AllocAndReadFully(process_memory, process_info.scudo_stack_depot,
-                                       __scudo_get_stack_depot_size());
-  auto region_info = AllocAndReadFully(process_memory, process_info.scudo_region_info,
-                                       __scudo_get_region_info_size());
-  auto ring_buffer = AllocAndReadFully(process_memory, process_info.scudo_ring_buffer,
-                                       __scudo_get_ring_buffer_size());
+  std::vector<char> stack_depot(__scudo_get_stack_depot_size());
+  if (!process_memory->ReadFully(process_info.scudo_stack_depot, stack_depot.data(),
+                                 stack_depot.size())) {
+    return false;
+  }
+  std::vector<char> region_info(__scudo_get_region_info_size());
+  if (!process_memory->ReadFully(process_info.scudo_region_info, region_info.data(),
+                                 region_info.size())) {
+    return false;
+  }
+  std::vector<char> ring_buffer(__scudo_get_ring_buffer_size());
+  if (!process_memory->ReadFully(process_info.scudo_ring_buffer, ring_buffer.data(),
+                                 ring_buffer.size())) {
+    return false;
+  }
+
+  uintptr_t page_size = getpagesize();
 
   untagged_fault_addr_ = process_info.untagged_fault_address;
-  uintptr_t fault_page = untagged_fault_addr_ & ~(PAGE_SIZE - 1);
+  uintptr_t fault_page = untagged_fault_addr_ & ~(page_size - 1);
 
-  uintptr_t memory_begin = fault_page - PAGE_SIZE * 16;
-  if (memory_begin > fault_page) {
-    return;
-  }
+  // Attempt to get 16 pages before the fault page and 16 pages after.
+  constexpr size_t kExtraPages = 16;
+  std::vector<char> memory(page_size * (kExtraPages * 2 + 1));
+
+  // Read faulting page first.
+  size_t memory_index = kExtraPages;
+  if (!process_memory->ReadFully(fault_page, &memory[memory_index * page_size], page_size)) {
+    return false;
+  }
 
-  uintptr_t memory_end = fault_page + PAGE_SIZE * 16;
-  if (memory_end < fault_page) {
-    return;
-  }
+  // Attempt to read the pages after the fault page, stop as soon as we
+  // fail to read.
+  uintptr_t read_addr = fault_page;
+  if (!__builtin_add_overflow(fault_page, page_size, &read_addr)) {
+    memory_index++;
+    for (size_t i = 0; i < kExtraPages; i++, memory_index++) {
+      if (!process_memory->ReadFully(read_addr, &memory[memory_index * page_size], page_size)) {
+        break;
+      }
+      if (__builtin_add_overflow(read_addr, page_size, &read_addr)) {
+        break;
+      }
+    }
+  }
+  uintptr_t memory_end = read_addr;
+
+  // Attempt to read the pages before the fault page, stop as soon as we
+  // fail to read.
+  memory_index = kExtraPages;
+  if (fault_page > 0) {
+    read_addr = fault_page - page_size;
+    for (size_t i = 0; i < kExtraPages; i++, memory_index--) {
+      if (!process_memory->ReadFully(read_addr, &memory[(memory_index - 1) * page_size],
+                                     page_size)) {
+        break;
+      }
+      if (read_addr == 0) {
+        memory_index--;
+        break;
+      }
+      read_addr -= page_size;
+    }
+  }
+  size_t start_memory_index = memory_index;
+  uintptr_t memory_begin = fault_page - (kExtraPages - memory_index) * page_size;
+
+  std::vector<long> memory_tags((memory_end - memory_begin) / kTagGranuleSize);
+  read_addr = memory_begin;
+  for (size_t i = 0; i < memory_tags.size(); i++) {
+    memory_tags[i] = process_memory->ReadTag(read_addr);
+    read_addr += kTagGranuleSize;
+  }
 
-  auto memory = std::make_unique<char[]>(memory_end - memory_begin);
-  for (auto i = memory_begin; i != memory_end; i += PAGE_SIZE) {
-    process_memory->ReadFully(i, memory.get() + i - memory_begin, PAGE_SIZE);
-  }
+  __scudo_get_error_info(
+      &error_info_, process_info.maybe_tagged_fault_address, stack_depot.data(), region_info.data(),
+      ring_buffer.data(), &memory[start_memory_index * page_size],
+      reinterpret_cast<const char*>(memory_tags.data()), memory_begin, memory_end - memory_begin);
 
-  auto memory_tags = std::make_unique<char[]>((memory_end - memory_begin) / kTagGranuleSize);
-  for (auto i = memory_begin; i != memory_end; i += kTagGranuleSize) {
-    memory_tags[(i - memory_begin) / kTagGranuleSize] = process_memory->ReadTag(i);
-  }
-
-  __scudo_get_error_info(&error_info_, process_info.maybe_tagged_fault_address, stack_depot.get(),
-                         region_info.get(), ring_buffer.get(), memory.get(), memory_tags.get(),
-                         memory_begin, memory_end - memory_begin);
+  return true;
 }
 
 bool ScudoCrashData::CrashIsMine() const {
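The rewritten SetErrorInfo replaces the old unchecked `fault_page +/- PAGE_SIZE * 16` arithmetic with __builtin_add_overflow (a GCC/Clang builtin) and an explicit `fault_page > 0` check, so the window of dumped pages is clamped at the ends of the address space instead of wrapping. A minimal standalone sketch of the clamping idea; the near-the-top starting address is made up for illustration and does not come from the change itself:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      // Advance one page at a time and stop, rather than wrap, when the
      // addition would overflow uintptr_t.
      uintptr_t page_size = 4096;
      uintptr_t addr = UINTPTR_MAX - 2 * page_size + 1;  // hypothetical page near the top
      for (int i = 0; i < 16; i++) {
        uintptr_t next;
        if (__builtin_add_overflow(addr, page_size, &next)) {
          printf("stopping: next page would wrap past the top of the address space\n");
          break;
        }
        addr = next;
      }
      printf("last page considered: %#zx\n", (size_t)addr);
      return 0;
    }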
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+
+#include "libdebuggerd/scudo.h"
+#include "libdebuggerd/types.h"
+#include "unwindstack/Memory.h"
+
+#include "log_fake.h"
+
+#include <inttypes.h>
+
+// This needs to match the kExtraPages from ScudoCrashData::SetErrorInfo.
+constexpr uint64_t kMaxPages = 16;
+
+class MemoryAlwaysZero : public unwindstack::Memory {
+ public:
+  MemoryAlwaysZero() = default;
+  virtual ~MemoryAlwaysZero() = default;
+
+  size_t Read(uint64_t addr, void* buffer, size_t size) override {
+    if (test_unreadable_addrs_.count(addr) != 0) {
+      return 0;
+    }
+    test_read_addrs_.insert(addr);
+    memset(buffer, 0, size);
+    return size;
+  }
+
+  void TestAddUnreadableAddress(uint64_t addr) { test_unreadable_addrs_.insert(addr); }
+
+  void TestClearAddresses() {
+    test_read_addrs_.clear();
+    test_unreadable_addrs_.clear();
+  }
+
+  std::set<uint64_t>& test_read_addrs() { return test_read_addrs_; }
+
+ private:
+  std::set<uint64_t> test_unreadable_addrs_;
+
+  std::set<uint64_t> test_read_addrs_;
+};
+
+TEST(ScudoTest, no_fault_address) {
+  MemoryAlwaysZero process_memory;
+  ProcessInfo info;
+  info.has_fault_address = false;
+  info.untagged_fault_address = 0x5000;
+  info.scudo_stack_depot = 0x1000;
+  info.scudo_region_info = 0x2000;
+  info.scudo_ring_buffer = 0x3000;
+
+  ScudoCrashData crash;
+  ASSERT_FALSE(crash.SetErrorInfo(&process_memory, info));
+
+  info.has_fault_address = true;
+  ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+}
+
+TEST(ScudoTest, scudo_data_read_check) {
+  MemoryAlwaysZero process_memory;
+  ProcessInfo info;
+  info.has_fault_address = true;
+  info.untagged_fault_address = 0x5000;
+  info.scudo_stack_depot = 0x1000;
+  info.scudo_region_info = 0x2000;
+  info.scudo_ring_buffer = 0x3000;
+
+  ScudoCrashData crash;
+  ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+
+  // Stack Depot unreadable
+  process_memory.TestClearAddresses();
+  process_memory.TestAddUnreadableAddress(0x1000);
+  ASSERT_FALSE(crash.SetErrorInfo(&process_memory, info));
+
+  // The Region Info doesn't exist for 32 bit.
+#if defined(__LP64__)
+  // Region Info unreadable
+  process_memory.TestClearAddresses();
+  process_memory.TestAddUnreadableAddress(0x2000);
+  ASSERT_FALSE(crash.SetErrorInfo(&process_memory, info));
+#endif
+
+  // Ring Buffer unreadable
+  process_memory.TestClearAddresses();
+  process_memory.TestAddUnreadableAddress(0x3000);
+  ASSERT_FALSE(crash.SetErrorInfo(&process_memory, info));
+
+  // Verify that with all scudo data readable, the error info works.
+  process_memory.TestClearAddresses();
+  ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+}
+
+TEST(ScudoTest, fault_page_unreadable) {
+  MemoryAlwaysZero process_memory;
+  ProcessInfo info;
+  info.has_fault_address = true;
+  info.untagged_fault_address = 0x5124;
+  info.scudo_stack_depot = 0x1000;
+  info.scudo_region_info = 0x2000;
+  info.scudo_ring_buffer = 0x3000;
+
+  ScudoCrashData crash;
+  ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+
+  uint64_t fault_page = info.untagged_fault_address & ~(getpagesize() - 1);
+  process_memory.TestAddUnreadableAddress(fault_page);
+  ASSERT_FALSE(crash.SetErrorInfo(&process_memory, info));
+}
+
+TEST(ScudoTest, pages_before_fault_unreadable) {
+  MemoryAlwaysZero process_memory;
+  ProcessInfo info;
+  info.has_fault_address = true;
+  info.untagged_fault_address = 0x15124;
+  info.scudo_stack_depot = 0x1000;
+  info.scudo_region_info = 0x2000;
+  info.scudo_ring_buffer = 0x3000;
+
+  ScudoCrashData crash;
+  ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+
+  uint64_t page_size = getpagesize();
+  uint64_t fault_page = info.untagged_fault_address & ~(page_size - 1);
+
+  std::vector<uint64_t> expected_reads = {0x1000, 0x2000, 0x3000};
+  for (size_t i = 0; i <= kMaxPages; i++) {
+    expected_reads.emplace_back(fault_page + i * page_size);
+  }
+
+  // Loop through and make pages before the fault page unreadable.
+  for (size_t i = 1; i <= kMaxPages + 1; i++) {
+    process_memory.TestClearAddresses();
+    uint64_t unreadable_addr = fault_page - i * page_size;
+    SCOPED_TRACE(testing::Message()
+                 << "Failed at unreadable address 0x" << std::hex << unreadable_addr);
+    process_memory.TestAddUnreadableAddress(unreadable_addr);
+    ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+    ASSERT_THAT(process_memory.test_read_addrs(),
+                testing::UnorderedElementsAreArray(expected_reads));
+    // Need to add the previous unreadable_addr to the list of expected addresses.
+    expected_reads.emplace_back(unreadable_addr);
+  }
+}
+
+TEST(ScudoTest, pages_after_fault_unreadable) {
+  MemoryAlwaysZero process_memory;
+  ProcessInfo info;
+  info.has_fault_address = true;
+  info.untagged_fault_address = 0x15124;
+  info.scudo_stack_depot = 0x1000;
+  info.scudo_region_info = 0x2000;
+  info.scudo_ring_buffer = 0x3000;
+
+  ScudoCrashData crash;
+  ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+
+  uint64_t page_size = getpagesize();
+  uint64_t fault_page = info.untagged_fault_address & ~(page_size - 1);
+
+  std::vector<uint64_t> expected_reads = {0x1000, 0x2000, 0x3000};
+  for (size_t i = 0; i <= kMaxPages; i++) {
+    expected_reads.emplace_back(fault_page - i * page_size);
+  }
+
+  // Loop through and make pages after the fault page unreadable.
+  for (size_t i = 1; i <= kMaxPages + 1; i++) {
+    process_memory.TestClearAddresses();
+    uint64_t unreadable_addr = fault_page + i * page_size;
+    SCOPED_TRACE(testing::Message()
+                 << "Failed at unreadable address 0x" << std::hex << unreadable_addr);
+    process_memory.TestAddUnreadableAddress(unreadable_addr);
+    ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+    ASSERT_THAT(process_memory.test_read_addrs(),
+                testing::UnorderedElementsAreArray(expected_reads));
+    // Need to add the previous unreadable_addr to the list of expected addresses.
+    expected_reads.emplace_back(unreadable_addr);
+  }
+}
+
+// Make sure that if the fault address is low, you won't underflow.
+TEST(ScudoTest, fault_address_low) {
+  MemoryAlwaysZero process_memory;
+  ProcessInfo info;
+  info.has_fault_address = true;
+  info.scudo_stack_depot = 0x21000;
+  info.scudo_region_info = 0x22000;
+  info.scudo_ring_buffer = 0x23000;
+
+  ScudoCrashData crash;
+  ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+
+  uint64_t page_size = getpagesize();
+  for (size_t i = 0; i < kMaxPages + 1; i++) {
+    process_memory.TestClearAddresses();
+    info.untagged_fault_address = 0x124 + i * getpagesize();
+    SCOPED_TRACE(testing::Message()
+                 << "Failed with fault address 0x" << std::hex << info.untagged_fault_address);
+    ASSERT_TRUE(crash.SetErrorInfo(&process_memory, info));
+    std::vector<uint64_t> expected_reads = {0x21000, 0x22000, 0x23000};
+    uint64_t fault_page = info.untagged_fault_address & ~(page_size - 1);
+    expected_reads.emplace_back(fault_page);
+    for (size_t j = 1; j <= kMaxPages; j++) {
+      expected_reads.emplace_back(fault_page + j * page_size);
+    }
+    while (fault_page != 0) {
+      fault_page -= page_size;
+      expected_reads.emplace_back(fault_page);
+    }
+    ASSERT_THAT(process_memory.test_read_addrs(),
+                testing::UnorderedElementsAreArray(expected_reads));
+  }
+}
@@ -193,8 +193,9 @@ void set_human_readable_cause(Cause* cause, uint64_t fault_addr) {
 static void dump_probable_cause(Tombstone* tombstone, unwindstack::AndroidUnwinder* unwinder,
                                 const ProcessInfo& process_info, const ThreadInfo& main_thread) {
 #if defined(USE_SCUDO)
-  ScudoCrashData scudo_crash_data(unwinder->GetProcessMemory().get(), process_info);
-  if (scudo_crash_data.CrashIsMine()) {
+  ScudoCrashData scudo_crash_data;
+  if (scudo_crash_data.SetErrorInfo(unwinder->GetProcessMemory().get(), process_info) &&
+      scudo_crash_data.CrashIsMine()) {
     scudo_crash_data.AddCauseProtos(tombstone, unwinder);
     return;
   }