Merge "Basic support for MTE stack tagging."

This commit is contained in:
Treehugger Robot 2022-05-27 02:15:53 +00:00 committed by Gerrit Code Review
commit 39de8b944e
6 changed files with 90 additions and 50 deletions

View File

@@ -44,30 +44,37 @@ void SetDefaultHeapTaggingLevel() {
#if !__has_feature(hwaddress_sanitizer) #if !__has_feature(hwaddress_sanitizer)
heap_tagging_level = __libc_shared_globals()->initial_heap_tagging_level; heap_tagging_level = __libc_shared_globals()->initial_heap_tagging_level;
#endif #endif
switch (heap_tagging_level) {
case M_HEAP_TAGGING_LEVEL_TBI: __libc_globals.mutate([](libc_globals* globals) {
__libc_globals.mutate([](libc_globals* globals) { switch (heap_tagging_level) {
case M_HEAP_TAGGING_LEVEL_TBI:
// Arrange for us to set pointer tags to POINTER_TAG, check tags on // Arrange for us to set pointer tags to POINTER_TAG, check tags on
// deallocation and untag when passing pointers to the allocator. // deallocation and untag when passing pointers to the allocator.
globals->heap_pointer_tag = (reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT) | globals->heap_pointer_tag = (reinterpret_cast<uintptr_t>(POINTER_TAG) << TAG_SHIFT) |
(0xffull << CHECK_SHIFT) | (0xffull << UNTAG_SHIFT); (0xffull << CHECK_SHIFT) | (0xffull << UNTAG_SHIFT);
}); break;
#if defined(USE_SCUDO) case M_HEAP_TAGGING_LEVEL_SYNC:
scudo_malloc_disable_memory_tagging(); case M_HEAP_TAGGING_LEVEL_ASYNC:
#endif // USE_SCUDO atomic_store(&globals->memtag_stack, __libc_shared_globals()->initial_memtag_stack);
break; break;
#if defined(USE_SCUDO) default:
case M_HEAP_TAGGING_LEVEL_SYNC: break;
scudo_malloc_set_track_allocation_stacks(1); };
break; });
#if defined(USE_SCUDO)
switch (heap_tagging_level) {
case M_HEAP_TAGGING_LEVEL_TBI:
case M_HEAP_TAGGING_LEVEL_NONE: case M_HEAP_TAGGING_LEVEL_NONE:
scudo_malloc_disable_memory_tagging(); scudo_malloc_disable_memory_tagging();
break; break;
#endif // USE_SCUDO case M_HEAP_TAGGING_LEVEL_SYNC:
scudo_malloc_set_track_allocation_stacks(1);
break;
default: default:
break; break;
} }
#endif // USE_SCUDO
#endif // aarch64 #endif // aarch64
} }
@@ -99,16 +106,21 @@ bool SetHeapTaggingLevel(HeapTaggingLevel tag_level) {
switch (tag_level) { switch (tag_level) {
case M_HEAP_TAGGING_LEVEL_NONE: case M_HEAP_TAGGING_LEVEL_NONE:
if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) { __libc_globals.mutate([](libc_globals* globals) {
__libc_globals.mutate([](libc_globals* globals) { if (heap_tagging_level == M_HEAP_TAGGING_LEVEL_TBI) {
// Preserve the untag mask (we still want to untag pointers when passing them to the // Preserve the untag mask (we still want to untag pointers when passing them to the
// allocator), but clear the fixed tag and the check mask, so that pointers are no longer // allocator), but clear the fixed tag and the check mask, so that pointers are no longer
// tagged and checks no longer happen. // tagged and checks no longer happen.
globals->heap_pointer_tag = static_cast<uintptr_t>(0xffull << UNTAG_SHIFT); globals->heap_pointer_tag = static_cast<uintptr_t>(0xffull << UNTAG_SHIFT);
}); }
} else if (!set_tcf_on_all_threads(PR_MTE_TCF_NONE)) { atomic_store(&globals->memtag_stack, false);
error_log("SetHeapTaggingLevel: set_tcf_on_all_threads failed"); });
return false;
if (heap_tagging_level != M_HEAP_TAGGING_LEVEL_TBI) {
if (!set_tcf_on_all_threads(PR_MTE_TCF_NONE)) {
error_log("SetHeapTaggingLevel: set_tcf_on_all_threads failed");
return false;
}
} }
#if defined(USE_SCUDO) #if defined(USE_SCUDO)
scudo_malloc_disable_memory_tagging(); scudo_malloc_disable_memory_tagging();

View File

@@ -259,17 +259,18 @@ static bool get_environment_memtag_setting(HeapTaggingLevel* level) {
// M_HEAP_TAGGING_LEVEL_NONE, if MTE isn't enabled for this process we enable // M_HEAP_TAGGING_LEVEL_NONE, if MTE isn't enabled for this process we enable
// M_HEAP_TAGGING_LEVEL_TBI. // M_HEAP_TAGGING_LEVEL_TBI.
static HeapTaggingLevel __get_heap_tagging_level(const void* phdr_start, size_t phdr_ct, static HeapTaggingLevel __get_heap_tagging_level(const void* phdr_start, size_t phdr_ct,
uintptr_t load_bias) { uintptr_t load_bias, bool* stack) {
unsigned note_val =
__get_memtag_note(reinterpret_cast<const ElfW(Phdr)*>(phdr_start), phdr_ct, load_bias);
*stack = note_val & NT_MEMTAG_STACK;
HeapTaggingLevel level; HeapTaggingLevel level;
if (get_environment_memtag_setting(&level)) return level; if (get_environment_memtag_setting(&level)) return level;
unsigned note_val =
__get_memtag_note(reinterpret_cast<const ElfW(Phdr)*>(phdr_start), phdr_ct, load_bias);
// Note, previously (in Android 12), any value outside of bits [0..3] resulted // Note, previously (in Android 12), any value outside of bits [0..3] resulted
// in a check-fail. In order to be permissive of further extensions, we // in a check-fail. In order to be permissive of further extensions, we
// relaxed this restriction. For now, we still only support MTE heap. // relaxed this restriction.
if (!(note_val & NT_MEMTAG_HEAP)) return M_HEAP_TAGGING_LEVEL_TBI; if (!(note_val & (NT_MEMTAG_HEAP | NT_MEMTAG_STACK))) return M_HEAP_TAGGING_LEVEL_TBI;
unsigned mode = note_val & NT_MEMTAG_LEVEL_MASK; unsigned mode = note_val & NT_MEMTAG_LEVEL_MASK;
switch (mode) { switch (mode) {
@@ -295,8 +296,10 @@ static HeapTaggingLevel __get_heap_tagging_level(const void* phdr_start, size_t
// This function is called from the linker before the main executable is relocated. // This function is called from the linker before the main executable is relocated.
__attribute__((no_sanitize("hwaddress", "memtag"))) void __libc_init_mte(const void* phdr_start, __attribute__((no_sanitize("hwaddress", "memtag"))) void __libc_init_mte(const void* phdr_start,
size_t phdr_ct, size_t phdr_ct,
uintptr_t load_bias) { uintptr_t load_bias,
HeapTaggingLevel level = __get_heap_tagging_level(phdr_start, phdr_ct, load_bias); void* stack_top) {
bool memtag_stack;
HeapTaggingLevel level = __get_heap_tagging_level(phdr_start, phdr_ct, load_bias, &memtag_stack);
if (level == M_HEAP_TAGGING_LEVEL_SYNC || level == M_HEAP_TAGGING_LEVEL_ASYNC) { if (level == M_HEAP_TAGGING_LEVEL_SYNC || level == M_HEAP_TAGGING_LEVEL_ASYNC) {
unsigned long prctl_arg = PR_TAGGED_ADDR_ENABLE | PR_MTE_TAG_SET_NONZERO; unsigned long prctl_arg = PR_TAGGED_ADDR_ENABLE | PR_MTE_TAG_SET_NONZERO;
@@ -308,6 +311,17 @@ __attribute__((no_sanitize("hwaddress", "memtag"))) void __libc_init_mte(const v
if (prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_arg | PR_MTE_TCF_SYNC, 0, 0, 0) == 0 || if (prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_arg | PR_MTE_TCF_SYNC, 0, 0, 0) == 0 ||
prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_arg, 0, 0, 0) == 0) { prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_arg, 0, 0, 0) == 0) {
__libc_shared_globals()->initial_heap_tagging_level = level; __libc_shared_globals()->initial_heap_tagging_level = level;
__libc_shared_globals()->initial_memtag_stack = memtag_stack;
if (memtag_stack) {
void* page_start =
reinterpret_cast<void*>(PAGE_START(reinterpret_cast<uintptr_t>(stack_top)));
if (mprotect(page_start, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_MTE | PROT_GROWSDOWN)) {
async_safe_fatal("error: failed to set PROT_MTE on main thread stack: %s\n",
strerror(errno));
}
}
return; return;
} }
} }
@@ -319,7 +333,7 @@ __attribute__((no_sanitize("hwaddress", "memtag"))) void __libc_init_mte(const v
} }
} }
#else // __aarch64__ #else // __aarch64__
void __libc_init_mte(const void*, size_t, uintptr_t) {} void __libc_init_mte(const void*, size_t, uintptr_t, void*) {}
#endif // __aarch64__ #endif // __aarch64__
void __libc_init_profiling_handlers() { void __libc_init_profiling_handlers() {
@@ -331,11 +345,9 @@ void __libc_init_profiling_handlers() {
signal(BIONIC_SIGNAL_ART_PROFILER, SIG_IGN); signal(BIONIC_SIGNAL_ART_PROFILER, SIG_IGN);
} }
__noreturn static void __real_libc_init(void *raw_args, __attribute__((no_sanitize("memtag"))) __noreturn static void __real_libc_init(
void (*onexit)(void) __unused, void* raw_args, void (*onexit)(void) __unused, int (*slingshot)(int, char**, char**),
int (*slingshot)(int, char**, char**), structors_array_t const* const structors, bionic_tcb* temp_tcb) {
structors_array_t const * const structors,
bionic_tcb* temp_tcb) {
BIONIC_STOP_UNWIND; BIONIC_STOP_UNWIND;
// Initialize TLS early so system calls and errno work. // Initialize TLS early so system calls and errno work.
@@ -349,7 +361,7 @@ __noreturn static void __real_libc_init(void *raw_args,
__libc_init_main_thread_final(); __libc_init_main_thread_final();
__libc_init_common(); __libc_init_common();
__libc_init_mte(reinterpret_cast<ElfW(Phdr)*>(getauxval(AT_PHDR)), getauxval(AT_PHNUM), __libc_init_mte(reinterpret_cast<ElfW(Phdr)*>(getauxval(AT_PHDR)), getauxval(AT_PHNUM),
/*load_bias = */ 0); /*load_bias = */ 0, /*stack_top = */ raw_args);
__libc_init_scudo(); __libc_init_scudo();
__libc_init_profiling_handlers(); __libc_init_profiling_handlers();
__libc_init_fork_handler(); __libc_init_fork_handler();
@@ -379,11 +391,9 @@ extern "C" void __hwasan_init_static();
// //
// The 'structors' parameter contains pointers to various initializer // The 'structors' parameter contains pointers to various initializer
// arrays that must be run before the program's 'main' routine is launched. // arrays that must be run before the program's 'main' routine is launched.
__attribute__((no_sanitize("hwaddress"))) __attribute__((no_sanitize("hwaddress", "memtag"))) __noreturn void __libc_init(
__noreturn void __libc_init(void* raw_args, void* raw_args, void (*onexit)(void) __unused, int (*slingshot)(int, char**, char**),
void (*onexit)(void) __unused, structors_array_t const* const structors) {
int (*slingshot)(int, char**, char**),
structors_array_t const * const structors) {
bionic_tcb temp_tcb = {}; bionic_tcb temp_tcb = {};
#if __has_feature(hwaddress_sanitizer) #if __has_feature(hwaddress_sanitizer)
// Install main thread TLS early. It will be initialized later in __libc_init_main_thread. For now // Install main thread TLS early. It will be initialized later in __libc_init_main_thread. For now

View File

@@ -40,15 +40,16 @@
#include <async_safe/log.h> #include <async_safe/log.h>
#include "platform/bionic/macros.h"
#include "platform/bionic/mte.h"
#include "private/ErrnoRestorer.h"
#include "private/ScopedRWLock.h" #include "private/ScopedRWLock.h"
#include "private/bionic_constants.h" #include "private/bionic_constants.h"
#include "private/bionic_defs.h" #include "private/bionic_defs.h"
#include "private/bionic_globals.h" #include "private/bionic_globals.h"
#include "platform/bionic/macros.h"
#include "private/bionic_ssp.h" #include "private/bionic_ssp.h"
#include "private/bionic_systrace.h" #include "private/bionic_systrace.h"
#include "private/bionic_tls.h" #include "private/bionic_tls.h"
#include "private/ErrnoRestorer.h"
// x86 uses segment descriptors rather than a direct pointer to TLS. // x86 uses segment descriptors rather than a direct pointer to TLS.
#if defined(__i386__) #if defined(__i386__)
@@ -88,7 +89,13 @@ void __free_temp_bionic_tls(bionic_tls* tls) {
static void __init_alternate_signal_stack(pthread_internal_t* thread) { static void __init_alternate_signal_stack(pthread_internal_t* thread) {
// Create and set an alternate signal stack. // Create and set an alternate signal stack.
void* stack_base = mmap(nullptr, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); int prot = PROT_READ | PROT_WRITE;
#ifdef __aarch64__
if (atomic_load(&__libc_globals->memtag_stack)) {
prot |= PROT_MTE;
}
#endif
void* stack_base = mmap(nullptr, SIGNAL_STACK_SIZE, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (stack_base != MAP_FAILED) { if (stack_base != MAP_FAILED) {
// Create a guard to catch stack overflows in signal handlers. // Create a guard to catch stack overflows in signal handlers.
if (mprotect(stack_base, PTHREAD_GUARD_SIZE, PROT_NONE) == -1) { if (mprotect(stack_base, PTHREAD_GUARD_SIZE, PROT_NONE) == -1) {
@@ -224,12 +231,19 @@ ThreadMapping __allocate_thread_mapping(size_t stack_size, size_t stack_guard_si
return {}; return {};
} }
const size_t writable_size = mmap_size - stack_guard_size - PTHREAD_GUARD_SIZE; const size_t writable_size = mmap_size - stack_guard_size - PTHREAD_GUARD_SIZE;
if (mprotect(space + stack_guard_size, int prot = PROT_READ | PROT_WRITE;
writable_size, const char* prot_str = "R+W";
PROT_READ | PROT_WRITE) != 0) { #ifdef __aarch64__
async_safe_format_log(ANDROID_LOG_WARN, "libc", if (atomic_load(&__libc_globals->memtag_stack)) {
"pthread_create failed: couldn't mprotect R+W %zu-byte thread mapping region: %s", prot |= PROT_MTE;
writable_size, strerror(errno)); prot_str = "R+W+MTE";
}
#endif
if (mprotect(space + stack_guard_size, writable_size, prot) != 0) {
async_safe_format_log(
ANDROID_LOG_WARN, "libc",
"pthread_create failed: couldn't mprotect %s %zu-byte thread mapping region: %s", prot_str,
writable_size, strerror(errno));
munmap(space, mmap_size); munmap(space, mmap_size);
return {}; return {};
} }

View File

@@ -76,3 +76,4 @@
#define NT_MEMTAG_LEVEL_ASYNC 1 #define NT_MEMTAG_LEVEL_ASYNC 1
#define NT_MEMTAG_LEVEL_SYNC 2 #define NT_MEMTAG_LEVEL_SYNC 2
#define NT_MEMTAG_HEAP 4 #define NT_MEMTAG_HEAP 4
#define NT_MEMTAG_STACK 8

View File

@@ -47,6 +47,7 @@ struct libc_globals {
vdso_entry vdso[VDSO_END]; vdso_entry vdso[VDSO_END];
long setjmp_cookie; long setjmp_cookie;
uintptr_t heap_pointer_tag; uintptr_t heap_pointer_tag;
_Atomic(bool) memtag_stack;
// In order to allow a complete switch between dispatch tables without // In order to allow a complete switch between dispatch tables without
// the need for copying each function by function in the structure, // the need for copying each function by function in the structure,
@@ -112,6 +113,7 @@ struct libc_shared_globals {
const char* scudo_ring_buffer = nullptr; const char* scudo_ring_buffer = nullptr;
HeapTaggingLevel initial_heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE; HeapTaggingLevel initial_heap_tagging_level = M_HEAP_TAGGING_LEVEL_NONE;
bool initial_memtag_stack = false;
}; };
__LIBC_HIDDEN__ libc_shared_globals* __libc_shared_globals(); __LIBC_HIDDEN__ libc_shared_globals* __libc_shared_globals();

View File

@@ -68,7 +68,8 @@ static void get_elf_base_from_phdr(const ElfW(Phdr)* phdr_table, size_t phdr_cou
static void set_bss_vma_name(soinfo* si); static void set_bss_vma_name(soinfo* si);
void __libc_init_mte(const void* phdr_start, size_t phdr_count, uintptr_t load_bias); void __libc_init_mte(const void* phdr_start, size_t phdr_count, uintptr_t load_bias,
void* stack_top);
// These should be preserved static to avoid emitting // These should be preserved static to avoid emitting
// RELATIVE relocations for the part of the code running // RELATIVE relocations for the part of the code running
@@ -408,7 +409,7 @@ static ElfW(Addr) linker_main(KernelArgumentBlock& args, const char* exe_to_load
} }
} }
__libc_init_mte(somain->phdr, somain->phnum, somain->load_bias); __libc_init_mte(somain->phdr, somain->phnum, somain->load_bias, args.argv);
#endif #endif
// Register the main executable and the linker upfront to have // Register the main executable and the linker upfront to have