Only wipe TLS for user-supplied stacks.
Bug: 16667988
Change-Id: Id180ab2bc6713e1612386120a306db5bbf1d6046
parent 4ad5066e1d
commit 40a5217448
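A stack that bionic maps for a new thread comes from a fresh anonymous mmap and so starts out zero-filled; a caller-supplied stack can contain anything, which is why the wipe is now limited to (and widened for) that case. As a reminder of how a caller ends up on the user-allocated-stack path, here is a minimal sketch (assumed names, mirroring the new pthread_key_dirty test below; not part of this commit):

// Sketch: a thread whose stack is owned by the caller rather than by bionic.
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>

static void* NoOpFn(void* arg) { return arg; }

int main() {
  const size_t stack_size = 128 * 1024;
  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (stack == MAP_FAILED) return 1;
  memset(stack, 0xff, stack_size);  // simulate handing bionic a dirty, reused buffer

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setstack(&attr, stack, stack_size);  // bionic now treats this stack as user-allocated

  pthread_t t;
  pthread_create(&t, &attr, NoOpFn, NULL);
  pthread_join(t, NULL);
  return munmap(stack, stack_size);
}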
@@ -51,9 +51,9 @@ extern "C" int __isthreaded;
 // This code is used both by each new pthread and the code that initializes the main thread.
 void __init_tls(pthread_internal_t* thread) {
-  // Zero-initialize all the slots after TLS_SLOT_SELF and TLS_SLOT_THREAD_ID.
-  for (size_t i = TLS_SLOT_ERRNO; i < BIONIC_TLS_SLOTS; ++i) {
-    thread->tls[i] = NULL;
+  if (thread->user_allocated_stack()) {
+    // We don't know where the user got their stack, so assume the worst and zero the TLS area.
+    memset(&thread->tls[0], 0, BIONIC_TLS_SLOTS * sizeof(void*));
   }
 
   // Slot 0 must point to itself. The x86 Linux kernel reads the TLS from %fs:0.
@@ -66,7 +66,7 @@ void __init_tls(pthread_internal_t* thread) {
 void __init_alternate_signal_stack(pthread_internal_t* thread) {
   // Create and set an alternate signal stack.
   stack_t ss;
-  ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+  ss.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
   if (ss.ss_sp != MAP_FAILED) {
     ss.ss_size = SIGSTKSZ;
     ss.ss_flags = 0;
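The fd argument change in this hunk looks like an incidental cleanup: Linux ignores the fd when MAP_ANONYMOUS is set, but portable callers are expected to pass -1 rather than 0, i.e. mmap(NULL, SIGSTKSZ, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0).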
@@ -227,7 +227,7 @@ int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
     // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
     // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
     pthread_mutex_unlock(&thread->startup_handshake_mutex);
-    if ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) == 0) {
+    if (!thread->user_allocated_stack()) {
       munmap(thread->attr.stack_base, thread->attr.stack_size);
     }
     free(thread);
@@ -90,7 +90,7 @@ void pthread_exit(void* return_value) {
   // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
   void* stack_base = thread->attr.stack_base;
   size_t stack_size = thread->attr.stack_size;
-  bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);
+  bool user_allocated_stack = thread->user_allocated_stack();
 
   pthread_mutex_lock(&g_thread_list_lock);
   if ((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) != 0) {
@@ -30,6 +30,18 @@
 
 #include <pthread.h>
 
+/* Has the thread been detached by a pthread_join or pthread_detach call? */
+#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001
+
+/* Was the thread's stack allocated by the user rather than by us? */
+#define PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK 0x00000002
+
+/* Has the thread been joined by another thread? */
+#define PTHREAD_ATTR_FLAG_JOINED 0x00000004
+
+/* Is this the main thread? */
+#define PTHREAD_ATTR_FLAG_MAIN_THREAD 0x80000000
+
 struct pthread_internal_t {
   struct pthread_internal_t* next;
   struct pthread_internal_t* prev;
@@ -56,6 +68,10 @@ struct pthread_internal_t {
     return (*cached_pid != 0);
   }
 
+  bool user_allocated_stack() {
+    return (attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0;
+  }
+
   void** tls;
 
   pthread_attr_t attr;
@@ -87,20 +103,8 @@ __LIBC_HIDDEN__ pthread_internal_t* __get_thread(void);
 __LIBC_HIDDEN__ void pthread_key_clean_all(void);
 __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread);
 
-/* Has the thread been detached by a pthread_join or pthread_detach call? */
-#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001
-
-/* Was the thread's stack allocated by the user rather than by us? */
-#define PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK 0x00000002
-
-/* Has the thread been joined by another thread? */
-#define PTHREAD_ATTR_FLAG_JOINED 0x00000004
-
-/* Is this the main thread? */
-#define PTHREAD_ATTR_FLAG_MAIN_THREAD 0x80000000
-
 /*
- * Traditionally we give threads a 1MiB stack. When we started
+ * Traditionally we gave threads a 1MiB stack. When we started
  * allocating per-thread alternate signal stacks to ease debugging of
  * stack overflows, we subtracted the same amount we were using there
  * from the default thread stack size. This should keep memory usage
@@ -82,6 +82,57 @@ TEST(pthread, pthread_key_delete) {
   ASSERT_EQ(EINVAL, pthread_setspecific(key, expected));
 }
 
+TEST(pthread, pthread_key_fork) {
+  void* expected = reinterpret_cast<void*>(1234);
+  pthread_key_t key;
+  ASSERT_EQ(0, pthread_key_create(&key, NULL));
+  ASSERT_EQ(0, pthread_setspecific(key, expected));
+  ASSERT_EQ(expected, pthread_getspecific(key));
+
+  pid_t pid = fork();
+  ASSERT_NE(-1, pid) << strerror(errno);
+
+  if (pid == 0) {
+    // The surviving thread inherits all the forking thread's TLS values...
+    ASSERT_EQ(expected, pthread_getspecific(key));
+    _exit(99);
+  }
+
+  int status;
+  ASSERT_EQ(pid, waitpid(pid, &status, 0));
+  ASSERT_TRUE(WIFEXITED(status));
+  ASSERT_EQ(99, WEXITSTATUS(status));
+
+  ASSERT_EQ(expected, pthread_getspecific(key));
+}
+
+static void* DirtyKeyFn(void* key) {
+  return pthread_getspecific(*reinterpret_cast<pthread_key_t*>(key));
+}
+
+TEST(pthread, pthread_key_dirty) {
+  pthread_key_t key;
+  ASSERT_EQ(0, pthread_key_create(&key, NULL));
+
+  size_t stack_size = 128 * 1024;
+  void* stack = mmap(NULL, stack_size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+  ASSERT_NE(MAP_FAILED, stack);
+  memset(stack, 0xff, stack_size);
+
+  pthread_attr_t attr;
+  ASSERT_EQ(0, pthread_attr_init(&attr));
+  ASSERT_EQ(0, pthread_attr_setstack(&attr, stack, stack_size));
+
+  pthread_t t;
+  ASSERT_EQ(0, pthread_create(&t, &attr, DirtyKeyFn, &key));
+
+  void* result;
+  ASSERT_EQ(0, pthread_join(t, &result));
+  ASSERT_EQ(nullptr, result); // Not ~0!
+
+  ASSERT_EQ(0, munmap(stack, stack_size));
+}
+
 static void* IdFn(void* arg) {
   return arg;
 }
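Of the new tests, pthread_key_fork checks that a forked child still observes the TLS values the forking thread had set, and pthread_key_dirty starts a thread on a deliberately 0xff-filled, caller-supplied stack and checks that an unset key still reads back as NULL rather than the fill pattern (the "// Not ~0!" assertion).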