Be more strict about using invalid `pthread_t`s.
Another release, another attempt to remove the global thread list. But this time, let's admit that it's not going away. We can switch to using a read/write lock for the global thread list, and to aborting rather than quietly returning ESRCH if we're given an invalid pthread_t. This change affects pthread_detach, pthread_getcpuclockid, pthread_getschedparam/pthread_setschedparam, pthread_join, and pthread_kill: instead of returning ESRCH when passed an invalid pthread_t, if you're targeting O or above, they'll abort with the message "attempt to use invalid pthread_t". Note that this doesn't change behavior as much as you might think: the old lookup only held the global thread list lock for the duration of the lookup, so there was still a race between that and the dereference in the caller, given that callers actually need the tid to pass to some syscall or other, and sometimes update fields in the pthread_internal_t struct too. (This patch replaces such users with calls to pthread_gettid_np, which at least makes the TOCTOU window smaller.) We can't check thread->tid against 0 to see whether a pthread_t is still valid because a dead thread gets its thread struct unmapped along with its stack, so the dereference isn't safe. Taking the affected functions one by one: * pthread_getcpuclockid and pthread_getschedparam/pthread_setschedparam should be fine. Unsafe calls to those seem highly unlikely. * Unsafe pthread_detach callers probably want to switch to pthread_attr_setdetachstate instead, or using pthread_detach(pthread_self()) from the new thread's start routine rather than doing the detach in the parent. * pthread_join calls should be safe anyway, because a joinable thread won't actually exit and unmap until it's joined. If you're joining an unjoinable thread, the fix is to stop marking it detached. If you're joining an already-joined thread, you need to rethink your design. * Unsafe pthread_kill calls aren't portably fixable. 
(And are obviously inherently non-portable as-is.) The best alternative on Android is to use pthread_gettid_np at some point that you know the thread to be alive, and then call kill/tgkill directly. That's still not completely safe because if you're too late, the tid may have been reused, but then your code is inherently unsafe anyway. Bug: http://b/19636317 Test: ran tests Change-Id: I0372c4428e8a7f1c3af5c9334f5d9c25f2c73f21
This commit is contained in:
parent
132768084e
commit
11859d467c
|
@ -41,7 +41,6 @@
|
|||
#include "private/bionic_tls.h"
|
||||
#include "private/libc_logging.h"
|
||||
#include "private/ErrnoRestorer.h"
|
||||
#include "private/ScopedPthreadMutexLocker.h"
|
||||
|
||||
// x86 uses segment descriptors rather than a direct pointer to TLS.
|
||||
#if defined(__i386__)
|
||||
|
|
|
@ -31,13 +31,11 @@
|
|||
#include "pthread_internal.h"
|
||||
|
||||
int pthread_getcpuclockid(pthread_t t, clockid_t* clockid) {
|
||||
pthread_internal_t* thread = __pthread_internal_find(t);
|
||||
if (thread == NULL) {
|
||||
return ESRCH;
|
||||
}
|
||||
pid_t tid = pthread_gettid_np(t);
|
||||
if (tid == -1) return ESRCH;
|
||||
|
||||
// The tid is stored in the top bits, but negated.
|
||||
clockid_t result = ~static_cast<clockid_t>(thread->tid) << 3;
|
||||
clockid_t result = ~static_cast<clockid_t>(tid) << 3;
|
||||
// Bits 0 and 1: clock type (0 = CPUCLOCK_PROF, 1 = CPUCLOCK_VIRT, 2 = CPUCLOCK_SCHED).
|
||||
result |= 2;
|
||||
// Bit 2: thread (set) or process (clear)?
|
||||
|
|
|
@ -34,15 +34,10 @@
|
|||
int pthread_getschedparam(pthread_t t, int* policy, sched_param* param) {
|
||||
ErrnoRestorer errno_restorer;
|
||||
|
||||
pthread_internal_t* thread = __pthread_internal_find(t);
|
||||
if (thread == NULL) {
|
||||
return ESRCH;
|
||||
}
|
||||
pid_t tid = pthread_gettid_np(t);
|
||||
if (tid == -1) return ESRCH;
|
||||
|
||||
int rc = sched_getparam(thread->tid, param);
|
||||
if (rc == -1) {
|
||||
return errno;
|
||||
}
|
||||
*policy = sched_getscheduler(thread->tid);
|
||||
if (sched_getparam(tid, param) == -1) return errno;
|
||||
*policy = sched_getscheduler(tid);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -29,5 +29,6 @@
|
|||
#include "pthread_internal.h"
|
||||
|
||||
// Returns the kernel tid for pthread_t `t`, or -1 if `t` doesn't name a live
// thread. Note that __pthread_internal_find aborts on an invalid pthread_t
// when the caller targets O or later, so -1 is only seen by older targets.
pid_t pthread_gettid_np(pthread_t t) {
  pthread_internal_t* thread = __pthread_internal_find(t);
  return thread ? thread->tid : -1;
}
|
||||
|
|
|
@ -34,20 +34,38 @@
|
|||
#include <sys/mman.h>
|
||||
|
||||
#include "private/bionic_futex.h"
|
||||
#include "private/bionic_sdk_version.h"
|
||||
#include "private/bionic_tls.h"
|
||||
#include "private/libc_logging.h"
|
||||
#include "private/ScopedPthreadMutexLocker.h"
|
||||
|
||||
static pthread_internal_t* g_thread_list = NULL;
|
||||
static pthread_mutex_t g_thread_list_lock = PTHREAD_MUTEX_INITIALIZER;
|
||||
static pthread_internal_t* g_thread_list = nullptr;
|
||||
static pthread_rwlock_t g_thread_list_lock = PTHREAD_RWLOCK_INITIALIZER;
|
||||
|
||||
template <bool write> class ScopedRWLock {
|
||||
public:
|
||||
ScopedRWLock(pthread_rwlock_t* rwlock) : rwlock_(rwlock) {
|
||||
(write ? pthread_rwlock_wrlock : pthread_rwlock_rdlock)(rwlock_);
|
||||
}
|
||||
|
||||
~ScopedRWLock() {
|
||||
pthread_rwlock_unlock(rwlock_);
|
||||
}
|
||||
|
||||
private:
|
||||
pthread_rwlock_t* rwlock_;
|
||||
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedRWLock);
|
||||
};
|
||||
|
||||
typedef ScopedRWLock<true> ScopedWriteLock;
|
||||
typedef ScopedRWLock<false> ScopedReadLock;
|
||||
|
||||
pthread_t __pthread_internal_add(pthread_internal_t* thread) {
|
||||
ScopedPthreadMutexLocker locker(&g_thread_list_lock);
|
||||
ScopedWriteLock locker(&g_thread_list_lock);
|
||||
|
||||
// We insert at the head.
|
||||
thread->next = g_thread_list;
|
||||
thread->prev = NULL;
|
||||
if (thread->next != NULL) {
|
||||
thread->prev = nullptr;
|
||||
if (thread->next != nullptr) {
|
||||
thread->next->prev = thread;
|
||||
}
|
||||
g_thread_list = thread;
|
||||
|
@ -55,12 +73,12 @@ pthread_t __pthread_internal_add(pthread_internal_t* thread) {
|
|||
}
|
||||
|
||||
void __pthread_internal_remove(pthread_internal_t* thread) {
|
||||
ScopedPthreadMutexLocker locker(&g_thread_list_lock);
|
||||
ScopedWriteLock locker(&g_thread_list_lock);
|
||||
|
||||
if (thread->next != NULL) {
|
||||
if (thread->next != nullptr) {
|
||||
thread->next->prev = thread->prev;
|
||||
}
|
||||
if (thread->prev != NULL) {
|
||||
if (thread->prev != nullptr) {
|
||||
thread->prev->next = thread->next;
|
||||
} else {
|
||||
g_thread_list = thread->next;
|
||||
|
@ -82,17 +100,17 @@ void __pthread_internal_remove_and_free(pthread_internal_t* thread) {
|
|||
// Maps a pthread_t back to its pthread_internal_t* by walking the global
// thread list. Returns nullptr if the thread isn't on the list (pre-O
// targets), or aborts the process (O and later) because an invalid pthread_t
// can never be used safely.
pthread_internal_t* __pthread_internal_find(pthread_t thread_id) {
  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(thread_id);

  // Check if we're looking for ourselves before acquiring the lock.
  if (thread == __get_thread()) return thread;

  {
    ScopedReadLock locker(&g_thread_list_lock);
    for (pthread_internal_t* t = g_thread_list; t != nullptr; t = t->next) {
      if (t == thread) return thread;
    }
  }

  // Historically we'd return null, but callers can't use an invalid
  // pthread_t safely, so from O onwards we abort instead.
  if (bionic_get_application_target_sdk_version() >= __ANDROID_API_O__) {
    __libc_fatal("attempt to use invalid pthread_t");
  }
  return nullptr;
}
|
||||
|
|
|
@ -35,10 +35,8 @@
|
|||
int pthread_kill(pthread_t t, int sig) {
|
||||
ErrnoRestorer errno_restorer;
|
||||
|
||||
pthread_internal_t* thread = __pthread_internal_find(t);
|
||||
if (thread == NULL) {
|
||||
return ESRCH;
|
||||
}
|
||||
pid_t tid = pthread_gettid_np(t);
|
||||
if (tid == -1) return ESRCH;
|
||||
|
||||
return (tgkill(getpid(), thread->tid, sig) == -1) ? errno : 0;
|
||||
return (tgkill(getpid(), tid, sig) == -1) ? errno : 0;
|
||||
}
|
||||
|
|
|
@ -43,14 +43,8 @@
|
|||
#define MAX_TASK_COMM_LEN 16
|
||||
|
||||
// Opens /proc/self/task/<tid>/comm for thread `t` with the given open(2)
// flags (plus O_CLOEXEC). Returns the fd, or -1 with errno set by open(2).
// For invalid pthread_ts, pthread_gettid_np aborts when targeting O+.
static int __open_task_comm_fd(pthread_t t, int flags) {
  char comm_name[64];
  snprintf(comm_name, sizeof(comm_name), "/proc/self/task/%d/comm", pthread_gettid_np(t));
  return open(comm_name, O_CLOEXEC | flags);
}
|
||||
|
||||
|
|
|
@ -34,14 +34,8 @@
|
|||
int pthread_setschedparam(pthread_t t, int policy, const sched_param* param) {
|
||||
ErrnoRestorer errno_restorer;
|
||||
|
||||
pthread_internal_t* thread = __pthread_internal_find(t);
|
||||
if (thread == NULL) {
|
||||
return ESRCH;
|
||||
}
|
||||
pid_t tid = pthread_gettid_np(t);
|
||||
if (tid == -1) return ESRCH;
|
||||
|
||||
int rc = sched_setscheduler(thread->tid, policy, param);
|
||||
if (rc == -1) {
|
||||
return errno;
|
||||
}
|
||||
return 0;
|
||||
return (sched_setscheduler(tid, policy, param) == -1) ? errno : 0;
|
||||
}
|
||||
|
|
|
@ -443,14 +443,20 @@ TEST(pthread, pthread_setname_np__pthread_getname_np__other_PR_SET_DUMPABLE) {
|
|||
ASSERT_EQ(0, pthread_join(t, nullptr));
|
||||
}
|
||||
|
||||
// Using a dead thread's pthread_t must abort (O+), not return an error code.
TEST_F(pthread_DeathTest, pthread_setname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_setname_np(dead_thread, "short 3"), "attempt to use invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_getname_np__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  char name[64];
  EXPECT_DEATH(pthread_getname_np(dead_thread, name, sizeof(name)),
               "attempt to use invalid pthread_t");
}
|
||||
|
||||
TEST(pthread, pthread_kill__0) {
|
||||
|
@ -476,11 +482,11 @@ TEST(pthread, pthread_kill__in_signal_handler) {
|
|||
ASSERT_EQ(0, pthread_kill(pthread_self(), SIGALRM));
|
||||
}
|
||||
|
||||
// Detaching a dead thread's pthread_t must abort (O+), not return ESRCH.
TEST_F(pthread_DeathTest, pthread_detach__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_detach(dead_thread), "attempt to use invalid pthread_t");
}
|
||||
|
||||
TEST(pthread, pthread_getcpuclockid__clock_gettime) {
|
||||
|
@ -497,44 +503,46 @@ TEST(pthread, pthread_getcpuclockid__clock_gettime) {
|
|||
ASSERT_EQ(0, pthread_join(t, nullptr));
|
||||
}
|
||||
|
||||
// Each of these verifies that passing a dead thread's pthread_t aborts with
// the documented message (O+ behavior), rather than quietly returning ESRCH.
// (The scraped diff's "&param;" HTML-entity mangling is repaired here: the
// calls pass &param, the address of the sched_param.)
TEST_F(pthread_DeathTest, pthread_getcpuclockid__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  clockid_t c;
  EXPECT_DEATH(pthread_getcpuclockid(dead_thread, &c), "attempt to use invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_getschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy;
  sched_param param;
  EXPECT_DEATH(pthread_getschedparam(dead_thread, &policy, &param),
               "attempt to use invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_setschedparam__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  int policy = 0;
  sched_param param;
  EXPECT_DEATH(pthread_setschedparam(dead_thread, policy, &param),
               "attempt to use invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_join__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_join(dead_thread, NULL), "attempt to use invalid pthread_t");
}

TEST_F(pthread_DeathTest, pthread_kill__no_such_thread) {
  pthread_t dead_thread;
  MakeDeadThread(dead_thread);

  EXPECT_DEATH(pthread_kill(dead_thread, 0), "attempt to use invalid pthread_t");
}
|
||||
|
||||
TEST(pthread, pthread_join__multijoin) {
|
||||
|
|
|
@ -509,14 +509,14 @@ TEST(time, timer_delete_terminates) {
|
|||
|
||||
// Shared state between the timer-deletion tests and the SIGEV_THREAD callback.
struct TimerDeleteData {
  timer_t timer_id;
  pid_t tid;               // kernel tid of the callback thread, set by the callback
  volatile bool complete;  // set once the callback has finished
};

// SIGEV_THREAD callback: records the callback thread's tid (the test later
// polls kill(tid, 0) to confirm the thread exits), deletes the timer from
// inside its own callback, and signals completion.
static void TimerDeleteCallback(sigval_t value) {
  TimerDeleteData* tdd = reinterpret_cast<TimerDeleteData*>(value.sival_ptr);

  tdd->tid = gettid();
  timer_delete(tdd->timer_id);
  tdd->complete = true;
}
||||
|
@ -548,8 +548,9 @@ TEST(time, timer_delete_from_timer_thread) {
|
|||
// Since bionic timers are implemented by creating a thread to handle the
|
||||
// callback, verify that the thread actually completes.
|
||||
cur_time = time(NULL);
|
||||
while (pthread_detach(tdd.thread_id) != ESRCH && (time(NULL) - cur_time) < 5);
|
||||
ASSERT_EQ(ESRCH, pthread_detach(tdd.thread_id));
|
||||
while ((kill(tdd.tid, 0) != -1 || errno != ESRCH) && (time(NULL) - cur_time) < 5);
|
||||
ASSERT_EQ(-1, kill(tdd.tid, 0));
|
||||
ASSERT_EQ(ESRCH, errno);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue