Initialize static TLS memory using module list

This implementation simply iterates over each static TLS module and
copies its initialization image into a new thread's static TLS block.

Bug: http://b/78026329
Test: bionic unit tests
Change-Id: Ib7edb665271a07010bc68e306feb5df422f2f9e6
Ryan Prichard 2019-01-15 13:45:27 -08:00
parent e5e69e0912
commit 361c1b4a3b
4 changed files with 38 additions and 1 deletion

@@ -126,6 +126,7 @@ extern "C" void __libc_init_main_thread_final() {
   auto new_tcb = reinterpret_cast<bionic_tcb*>(mapping.static_tls + layout.offset_bionic_tcb());
   auto new_tls = reinterpret_cast<bionic_tls*>(mapping.static_tls + layout.offset_bionic_tls());
 
+  __init_static_tls(mapping.static_tls);
   new_tcb->copy_from_bootstrap(temp_tcb);
   new_tls->copy_from_bootstrap(temp_tls);
   __init_tcb(new_tcb, &main_thread);
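
Note the ordering here: __init_static_tls writes the module images into the freshly mapped block before the bootstrap TCB/TLS values are copied over their slots. It relies on the block being zero-filled on entry (see the comment on __init_static_tls below), which anonymous mappings give for free. A minimal sketch of that precondition; allocate_static_tls is a hypothetical stand-in for bionic's real mapping code, which this diff does not show:

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

// Hypothetical allocator: anonymous mmap pages arrive zero-filled from the
// kernel, which is the "already zero-initialized" precondition relied on here.
static void* allocate_static_tls(size_t size) {
  void* block = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return block == MAP_FAILED ? nullptr : block;
}

int main() {
  const size_t kSize = 4096;
  char* tls = static_cast<char*>(allocate_static_tls(kSize));
  assert(tls != nullptr);
  for (size_t i = 0; i < kSize; ++i) assert(tls[i] == 0);  // kernel-zeroed
  munmap(tls, kSize);
  return 0;
}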

@@ -29,9 +29,12 @@
 #include "private/bionic_elf_tls.h"
 
 #include <async_safe/log.h>
+#include <string.h>
 #include <sys/param.h>
 #include <unistd.h>
 
+#include "private/ScopedRWLock.h"
+#include "private/bionic_globals.h"
 #include "private/bionic_macros.h"
 #include "private/bionic_tls.h"
@@ -147,3 +150,33 @@ size_t StaticTlsLayout::round_up_with_overflow_check(size_t value, size_t alignm
   if (value < old_value) overflowed_ = true;
   return value;
 }
+
+// Copy each TLS module's initialization image into a newly-allocated block of
+// static TLS memory. To reduce dirty pages, this function only writes to pages
+// within the static TLS that need initialization. The memory should already be
+// zero-initialized on entry.
+void __init_static_tls(void* static_tls) {
+  // The part of the table we care about (i.e. static TLS modules) never changes
+  // after startup, but we still need the mutex because the table could grow,
+  // moving the initial part. If this locking is too slow, we can duplicate the
+  // static part of the table.
+  TlsModules& modules = __libc_shared_globals()->tls_modules;
+  ScopedReadLock locker(&modules.rwlock);
+
+  for (size_t i = 0; i < modules.module_count; ++i) {
+    TlsModule& module = modules.module_table[i];
+    if (module.static_offset == SIZE_MAX) {
+      // All of the static modules come before all of the dynamic modules, so
+      // once we see the first dynamic module, we're done.
+      break;
+    }
+    if (module.segment.init_size == 0) {
+      // Skip the memcpy call for TLS segments with no initializer, which is
+      // common.
+      continue;
+    }
+    memcpy(static_cast<char*>(static_tls) + module.static_offset,
+           module.segment.init_ptr,
+           module.segment.init_size);
+  }
+}
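
As a self-contained illustration of the walk above, here is a toy re-creation; ToyModule and ToySegment are invented types, not bionic's real TlsModule/TlsSegment definitions. Static modules carry a real offset, dynamic modules are marked with static_offset == SIZE_MAX, and zero-length init images are skipped:

#include <assert.h>
#include <stdint.h>
#include <string.h>

// Invented stand-ins with just the fields the loop above touches.
struct ToySegment {
  size_t init_size;
  const void* init_ptr;
};
struct ToyModule {
  size_t static_offset;  // SIZE_MAX marks a dynamically-loaded module
  ToySegment segment;
};

// Same shape as __init_static_tls: copy each static module's init image,
// stop at the first dynamic module, skip empty images.
static void toy_init_static_tls(char* static_tls, const ToyModule* modules,
                                size_t count) {
  for (size_t i = 0; i < count; ++i) {
    const ToyModule& m = modules[i];
    if (m.static_offset == SIZE_MAX) break;  // dynamic modules follow
    if (m.segment.init_size == 0) continue;  // all-zero TLS segment
    memcpy(static_tls + m.static_offset, m.segment.init_ptr,
           m.segment.init_size);
  }
}

int main() {
  const char image[] = {1, 2, 3, 4};
  const ToyModule mods[] = {
      {0, {sizeof(image), image}},         // static module with an init image
      {16, {0, nullptr}},                  // static module, zero-initialized
      {SIZE_MAX, {sizeof(image), image}},  // dynamic module: never copied
  };
  char block[32] = {};
  toy_init_static_tls(block, mods, 3);
  assert(memcmp(block, image, sizeof(image)) == 0);
  assert(block[16] == 0 && block[31] == 0);
  return 0;
}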

@@ -288,7 +288,8 @@ static int __allocate_thread(pthread_attr_t* attr, bionic_tcb** tcbp, void** chi
   auto tcb = reinterpret_cast<bionic_tcb*>(mapping.static_tls + layout.offset_bionic_tcb());
   auto tls = reinterpret_cast<bionic_tls*>(mapping.static_tls + layout.offset_bionic_tls());
 
-  // (Re)initialize TLS pointers.
+  // Initialize TLS memory.
+  __init_static_tls(mapping.static_tls);
   __init_tcb(tcb, thread);
   __init_tcb_stack_guard(tcb);
   __init_bionic_tls_ptrs(tcb, tls);

@@ -115,3 +115,5 @@ struct TlsModules {
   size_t module_count = 0;
   TlsModule* module_table = nullptr;
 };
+
+void __init_static_tls(void* static_tls);
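
The rwlock in TlsModules pairs with the ScopedReadLock taken in __init_static_tls: a reader must hold it across the whole walk, because a writer growing the table may reallocate it and move the entries out from under a raw pointer. A rough sketch of that pattern using std::shared_mutex in place of bionic's lock types (all names here are invented for illustration):

#include <cstddef>
#include <cstdint>
#include <shared_mutex>

struct Module { size_t static_offset; };

struct ModuleTable {
  std::shared_mutex rwlock;  // writers may grow (and so reallocate) the table
  size_t count = 0;
  Module* table = nullptr;   // only stable while a lock is held
};

// Walk the static prefix under a shared lock, mirroring __init_static_tls.
size_t count_static_modules(ModuleTable& m) {
  std::shared_lock<std::shared_mutex> lock(m.rwlock);
  size_t n = 0;
  for (size_t i = 0; i < m.count; ++i) {
    if (m.table[i].static_offset == SIZE_MAX) break;  // dynamic modules follow
    ++n;
  }
  return n;
}

int main() {
  Module mods[] = {{0}, {16}, {SIZE_MAX}};
  ModuleTable m;
  m.count = 3;
  m.table = mods;
  return count_static_modules(m) == 2 ? 0 : 1;
}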