Align-up and randomize shared libraries.

This change implements the following property:
  Any 2**N-aligned memory region of size 2**N contains no more than one DSO.

The value of N is configurable, with 16 and 18 both looking like good choices.
Additionally, DSOs are loaded at a random page-aligned address inside these large
regions.
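
Put differently, if the 2**N-sized windows covered by a mapping are computed as in the
minimal sketch below (illustrative only, using a hypothetical windows_covered() helper and
N = 18), no window index ever appears in the ranges of two different DSOs:

  #include <cstddef>
  #include <cstdint>
  #include <utility>

  constexpr unsigned kN = 18;  // example value of N
  // Inclusive range of 2**N windows covered by a mapping [start, start + size).
  static std::pair<uintptr_t, uintptr_t> windows_covered(uintptr_t start, size_t size) {
    return {start >> kN, (start + size - 1) >> kN};
  }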

This change has a dual purpose:
1. Larger values of N allow a much more compact CFI shadow implementation.
   See change I14dfea630de468eb5620e7f55f92b1397ba06217.
   For example, the CFI shadow for the system_server process has the following size (RSS, KB):
   152 for N = 12, 32 for N = 16, 16 for N = 18.
2. The extra randomization is good for security.

This change does not result in extra RAM usage, because everything is still page-aligned.
It does result in a bit more VM fragmentation because of the gaps between shared libraries.
As it turns out, this fragmentation is barely noticeable, because the kernel creates new mappings
at the highest possible address and we make enough small mappings to almost completely fill the
gaps (e.g. in the Zygote the gaps are filled with .ttf file mappings and thread stacks).

I've measured VM fragmentation as the sum of all VM gaps (unmapped regions) larger than 1MB
according to /proc/$PID/maps (a sketch of this measurement follows the table below). On
aosp_angler-userdebug, the numbers are (in GB):

                |   N = 12  |  N = 18
system_server   |   521.9   |  521.1
zygote64        |   522.1   |  521.3
zygote32        |   2.55    |  2.55
mediaserver     |   4.00    |  4.00
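
A minimal C++ sketch of such a measurement (it assumes the usual /proc/<pid>/maps line format
of "start-end perms ...", and reads the current process's own maps rather than an arbitrary $PID):

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>
  #include <fstream>
  #include <string>

  int main() {
    std::ifstream maps("/proc/self/maps");
    std::string line;
    uint64_t prev_end = 0;
    uint64_t gap_total = 0;
    while (std::getline(maps, line)) {
      uint64_t start, end;
      if (sscanf(line.c_str(), "%" SCNx64 "-%" SCNx64, &start, &end) != 2) continue;
      if (prev_end != 0 && start - prev_end > (1ULL << 20)) {
        gap_total += start - prev_end;  // only count gaps larger than 1MB
      }
      prev_end = end;
    }
    printf("sum of >1MB gaps: %" PRIu64 " bytes\n", gap_total);
    return 0;
  }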

Change-Id: Ia6df840dd409c82837efd1f263be420d9723c84a
Evgenii Stepanov 2016-07-15 16:31:42 -07:00
parent d941f72e75
commit d13e9a603f
4 changed files with 60 additions and 3 deletions


@@ -45,6 +45,7 @@
#include <unistd.h>
#include <wchar.h>
#include "private/bionic_macros.h"
#include "private/libc_logging.h"
extern "C" {


@@ -17,6 +17,8 @@
#ifndef _BIONIC_MACROS_H_
#define _BIONIC_MACROS_H_
#include <stdint.h>
// Frameworks OpenGL code currently leaks this header and allows
// collisions with other declarations, e.g., from libnativehelper.
// TODO: Remove once cleaned up. b/18334516
@@ -46,4 +48,22 @@
? (1UL << (64 - __builtin_clzl(static_cast<unsigned long>(value)))) \
: (1UL << (32 - __builtin_clz(static_cast<unsigned int>(value)))))
// These helpers assume that align is a power of two.
static inline uintptr_t align_down(uintptr_t p, size_t align) {
  return p & ~(align - 1);
}

static inline uintptr_t align_up(uintptr_t p, size_t align) {
  return (p + align - 1) & ~(align - 1);
}

template <typename T>
static inline T* align_down(T* p, size_t align) {
  return reinterpret_cast<T*>(align_down(reinterpret_cast<uintptr_t>(p), align));
}

template <typename T>
static inline T* align_up(T* p, size_t align) {
  return reinterpret_cast<T*>(align_up(reinterpret_cast<uintptr_t>(p), align));
}
#endif // _BIONIC_MACROS_H_
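
A quick usage sketch of these helpers (illustrative only, not part of the header):

  #include <cassert>
  #include "private/bionic_macros.h"

  int main() {
    assert(align_down(uintptr_t{0x12345}, 0x1000) == 0x12000);  // round down to a 4 KB boundary
    assert(align_up(uintptr_t{0x12345}, 0x1000) == 0x13000);    // round up to the next boundary
    assert(align_up(uintptr_t{0x12000}, 0x1000) == 0x12000);    // aligned values are unchanged
    return 0;
  }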


@@ -503,4 +503,7 @@ android_namespace_t* create_namespace(const void* caller_addr,
const char* permitted_when_isolated_path,
android_namespace_t* parent_namespace);
constexpr unsigned kLibraryAlignmentBits = 18;
constexpr size_t kLibraryAlignment = 1UL << kLibraryAlignmentBits;
#endif
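
With the default kLibraryAlignmentBits = 18, each alignment region is 256 KB; an illustrative
compile-time check (not part of the header) would be:

  static_assert(kLibraryAlignment == 256 * 1024, "2**18 bytes = 256 KB per region");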


@@ -449,6 +449,40 @@ size_t phdr_table_get_load_size(const ElfW(Phdr)* phdr_table, size_t phdr_count,
return max_vaddr - min_vaddr;
}
// Reserve a virtual address range such that, if its limits were extended to the next align
// boundary, it would not overlap with any existing mappings.
static void* ReserveAligned(void* hint, size_t size, size_t align) {
  int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
  // Address hint is only used in Art for the image mapping, and it is pretty important.
  // Don't mess with it.
  // FIXME: try an aligned allocation and fall back to plain mmap() if the former does not
  // provide a mapping at the requested address?
  if (align == PAGE_SIZE || hint != nullptr) {
    void* mmap_ptr = mmap(hint, size, PROT_NONE, mmap_flags, -1, 0);
    if (mmap_ptr == MAP_FAILED) {
      return nullptr;
    }
    return mmap_ptr;
  }

  // Allocate enough space so that the end of the desired region, aligned up, is still inside
  // the mapping.
  size_t mmap_size = align_up(size, align) + align - PAGE_SIZE;
  uint8_t* mmap_ptr =
      reinterpret_cast<uint8_t*>(mmap(nullptr, mmap_size, PROT_NONE, mmap_flags, -1, 0));
  if (mmap_ptr == MAP_FAILED) {
    return nullptr;
  }

  // Pick a random page-aligned start in [first, last], then unmap the unused head and tail
  // of the reservation.
  uint8_t* first = align_up(mmap_ptr, align);
  uint8_t* last = align_down(mmap_ptr + mmap_size, align) - size;
  size_t n = arc4random_uniform((last - first) / PAGE_SIZE + 1);
  uint8_t* start = first + n * PAGE_SIZE;
  munmap(mmap_ptr, start - mmap_ptr);
  munmap(start + size, mmap_ptr + mmap_size - (start + size));
  return start;
}
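
A worked example of the sizing, with illustrative numbers (assuming PAGE_SIZE = 4 KB,
align = kLibraryAlignment = 256 KB = 0x40000, size = 320 KB = 0x50000):

  mmap_size = align_up(0x50000, 0x40000) + 0x40000 - 0x1000 = 0xbf000        // 764 KB reserved
  // if mmap() happens to return mmap_ptr = 0x7f1234561000:
  first = align_up(0x7f1234561000, 0x40000)                       = 0x7f1234580000
  last  = align_down(0x7f1234561000 + 0xbf000, 0x40000) - 0x50000 = 0x7f12345b0000
  // (last - first) / PAGE_SIZE + 1 = 49 candidate page-aligned starts; for any of them,
  // extending [start, start + size) out to the surrounding 256 KB boundaries stays inside
  // the original reservation, so the extended region overlaps no pre-existing mapping.
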
// Reserve a virtual address range big enough to hold all loadable
// segments of a program header table. This is done by creating a
// private anonymous mmap() with PROT_NONE.
@@ -490,9 +524,8 @@ bool ElfReader::ReserveAddressSpace(const android_dlextinfo* extinfo) {
              reserved_size - load_size_, load_size_, name_.c_str());
       return false;
     }
-    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
-    start = mmap(mmap_hint, load_size_, PROT_NONE, mmap_flags, -1, 0);
-    if (start == MAP_FAILED) {
+    start = ReserveAligned(mmap_hint, load_size_, kLibraryAlignment);
+    if (start == nullptr) {
       DL_ERR("couldn't reserve %zd bytes of address space for \"%s\"", load_size_, name_.c_str());
       return false;
     }