diff --git a/libc/include/malloc.h b/libc/include/malloc.h index f7beb2c10..40786fad6 100644 --- a/libc/include/malloc.h +++ b/libc/include/malloc.h @@ -97,30 +97,31 @@ void* memalign(size_t __alignment, size_t __byte_count) __mallocfunc __BIONIC_AL */ size_t malloc_usable_size(const void* __ptr) __INTRODUCED_IN(17); +#define __MALLINFO_BODY \ + /** Total number of non-mmapped bytes currently allocated from OS. */ \ + size_t arena; \ + /** Number of free chunks. */ \ + size_t ordblks; \ + /** (Unused.) */ \ + size_t smblks; \ + /** (Unused.) */ \ + size_t hblks; \ + /** Total number of bytes in mmapped regions. */ \ + size_t hblkhd; \ + /** Maximum total allocated space; greater than total if trimming has occurred. */ \ + size_t usmblks; \ + /** (Unused.) */ \ + size_t fsmblks; \ + /** Total allocated space (normal or mmapped.) */ \ + size_t uordblks; \ + /** Total free space. */ \ + size_t fordblks; \ + /** Upper bound on number of bytes releasable by a trim operation. */ \ + size_t keepcost; + #ifndef STRUCT_MALLINFO_DECLARED #define STRUCT_MALLINFO_DECLARED 1 -struct mallinfo { - /** Total number of non-mmapped bytes currently allocated from OS. */ - size_t arena; - /** Number of free chunks. */ - size_t ordblks; - /** (Unused.) */ - size_t smblks; - /** (Unused.) */ - size_t hblks; - /** Total number of bytes in mmapped regions. */ - size_t hblkhd; - /** Maximum total allocated space; greater than total if trimming has occurred. */ - size_t usmblks; - /** (Unused.) */ - size_t fsmblks; - /** Total allocated space (normal or mmapped.) */ - size_t uordblks; - /** Total free space. */ - size_t fordblks; - /** Upper bound on number of bytes releasable by a trim operation. */ - size_t keepcost; -}; +struct mallinfo { __MALLINFO_BODY }; #endif /** @@ -130,6 +131,18 @@ struct mallinfo { */ struct mallinfo mallinfo(void); +/** + * On Android the struct mallinfo and struct mallinfo2 are the same. 
+ */
+struct mallinfo2 { __MALLINFO_BODY };
+
+/**
+ * [mallinfo2(3)](http://man7.org/linux/man-pages/man3/mallinfo2.3.html) returns
+ * information about the current state of the heap. Note that mallinfo2() is
+ * inherently unreliable; consider using malloc_info() instead.
+ */
+struct mallinfo2 mallinfo2(void) __RENAME(mallinfo);
+
 /**
  * [malloc_info(3)](http://man7.org/linux/man-pages/man3/malloc_info.3.html)
  * writes information about the current state of the heap to the given stream.
diff --git a/tests/malloc_test.cpp b/tests/malloc_test.cpp
index 5e8fb9fa7..8272d3920 100644
--- a/tests/malloc_test.cpp
+++ b/tests/malloc_test.cpp
@@ -809,6 +809,77 @@ TEST(malloc, mallinfo) {
 #endif
 }
 
+TEST(malloc, mallinfo2) {
+#if defined(__BIONIC__)
+  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
+  static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};
+
+  constexpr static size_t kMaxAllocs = 50;
+
+  for (size_t size : sizes) {
+    // If some of these allocations are stuck in a thread cache, then keep
+    // looping until we make an allocation that changes the total size of the
+    // memory allocated.
+    // jemalloc implementations count the thread cache allocations against
+    // total memory allocated.
+    void* ptrs[kMaxAllocs] = {};
+    bool pass = false;
+    for (size_t i = 0; i < kMaxAllocs; i++) {
+      struct mallinfo info = mallinfo();
+      struct mallinfo2 info2 = mallinfo2();
+      // Verify that mallinfo and mallinfo2 are exactly the same.
+ ASSERT_EQ(info.arena, info2.arena); + ASSERT_EQ(info.ordblks, info2.ordblks); + ASSERT_EQ(info.smblks, info2.smblks); + ASSERT_EQ(info.hblks, info2.hblks); + ASSERT_EQ(info.hblkhd, info2.hblkhd); + ASSERT_EQ(info.usmblks, info2.usmblks); + ASSERT_EQ(info.fsmblks, info2.fsmblks); + ASSERT_EQ(info.uordblks, info2.uordblks); + ASSERT_EQ(info.fordblks, info2.fordblks); + ASSERT_EQ(info.keepcost, info2.keepcost); + + size_t allocated = info2.uordblks; + ptrs[i] = malloc(size); + ASSERT_TRUE(ptrs[i] != nullptr); + + info = mallinfo(); + info2 = mallinfo2(); + // Verify that mallinfo and mallinfo2 are exactly the same. + ASSERT_EQ(info.arena, info2.arena); + ASSERT_EQ(info.ordblks, info2.ordblks); + ASSERT_EQ(info.smblks, info2.smblks); + ASSERT_EQ(info.hblks, info2.hblks); + ASSERT_EQ(info.hblkhd, info2.hblkhd); + ASSERT_EQ(info.usmblks, info2.usmblks); + ASSERT_EQ(info.fsmblks, info2.fsmblks); + ASSERT_EQ(info.uordblks, info2.uordblks); + ASSERT_EQ(info.fordblks, info2.fordblks); + ASSERT_EQ(info.keepcost, info2.keepcost); + + size_t new_allocated = info2.uordblks; + if (allocated != new_allocated) { + size_t usable_size = malloc_usable_size(ptrs[i]); + // Only check if the total got bigger by at least allocation size. + // Sometimes the mallinfo2 numbers can go backwards due to compaction + // and/or freeing of cached data. + if (new_allocated >= allocated + usable_size) { + pass = true; + break; + } + } + } + for (void* ptr : ptrs) { + free(ptr); + } + ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after " + << kMaxAllocs << " allocations."; + } +#else + GTEST_SKIP() << "glibc is broken"; +#endif +} + template void __attribute__((optnone)) VerifyAlignment(Type* floating) { size_t expected_alignment = alignof(Type);