1*8f1d5724Srobert //===----------------------------------------------------------------------===//
279c2e3e6Spatrick //
379c2e3e6Spatrick // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
479c2e3e6Spatrick // See https://llvm.org/LICENSE.txt for license information.
579c2e3e6Spatrick // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
679c2e3e6Spatrick //
779c2e3e6Spatrick //===----------------------------------------------------------------------===//
879c2e3e6Spatrick
979c2e3e6Spatrick #include "fallback_malloc.h"
1079c2e3e6Spatrick
1179c2e3e6Spatrick #include <__threading_support>
1279c2e3e6Spatrick #ifndef _LIBCXXABI_HAS_NO_THREADS
1379c2e3e6Spatrick #if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB)
1479c2e3e6Spatrick #pragma comment(lib, "pthread")
1579c2e3e6Spatrick #endif
1679c2e3e6Spatrick #endif
1779c2e3e6Spatrick
18*8f1d5724Srobert #include <assert.h>
1979c2e3e6Spatrick #include <stdlib.h> // for malloc, calloc, free
2079c2e3e6Spatrick #include <string.h> // for memset
214e0cc08cSpatrick #include <new> // for std::__libcpp_aligned_{alloc,free}
2279c2e3e6Spatrick
2379c2e3e6Spatrick // A small, simple heap manager based (loosely) on
2479c2e3e6Spatrick // the startup heap manager from FreeBSD, optimized for space.
2579c2e3e6Spatrick //
2679c2e3e6Spatrick // Manages a fixed-size memory pool, supports malloc and free only.
2779c2e3e6Spatrick // No support for realloc.
2879c2e3e6Spatrick //
2979c2e3e6Spatrick // Allocates chunks in multiples of four bytes, with a four byte header
3079c2e3e6Spatrick // for each chunk. The overhead of each chunk is kept low by keeping pointers
3179c2e3e6Spatrick // as two byte offsets within the heap, rather than (4 or 8 byte) pointers.
3279c2e3e6Spatrick
3379c2e3e6Spatrick namespace {
3479c2e3e6Spatrick
// When POSIX threads are not available, make the mutex operations a nop
#ifndef _LIBCXXABI_HAS_NO_THREADS
// Guards all access to the fallback pool ('heap', 'freelist').
static _LIBCPP_CONSTINIT std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER;
#else
// Placeholder object so '&heap_mutex' still compiles in single-threaded builds.
static _LIBCPP_CONSTINIT void* heap_mutex = 0;
#endif
4179c2e3e6Spatrick
// RAII scope guard for heap_mutex: locks on construction, unlocks on
// destruction. Becomes a no-op when the library is built without threads.
class mutexor {
public:
#ifndef _LIBCXXABI_HAS_NO_THREADS
  // Acquire 'm' for the lifetime of this object.
  mutexor(std::__libcpp_mutex_t* m) : mtx_(m) {
    std::__libcpp_mutex_lock(mtx_);
  }
  ~mutexor() { std::__libcpp_mutex_unlock(mtx_); }
#else
  mutexor(void*) {}
  ~mutexor() {}
#endif
private:
  // Non-copyable: declared but intentionally left undefined
  // (pre-C++11 equivalent of '= delete').
  mutexor(const mutexor& rhs);
  mutexor& operator=(const mutexor& rhs);
#ifndef _LIBCXXABI_HAS_NO_THREADS
  std::__libcpp_mutex_t* mtx_;
#endif
};
6079c2e3e6Spatrick
// The fixed-size emergency pool. The bare __attribute__((aligned)) aligns it
// to the target's maximum useful alignment.
static const size_t HEAP_SIZE = 512;
char heap[HEAP_SIZE] __attribute__((aligned));

// Offsets and lengths are stored in units of sizeof(heap_node), so 16 bits
// comfortably covers the 512-byte pool.
typedef unsigned short heap_offset;
typedef unsigned short heap_size;
6679c2e3e6Spatrick
67*8f1d5724Srobert // On both 64 and 32 bit targets heap_node should have the following properties
68*8f1d5724Srobert // Size: 4
69*8f1d5724Srobert // Alignment: 2
7079c2e3e6Spatrick struct heap_node {
7179c2e3e6Spatrick heap_offset next_node; // offset into heap
7279c2e3e6Spatrick heap_size len; // size in units of "sizeof(heap_node)"
7379c2e3e6Spatrick };
7479c2e3e6Spatrick
// All pointers returned by fallback_malloc must be at least aligned
// as RequiredAligned. Note that RequiredAlignment can be greater than
// alignof(std::max_align_t) on 64 bit systems compiling 32 bit code.
struct FallbackMaxAlignType {
} __attribute__((aligned)); // bare 'aligned' = target's maximum useful alignment
const size_t RequiredAlignment = alignof(FallbackMaxAlignType);

static_assert(alignof(FallbackMaxAlignType) % sizeof(heap_node) == 0,
              "The required alignment must be evenly divisible by the sizeof(heap_node)");

// The number of heap_node's that can fit in a chunk of memory with the size
// of the RequiredAlignment. On 64 bit targets NodesPerAlignment should be 4.
const size_t NodesPerAlignment = alignof(FallbackMaxAlignType) / sizeof(heap_node);
88*8f1d5724Srobert
// Sentinel node: one past the end of the heap; never dereferenced.
static const heap_node* list_end =
    (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap
// Head of the singly linked free list; NULL until init_heap() runs lazily
// on the first allocation.
static heap_node* freelist = NULL;
9279c2e3e6Spatrick
node_from_offset(const heap_offset offset)9379c2e3e6Spatrick heap_node* node_from_offset(const heap_offset offset) {
9479c2e3e6Spatrick return (heap_node*)(heap + (offset * sizeof(heap_node)));
9579c2e3e6Spatrick }
9679c2e3e6Spatrick
offset_from_node(const heap_node * ptr)9779c2e3e6Spatrick heap_offset offset_from_node(const heap_node* ptr) {
9879c2e3e6Spatrick return static_cast<heap_offset>(
9979c2e3e6Spatrick static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) /
10079c2e3e6Spatrick sizeof(heap_node));
10179c2e3e6Spatrick }
10279c2e3e6Spatrick
// Return a pointer to the first address, 'A', in `heap` that can actually be
// used to represent a heap_node. 'A' must be aligned so that
// '(A + sizeof(heap_node)) % RequiredAlignment == 0'. On 64 bit systems this
// address should be 12 bytes after the first 16 byte boundary.
heap_node* getFirstAlignedNodeInHeap() {
  heap_node* node = (heap_node*)heap;
  // Bytes past an alignment boundary the header must start so that the user
  // data (node + 1) lands exactly on a RequiredAlignment boundary.
  const size_t alignNBytesAfterBoundary = RequiredAlignment - sizeof(heap_node);
  size_t boundaryOffset = reinterpret_cast<size_t>(node) % RequiredAlignment;
  // NOTE(review): this subtraction assumes boundaryOffset <=
  // alignNBytesAfterBoundary; since `heap` is declared __attribute__((aligned))
  // boundaryOffset is presumably 0 — confirm if the pool declaration changes.
  size_t requiredOffset = alignNBytesAfterBoundary - boundaryOffset;
  size_t NElemOffset = requiredOffset / sizeof(heap_node);
  return node + NElemOffset;
}
115*8f1d5724Srobert
// Lazily invoked on first use: turn the whole (alignment-adjusted) pool into
// a single free chunk whose successor is the list_end sentinel.
void init_heap() {
  freelist = getFirstAlignedNodeInHeap();
  freelist->next_node = offset_from_node(list_end);
  // Chunk length in heap_node units, from the first usable node to the end.
  freelist->len = static_cast<heap_size>(list_end - freelist);
}
12179c2e3e6Spatrick
12279c2e3e6Spatrick // How big a chunk we allocate
alloc_size(size_t len)12379c2e3e6Spatrick size_t alloc_size(size_t len) {
12479c2e3e6Spatrick return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
12579c2e3e6Spatrick }
12679c2e3e6Spatrick
is_fallback_ptr(void * ptr)12779c2e3e6Spatrick bool is_fallback_ptr(void* ptr) {
12879c2e3e6Spatrick return ptr >= heap && ptr < (heap + HEAP_SIZE);
12979c2e3e6Spatrick }
13079c2e3e6Spatrick
// Allocate 'len' bytes from the pool via a first-fit walk of the free list.
// Returns NULL when no chunk is big enough. Every returned pointer is
// aligned to RequiredAlignment.
void* fallback_malloc(size_t len) {
  heap_node *p, *prev;
  const size_t nelems = alloc_size(len);
  mutexor mtx(&heap_mutex); // hold the heap lock for the entire walk

  if (NULL == freelist)
    init_heap();

  // Walk the free list, looking for a "big enough" chunk
  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {

    // Check the invariant that all heap_nodes pointers 'p' are aligned
    // so that 'p + 1' has an alignment of at least RequiredAlignment
    assert(reinterpret_cast<size_t>(p + 1) % RequiredAlignment == 0);

    // Calculate the number of extra padding elements needed in order
    // to split 'p' and create a properly aligned heap_node from the tail
    // of 'p'. We calculate aligned_nelems such that 'p->len - aligned_nelems'
    // will be a multiple of NodesPerAlignment.
    size_t aligned_nelems = nelems;
    if (p->len > nelems) {
      heap_size remaining_len = static_cast<heap_size>(p->len - nelems);
      aligned_nelems += remaining_len % NodesPerAlignment;
    }

    // chunk is larger and we can create a properly aligned heap_node
    // from the tail. In this case we shorten 'p' and return the tail.
    if (p->len > aligned_nelems) {
      heap_node* q;
      // Shrink 'p' in place; the tail becomes the allocated chunk, so the
      // free list itself does not need to be relinked.
      p->len = static_cast<heap_size>(p->len - aligned_nelems);
      q = p + p->len;
      q->next_node = 0; // allocated chunks carry no successor
      q->len = static_cast<heap_size>(aligned_nelems);
      void* ptr = q + 1; // user data starts just past the header
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }

    // The chunk is the exact size or the chunk is larger but not large
    // enough to split due to alignment constraints.
    if (p->len >= nelems) {
      // Unlink 'p' and hand out the whole chunk.
      if (prev == 0)
        freelist = node_from_offset(p->next_node);
      else
        prev->next_node = p->next_node;
      p->next_node = 0;
      void* ptr = p + 1;
      assert(reinterpret_cast<size_t>(ptr) % RequiredAlignment == 0);
      return ptr;
    }
  }
  return NULL; // couldn't find a spot big enough
}
18579c2e3e6Spatrick
// Return the start of the next block, i.e. one past the end of chunk 'p'
// ('len' already includes the header node).
heap_node* after(struct heap_node* p) { return p + p->len; }
18879c2e3e6Spatrick
fallback_free(void * ptr)18979c2e3e6Spatrick void fallback_free(void* ptr) {
19079c2e3e6Spatrick struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk
19179c2e3e6Spatrick struct heap_node *p, *prev;
19279c2e3e6Spatrick
19379c2e3e6Spatrick mutexor mtx(&heap_mutex);
19479c2e3e6Spatrick
19579c2e3e6Spatrick #ifdef DEBUG_FALLBACK_MALLOC
1964e0cc08cSpatrick std::printf("Freeing item at %d of size %d\n", offset_from_node(cp), cp->len);
19779c2e3e6Spatrick #endif
19879c2e3e6Spatrick
19979c2e3e6Spatrick for (p = freelist, prev = 0; p && p != list_end;
20079c2e3e6Spatrick prev = p, p = node_from_offset(p->next_node)) {
20179c2e3e6Spatrick #ifdef DEBUG_FALLBACK_MALLOC
2024e0cc08cSpatrick std::printf(" p=%d, cp=%d, after(p)=%d, after(cp)=%d\n",
2034e0cc08cSpatrick offset_from_node(p), offset_from_node(cp),
2044e0cc08cSpatrick offset_from_node(after(p)), offset_from_node(after(cp)));
20579c2e3e6Spatrick #endif
20679c2e3e6Spatrick if (after(p) == cp) {
20779c2e3e6Spatrick #ifdef DEBUG_FALLBACK_MALLOC
2084e0cc08cSpatrick std::printf(" Appending onto chunk at %d\n", offset_from_node(p));
20979c2e3e6Spatrick #endif
21079c2e3e6Spatrick p->len = static_cast<heap_size>(
21179c2e3e6Spatrick p->len + cp->len); // make the free heap_node larger
21279c2e3e6Spatrick return;
21379c2e3e6Spatrick } else if (after(cp) == p) { // there's a free heap_node right after
21479c2e3e6Spatrick #ifdef DEBUG_FALLBACK_MALLOC
2154e0cc08cSpatrick std::printf(" Appending free chunk at %d\n", offset_from_node(p));
21679c2e3e6Spatrick #endif
21779c2e3e6Spatrick cp->len = static_cast<heap_size>(cp->len + p->len);
21879c2e3e6Spatrick if (prev == 0) {
21979c2e3e6Spatrick freelist = cp;
22079c2e3e6Spatrick cp->next_node = p->next_node;
22179c2e3e6Spatrick } else
22279c2e3e6Spatrick prev->next_node = offset_from_node(cp);
22379c2e3e6Spatrick return;
22479c2e3e6Spatrick }
22579c2e3e6Spatrick }
22679c2e3e6Spatrick // Nothing to merge with, add it to the start of the free list
22779c2e3e6Spatrick #ifdef DEBUG_FALLBACK_MALLOC
2284e0cc08cSpatrick std::printf(" Making new free list entry %d\n", offset_from_node(cp));
22979c2e3e6Spatrick #endif
23079c2e3e6Spatrick cp->next_node = offset_from_node(freelist);
23179c2e3e6Spatrick freelist = cp;
23279c2e3e6Spatrick }
23379c2e3e6Spatrick
#ifdef INSTRUMENT_FALLBACK_MALLOC
// Instrumentation helper: print every free-list entry and return the total
// free space in heap_node units. Lazily initializes the heap like the
// allocator does.
size_t print_free_list() {
  struct heap_node *p, *prev;
  heap_size total_free = 0;
  if (NULL == freelist)
    init_heap();

  for (p = freelist, prev = 0; p && p != list_end;
       prev = p, p = node_from_offset(p->next_node)) {
    // 'prev' only tracks whether this is the first entry (for indentation).
    std::printf("%sOffset: %d\tsize: %d Next: %d\n",
                (prev == 0 ? "" : " "), offset_from_node(p), p->len, p->next_node);
    total_free += p->len;
  }
  std::printf("Total Free space: %d\n", total_free);
  return total_free;
}
#endif
25179c2e3e6Spatrick } // end unnamed namespace
25279c2e3e6Spatrick
25379c2e3e6Spatrick namespace __cxxabiv1 {
25479c2e3e6Spatrick
// Tag type whose alignment (bare __attribute__((aligned)) = target maximum)
// is passed to the aligned-allocation entry points below.
struct __attribute__((aligned)) __aligned_type {};
25679c2e3e6Spatrick
__aligned_malloc_with_fallback(size_t size)25779c2e3e6Spatrick void* __aligned_malloc_with_fallback(size_t size) {
25879c2e3e6Spatrick #if defined(_WIN32)
2594e0cc08cSpatrick if (void* dest = std::__libcpp_aligned_alloc(alignof(__aligned_type), size))
26079c2e3e6Spatrick return dest;
26179c2e3e6Spatrick #elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
26279c2e3e6Spatrick if (void* dest = ::malloc(size))
26379c2e3e6Spatrick return dest;
26479c2e3e6Spatrick #else
26579c2e3e6Spatrick if (size == 0)
26679c2e3e6Spatrick size = 1;
2674e0cc08cSpatrick if (void* dest = std::__libcpp_aligned_alloc(__alignof(__aligned_type), size))
26879c2e3e6Spatrick return dest;
26979c2e3e6Spatrick #endif
27079c2e3e6Spatrick return fallback_malloc(size);
27179c2e3e6Spatrick }
27279c2e3e6Spatrick
__calloc_with_fallback(size_t count,size_t size)27379c2e3e6Spatrick void* __calloc_with_fallback(size_t count, size_t size) {
27479c2e3e6Spatrick void* ptr = ::calloc(count, size);
27579c2e3e6Spatrick if (NULL != ptr)
27679c2e3e6Spatrick return ptr;
27779c2e3e6Spatrick // if calloc fails, fall back to emergency stash
27879c2e3e6Spatrick ptr = fallback_malloc(size * count);
27979c2e3e6Spatrick if (NULL != ptr)
28079c2e3e6Spatrick ::memset(ptr, 0, size * count);
28179c2e3e6Spatrick return ptr;
28279c2e3e6Spatrick }
28379c2e3e6Spatrick
// Release memory obtained from __aligned_malloc_with_fallback, routing pool
// pointers to fallback_free and everything else to the matching deallocator
// for whichever branch of __aligned_malloc_with_fallback produced it.
void __aligned_free_with_fallback(void* ptr) {
  if (is_fallback_ptr(ptr))
    fallback_free(ptr);
  else {
#if defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION)
    ::free(ptr); // pairs with the ::malloc branch above
#else
    std::__libcpp_aligned_free(ptr);
#endif
  }
}
29579c2e3e6Spatrick
__free_with_fallback(void * ptr)29679c2e3e6Spatrick void __free_with_fallback(void* ptr) {
29779c2e3e6Spatrick if (is_fallback_ptr(ptr))
29879c2e3e6Spatrick fallback_free(ptr);
29979c2e3e6Spatrick else
30079c2e3e6Spatrick ::free(ptr);
30179c2e3e6Spatrick }
30279c2e3e6Spatrick
30379c2e3e6Spatrick } // namespace __cxxabiv1
304