xref: /llvm-project/llvm/lib/Support/rpmalloc/rpmalloc.h (revision 67226bad150785f64efcf53c79b7785d421fc8eb)
1*67226badSAlexandre Ganea //===---------------------- rpmalloc.h ------------------*- C -*-=============//
2*67226badSAlexandre Ganea //
3*67226badSAlexandre Ganea // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*67226badSAlexandre Ganea // See https://llvm.org/LICENSE.txt for license information.
5*67226badSAlexandre Ganea // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*67226badSAlexandre Ganea //
7*67226badSAlexandre Ganea //===----------------------------------------------------------------------===//
8*67226badSAlexandre Ganea //
9*67226badSAlexandre Ganea // This library provides a cross-platform lock free thread caching malloc
10*67226badSAlexandre Ganea // implementation in C11.
11*67226badSAlexandre Ganea //
12*67226badSAlexandre Ganea //===----------------------------------------------------------------------===//
13*67226badSAlexandre Ganea 
14*67226badSAlexandre Ganea #pragma once
15*67226badSAlexandre Ganea 
16*67226badSAlexandre Ganea #include <stddef.h>
17*67226badSAlexandre Ganea 
18*67226badSAlexandre Ganea #ifdef __cplusplus
19*67226badSAlexandre Ganea extern "C" {
20*67226badSAlexandre Ganea #endif
21*67226badSAlexandre Ganea 
//! Compiler-specific export, allocator-attribute, and calling-convention
//  macros. RPMALLOC_ATTRIB_MALLOC / RPMALLOC_ATTRIB_ALLOC_SIZE* annotate the
//  allocation entry points below so compilers can optimize around them.
#if defined(__clang__) || defined(__GNUC__)
#define RPMALLOC_EXPORT __attribute__((visibility("default")))
#define RPMALLOC_ALLOCATOR
// NOTE(review): the malloc/alloc_size attributes are disabled for clang < 4
// and for GCC builds with ENABLE_PRELOAD — presumably lack of attribute
// support and interposition safety respectively; confirm against upstream
// rpmalloc history before relying on this.
#if (defined(__clang_major__) && (__clang_major__ < 4)) ||                     \
    (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD)
#define RPMALLOC_ATTRIB_MALLOC
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
#else
#define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__))
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size)))
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)                               \
  __attribute__((alloc_size(count, size)))
#endif
#define RPMALLOC_CDECL
#elif defined(_MSC_VER)
#define RPMALLOC_EXPORT
#define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
#define RPMALLOC_ATTRIB_MALLOC
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
#define RPMALLOC_CDECL __cdecl
#else
// Unknown compiler: every attribute macro expands to nothing.
#define RPMALLOC_EXPORT
#define RPMALLOC_ALLOCATOR
#define RPMALLOC_ATTRIB_MALLOC
#define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
#define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
#define RPMALLOC_CDECL
#endif
52*67226badSAlexandre Ganea 
//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce
//  a very small overhead due to some size calculations not being compile time
//  constants
#ifndef RPMALLOC_CONFIGURABLE
#define RPMALLOC_CONFIGURABLE 0
#endif

//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_*
//! functions).
//  Will introduce a very small overhead to track fully allocated spans in heaps
#ifndef RPMALLOC_FIRST_CLASS_HEAPS
#define RPMALLOC_FIRST_CLASS_HEAPS 0
#endif

//! Flag to rpaligned_realloc to not preserve content in reallocation
#define RPMALLOC_NO_PRESERVE 1
//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be
//! done in-place,
//  in which case the original pointer is still valid (just like a call to
//  realloc which fails to allocate a new block).
#define RPMALLOC_GROW_OR_FAIL 2
74*67226badSAlexandre Ganea 
//! Process-wide allocator statistics, filled in by rpmalloc_global_statistics.
//  Fields marked "(only if ENABLE_STATISTICS=1)" are only tracked when the
//  library is built with that option.
typedef struct rpmalloc_global_statistics_t {
  //! Current amount of virtual memory mapped, all of which might not have been
  //! committed (only if ENABLE_STATISTICS=1)
  size_t mapped;
  //! Peak amount of virtual memory mapped, all of which might not have been
  //! committed (only if ENABLE_STATISTICS=1)
  size_t mapped_peak;
  //! Current amount of memory in global caches for small and medium sizes
  //! (<32KiB)
  size_t cached;
  //! Current amount of memory allocated in huge allocations, i.e larger than
  //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
  size_t huge_alloc;
  //! Peak amount of memory allocated in huge allocations, i.e larger than
  //! LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
  size_t huge_alloc_peak;
  //! Total amount of memory mapped since initialization (only if
  //! ENABLE_STATISTICS=1)
  size_t mapped_total;
  //! Total amount of memory unmapped since initialization  (only if
  //! ENABLE_STATISTICS=1)
  size_t unmapped_total;
} rpmalloc_global_statistics_t;
98*67226badSAlexandre Ganea 
//! Per-thread allocator statistics, filled in by rpmalloc_thread_statistics
//  for the calling thread. Fields marked "(only if ENABLE_STATISTICS=1)" are
//  only tracked when the library is built with that option.
typedef struct rpmalloc_thread_statistics_t {
  //! Current number of bytes available in thread size class caches for small
  //! and medium sizes (<32KiB)
  size_t sizecache;
  //! Current number of bytes available in thread span caches for small and
  //! medium sizes (<32KiB)
  size_t spancache;
  //! Total number of bytes transitioned from thread cache to global cache (only
  //! if ENABLE_STATISTICS=1)
  size_t thread_to_global;
  //! Total number of bytes transitioned from global cache to thread cache (only
  //! if ENABLE_STATISTICS=1)
  size_t global_to_thread;
  //! Per span count statistics (only if ENABLE_STATISTICS=1)
  struct {
    //! Currently used number of spans
    size_t current;
    //! High water mark of spans used
    size_t peak;
    //! Number of spans transitioned to global cache
    size_t to_global;
    //! Number of spans transitioned from global cache
    size_t from_global;
    //! Number of spans transitioned to thread cache
    size_t to_cache;
    //! Number of spans transitioned from thread cache
    size_t from_cache;
    //! Number of spans transitioned to reserved state
    size_t to_reserved;
    //! Number of spans transitioned from reserved state
    size_t from_reserved;
    //! Number of raw memory map calls (not hitting the reserve spans but
    //! resulting in actual OS mmap calls)
    size_t map_calls;
  } span_use[64];
  //! Per size class statistics (only if ENABLE_STATISTICS=1)
  struct {
    //! Current number of allocations
    size_t alloc_current;
    //! Peak number of allocations
    size_t alloc_peak;
    //! Total number of allocations
    size_t alloc_total;
    //! Total number of frees
    size_t free_total;
    //! Number of spans transitioned to cache
    size_t spans_to_cache;
    //! Number of spans transitioned from cache
    size_t spans_from_cache;
    //! Number of spans transitioned from reserved state
    size_t spans_from_reserved;
    //! Number of raw memory map calls (not hitting the reserve spans but
    //! resulting in actual OS mmap calls)
    size_t map_calls;
  } size_use[128];
} rpmalloc_thread_statistics_t;
155*67226badSAlexandre Ganea 
//! Allocator configuration, passed to rpmalloc_initialize_config. Per the
//  field notes below, unset (zero/null) fields generally fall back to the
//  built-in defaults.
typedef struct rpmalloc_config_t {
  //! Map memory pages for the given number of bytes. The returned address MUST
  //! be
  //  aligned to the rpmalloc span size, which will always be a power of two.
  //  Optionally the function can store an alignment offset in the offset
  //  variable in case it performs alignment and the returned pointer is offset
  //  from the actual start of the memory region due to this alignment. The
  //  alignment offset will be passed to the memory unmap function. The
  //  alignment offset MUST NOT be larger than 65535 (storable in an uint16_t),
  //  if it is you must use natural alignment to shift it into 16 bits. If you
  //  set a memory_map function, you must also set a memory_unmap function or
  //  else the default implementation will be used for both. This function must
  //  be thread safe, it can be called by multiple threads simultaneously.
  void *(*memory_map)(size_t size, size_t *offset);
  //! Unmap the memory pages starting at address and spanning the given number
  //! of bytes.
  //  If release is set to non-zero, the unmap is for an entire span range as
  //  returned by a previous call to memory_map and that the entire range should
  //  be released. The release argument holds the size of the entire span range.
  //  If release is set to 0, the unmap is a partial decommit of a subset of the
  //  mapped memory range. If you set a memory_unmap function, you must also set
  //  a memory_map function or else the default implementation will be used for
  //  both. This function must be thread safe, it can be called by multiple
  //  threads simultaneously.
  void (*memory_unmap)(void *address, size_t size, size_t offset,
                       size_t release);
  //! Called when an assert fails, if asserts are enabled. Will use the standard
  //! assert()
  //  if this is not set.
  void (*error_callback)(const char *message);
  //! Called when a call to map memory pages fails (out of memory). If this
  //! callback is
  //  not set or returns zero the library will return a null pointer in the
  //  allocation call. If this callback returns non-zero the map call will be
  //  retried. The argument passed is the number of bytes that was requested in
  //  the map call. Only used if the default system memory map function is used
  //  (memory_map callback is not set).
  int (*map_fail_callback)(size_t size);
  //! Size of memory pages. The page size MUST be a power of two. All memory
  //! mapping
  //  requests to memory_map will be made with size set to a multiple of the
  //  page size. Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system
  //  page size is used.
  size_t page_size;
  //! Size of a span of memory blocks. MUST be a power of two, and in
  //! [4096,262144]
  //  range (unless 0 - set to 0 to use the default span size). Used if
  //  RPMALLOC_CONFIGURABLE is defined to 1.
  size_t span_size;
  //! Number of spans to map at each request to map new virtual memory blocks.
  //! This can
  //  be used to minimize the system call overhead at the cost of virtual memory
  //  address space. The extra mapped pages will not be written until actually
  //  used, so physical committed memory should not be affected in the default
  //  implementation. Will be aligned to a multiple of spans that match memory
  //  page size in case of huge pages.
  size_t span_map_count;
  //! Enable use of large/huge pages. If this flag is set to non-zero and page
  //! size is
  //  zero, the allocator will try to enable huge pages and auto detect the
  //  configuration. If this is set to non-zero and page_size is also non-zero,
  //  the allocator will assume huge pages have been configured and enabled
  //  prior to initializing the allocator. For Windows, see
  //  https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
  //  For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
  int enable_huge_pages;
  //! Respectively allocated pages and huge allocated pages names for systems
  //  supporting it to be able to distinguish among anonymous regions.
  const char *page_name;
  const char *huge_page_name;
} rpmalloc_config_t;
227*67226badSAlexandre Ganea 
//! Initialize allocator with default configuration.
//  Returns an int status code (0 presumed to mean success — confirm against
//  the implementation).
RPMALLOC_EXPORT int rpmalloc_initialize(void);

//! Initialize allocator with given configuration (see rpmalloc_config_t).
RPMALLOC_EXPORT int rpmalloc_initialize_config(const rpmalloc_config_t *config);

//! Get allocator configuration
RPMALLOC_EXPORT const rpmalloc_config_t *rpmalloc_config(void);

//! Finalize allocator
RPMALLOC_EXPORT void rpmalloc_finalize(void);

//! Initialize allocator for calling thread
RPMALLOC_EXPORT void rpmalloc_thread_initialize(void);

//! Finalize allocator for calling thread.
//  release_caches: non-zero to also release the thread's caches (inferred
//  from the parameter name — confirm in the implementation).
RPMALLOC_EXPORT void rpmalloc_thread_finalize(int release_caches);

//! Perform deferred deallocations pending for the calling thread heap
RPMALLOC_EXPORT void rpmalloc_thread_collect(void);

//! Query if allocator is initialized for calling thread
RPMALLOC_EXPORT int rpmalloc_is_thread_initialized(void);
251*67226badSAlexandre Ganea 
//! Get per-thread statistics (for the calling thread).
RPMALLOC_EXPORT void
rpmalloc_thread_statistics(rpmalloc_thread_statistics_t *stats);

//! Get global statistics
RPMALLOC_EXPORT void
rpmalloc_global_statistics(rpmalloc_global_statistics_t *stats);

//! Dump all statistics in human readable format to file (should be a FILE*).
//  The parameter is void* — presumably to keep <stdio.h> out of this header.
RPMALLOC_EXPORT void rpmalloc_dump_statistics(void *file);
262*67226badSAlexandre Ganea 
//! Allocate a memory block of at least the given size
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1);

//! Free the given memory block (must have been allocated by this allocator).
RPMALLOC_EXPORT void rpfree(void *ptr);

//! Allocate a memory block of at least the given size and zero initialize it
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2);

//! Reallocate the given block to at least the given size.
//  NOTE(review): GCC documents __malloc__ as inappropriate for realloc-like
//  functions (the result may alias the old block); kept as-is to match
//  upstream rpmalloc — confirm before relying on alias analysis here.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rprealloc(void *ptr, size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE(2);
279*67226badSAlexandre Ganea 
//! Reallocate the given block to at least the given size and alignment,
//  with optional control flags (see RPMALLOC_NO_PRESERVE and
//  RPMALLOC_GROW_OR_FAIL). oldsize is the previous size of the block.
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size
//  (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpaligned_realloc(void *ptr, size_t alignment, size_t size, size_t oldsize,
                  unsigned int flags) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE(3);

//! Allocate a memory block of at least the given size and alignment.
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size
//  (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE(2);
299*67226badSAlexandre Ganea 
//! Allocate a memory block of at least the given size and alignment, and zero
//! initialize it.
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size
//  (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpaligned_calloc(size_t alignment, size_t num,
                 size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);

//! Allocate a memory block of at least the given size and alignment
//  (memalign-style variant of rpaligned_alloc).
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size
//  (default 64KiB)
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE(2);

//! Allocate a memory block of at least the given size and alignment, storing
//  the result in *memptr and returning an int status like posix_memalign
//  (0 presumed to mean success — confirm against the implementation).
//  Alignment must be a power of two and a multiple of sizeof(void*),
//  and should ideally be less than memory page size. A caveat of rpmalloc
//  internals is that this must also be strictly less than the span size
//  (default 64KiB)
RPMALLOC_EXPORT int rpposix_memalign(void **memptr, size_t alignment,
                                     size_t size);
327*67226badSAlexandre Ganea 
//! Query the usable size of the given memory block (from given pointer to the
//! end of block)
RPMALLOC_EXPORT size_t rpmalloc_usable_size(void *ptr);

//! Dummy empty function for forcing linker symbol inclusion
RPMALLOC_EXPORT void rpmalloc_linker_reference(void);
334*67226badSAlexandre Ganea 
335*67226badSAlexandre Ganea #if RPMALLOC_FIRST_CLASS_HEAPS
336*67226badSAlexandre Ganea 
//! Heap type (opaque; defined in the implementation).
typedef struct heap_t rpmalloc_heap_t;

//! Acquire a new heap. Will reuse existing released heaps or allocate memory
//! for a new heap
//  if none available. Heap API is implemented with the strict assumption that
//  only one single thread will call heap functions for a given heap at any
//  given time, no functions are thread safe.
RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_heap_acquire(void);

//! Release a heap (does NOT free the memory allocated by the heap, use
//! rpmalloc_heap_free_all before destroying the heap).
//  Releasing a heap will enable it to be reused by other threads. Safe to pass
//  a null pointer.
RPMALLOC_EXPORT void rpmalloc_heap_release(rpmalloc_heap_t *heap);
352*67226badSAlexandre Ganea 
//! Allocate a memory block of at least the given size using the given heap.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_alloc(rpmalloc_heap_t *heap, size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE(2);

//! Allocate a memory block of at least the given size using the given heap. The
//! returned
//  block will have the requested alignment. Alignment must be a power of two
//  and a multiple of sizeof(void*), and should ideally be less than memory page
//  size. A caveat of rpmalloc internals is that this must also be strictly less
//  than the span size (default 64KiB).
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_aligned_alloc(rpmalloc_heap_t *heap, size_t alignment,
                            size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE(3);

//! Allocate a memory block of at least the given size using the given heap and
//! zero initialize it.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_calloc(rpmalloc_heap_t *heap, size_t num,
                     size_t size) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
375*67226badSAlexandre Ganea 
376*67226badSAlexandre Ganea //! Allocate a memory block of at least the given size using the given heap and
377*67226badSAlexandre Ganea //! zero initialize it. The returned
378*67226badSAlexandre Ganea //  block will have the requested alignment. Alignment must either be zero, or a
379*67226badSAlexandre Ganea //  power of two and a multiple of sizeof(void*), and should ideally be less
380*67226badSAlexandre Ganea //  than memory page size. A caveat of rpmalloc internals is that this must also
381*67226badSAlexandre Ganea //  be strictly less than the span size (default 64KiB).
382*67226badSAlexandre Ganea RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
383*67226badSAlexandre Ganea rpmalloc_heap_aligned_calloc(rpmalloc_heap_t *heap, size_t alignment,
384*67226badSAlexandre Ganea                              size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC
385*67226badSAlexandre Ganea     RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
386*67226badSAlexandre Ganea 
//! Reallocate the given block to at least the given size. The memory block MUST
//! be allocated
//  by the same heap given to this function. flags accepts the same
//  RPMALLOC_NO_PRESERVE / RPMALLOC_GROW_OR_FAIL values as rpaligned_realloc.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *
rpmalloc_heap_realloc(rpmalloc_heap_t *heap, void *ptr, size_t size,
                      unsigned int flags) RPMALLOC_ATTRIB_MALLOC
    RPMALLOC_ATTRIB_ALLOC_SIZE(3);

//! Reallocate the given block to at least the given size. The memory block MUST
//! be allocated
//  by the same heap given to this function. The returned block will have the
//  requested alignment. Alignment must be either zero, or a power of two and a
//  multiple of sizeof(void*), and should ideally be less than memory page size.
//  A caveat of rpmalloc internals is that this must also be strictly less than
//  the span size (default 64KiB).
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void *rpmalloc_heap_aligned_realloc(
    rpmalloc_heap_t *heap, void *ptr, size_t alignment, size_t size,
    unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4);
405*67226badSAlexandre Ganea 
//! Free the given memory block from the given heap. The memory block MUST be
//! allocated
//  by the same heap given to this function.
RPMALLOC_EXPORT void rpmalloc_heap_free(rpmalloc_heap_t *heap, void *ptr);

//! Free all memory allocated by the heap
RPMALLOC_EXPORT void rpmalloc_heap_free_all(rpmalloc_heap_t *heap);

//! Set the given heap as the current heap for the calling thread. A heap MUST
//! only be current heap
//  for a single thread, a heap can never be shared between multiple threads.
//  The previous current heap for the calling thread is released to be reused by
//  other threads.
RPMALLOC_EXPORT void rpmalloc_heap_thread_set_current(rpmalloc_heap_t *heap);

//! Returns which heap the given pointer is allocated on
RPMALLOC_EXPORT rpmalloc_heap_t *rpmalloc_get_heap_for_ptr(void *ptr);
423*67226badSAlexandre Ganea 
424*67226badSAlexandre Ganea #endif
425*67226badSAlexandre Ganea 
426*67226badSAlexandre Ganea #ifdef __cplusplus
427*67226badSAlexandre Ganea }
428*67226badSAlexandre Ganea #endif
429