/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef MALLOC_ELEM_H_
#define MALLOC_ELEM_H_

#include <stdbool.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_common.h>

#define MIN_DATA_SIZE (RTE_CACHE_LINE_SIZE)

/* dummy definition of struct so we can use pointers to it in malloc_elem struct */
struct malloc_heap;

enum elem_state {
	ELEM_FREE = 0,
	ELEM_BUSY,
	ELEM_PAD  /* element is a padding-only header */
};

struct malloc_elem {
	struct malloc_heap *heap;
	struct malloc_elem *volatile prev;
	/**< points to prev elem in memseg */
	struct malloc_elem *volatile next;
	/**< points to next elem in memseg */
	LIST_ENTRY(malloc_elem) free_list;
	/**< list of free elements in heap */
	struct rte_memseg_list *msl;
	/** Element state, @c dirty and @c pad validity depends on it. */
	/* An extra bit is needed to represent enum elem_state as signed int. */
	enum elem_state state : 3;
	/** If state == ELEM_FREE: the memory is not filled with zeroes. */
	uint32_t dirty : 1;
	/** Reserved for future use. */
	uint32_t reserved : 28;
	uint32_t pad;
	size_t size;
	struct malloc_elem *orig_elem;
	size_t orig_size;
#ifdef RTE_MALLOC_DEBUG
	uint64_t header_cookie;	/* Cookie marking start of data */
				/* trailer cookie at start + size */
#endif
#ifdef RTE_MALLOC_ASAN
	size_t user_size;
	uint64_t asan_cookie[2]; /* must be next to header_cookie */
#endif
} __rte_cache_aligned;

static const unsigned int MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);

#ifndef RTE_MALLOC_DEBUG
#ifdef RTE_MALLOC_ASAN
static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
#else
static const unsigned int MALLOC_ELEM_TRAILER_LEN;
#endif

/* dummy function - just check if pointer is non-null */
static inline int
malloc_elem_cookies_ok(const struct malloc_elem *elem){ return elem != NULL; }

/* dummy function - no header if malloc_debug is not enabled */
static inline void
set_header(struct malloc_elem *elem __rte_unused){ }

/* dummy function - no trailer if malloc_debug is not enabled */
static inline void
set_trailer(struct malloc_elem *elem __rte_unused){ }

#else
static const unsigned int MALLOC_ELEM_TRAILER_LEN = RTE_CACHE_LINE_SIZE;
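
/*
 * Illustrative sketch of what the cookies below guard (not a normative
 * layout description; pad elements inserted for alignment are omitted):
 *
 *   elem              elem + MALLOC_ELEM_HEADER_LEN            elem + size
 *   | malloc_elem header | user data ...              | trailer |
 *     ^ header_cookie                                    ^ last
 *                                              MALLOC_ELEM_TRAILER_LEN bytes
 *
 * The header cookie lives inside struct malloc_elem itself, while the
 * trailer cookie is written into the last MALLOC_ELEM_TRAILER_LEN bytes of
 * the element (see MALLOC_ELEM_TRAILER() below), so malloc_elem_cookies_ok()
 * can catch writes that stray just before or just after the data area.
 */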

#define MALLOC_HEADER_COOKIE   0xbadbadbadadd2e55ULL /**< Header cookie. */
#define MALLOC_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/

/* define macros to make referencing the header and trailer cookies easier */
#define MALLOC_ELEM_TRAILER(elem) (*((uint64_t *)RTE_PTR_ADD(elem, \
		elem->size - MALLOC_ELEM_TRAILER_LEN)))
#define MALLOC_ELEM_HEADER(elem) (elem->header_cookie)

static inline void
set_header(struct malloc_elem *elem)
{
	if (elem != NULL)
		MALLOC_ELEM_HEADER(elem) = MALLOC_HEADER_COOKIE;
}

static inline void
set_trailer(struct malloc_elem *elem)
{
	if (elem != NULL)
		MALLOC_ELEM_TRAILER(elem) = MALLOC_TRAILER_COOKIE;
}

/* check that the header and trailer cookies are set correctly */
static inline int
malloc_elem_cookies_ok(const struct malloc_elem *elem)
{
	return elem != NULL &&
			MALLOC_ELEM_HEADER(elem) == MALLOC_HEADER_COOKIE &&
			MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE;
}

#endif

#define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)

#ifdef RTE_MALLOC_ASAN

/*
 * ASAN_SHADOW_OFFSET should match the corresponding
 * value defined in gcc/libsanitizer/asan/asan_mapping.h
 */
#ifdef RTE_ARCH_X86_64
#define ASAN_SHADOW_OFFSET	0x00007fff8000
#elif defined(RTE_ARCH_ARM64)
#define ASAN_SHADOW_OFFSET	0x001000000000
#elif defined(RTE_ARCH_PPC_64)
#define ASAN_SHADOW_OFFSET	0x020000000000
#endif

#define ASAN_SHADOW_GRAIN_SIZE	8
#define ASAN_MEM_FREE_FLAG	0xfd
#define ASAN_MEM_REDZONE_FLAG	0xfa
#define ASAN_SHADOW_SCALE	3

#define ASAN_MEM_SHIFT(mem) ((void *)((uintptr_t)(mem) >> ASAN_SHADOW_SCALE))
#define ASAN_MEM_TO_SHADOW(mem) \
	RTE_PTR_ADD(ASAN_MEM_SHIFT(mem), ASAN_SHADOW_OFFSET)
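
/*
 * Worked example of the mapping above, assuming the x86_64 offset and a
 * made-up application address (purely illustrative): each
 * ASAN_SHADOW_GRAIN_SIZE (8) bytes of memory are described by one shadow
 * byte located at (addr >> ASAN_SHADOW_SCALE) + ASAN_SHADOW_OFFSET, e.g.
 *
 *	addr   = 0x7f0000001000
 *	shadow = (0x7f0000001000 >> 3) + 0x00007fff8000 = 0xfe07fff8200
 *
 * Storing ASAN_MEM_FREE_FLAG or ASAN_MEM_REDZONE_FLAG in that shadow byte
 * makes accesses to the whole 8-byte granule trap; storing 0 marks the
 * granule as fully accessible again.
 */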

__rte_no_asan
static inline void
asan_set_shadow(void *addr, char val)
{
	*(char *)addr = val;
}

static inline void
asan_set_zone(void *ptr, size_t len, uint32_t val)
{
	size_t offset, i;
	void *shadow;
	size_t zone_len = len / ASAN_SHADOW_GRAIN_SIZE;
	if (len % ASAN_SHADOW_GRAIN_SIZE != 0)
		zone_len += 1;

	for (i = 0; i < zone_len; i++) {
		offset = i * ASAN_SHADOW_GRAIN_SIZE;
		shadow = ASAN_MEM_TO_SHADOW((uintptr_t)ptr + offset);
		asan_set_shadow(shadow, val);
	}
}

/*
 * When the memory is released, the release mark is
 * set in the corresponding range of the shadow area.
 */
static inline void
asan_set_freezone(void *ptr, size_t size)
{
	asan_set_zone(ptr, size, ASAN_MEM_FREE_FLAG);
}

/*
 * When the memory is allocated, the memory state must be set as accessible.
 */
static inline void
asan_clear_alloczone(struct malloc_elem *elem)
{
	asan_set_zone((void *)elem, elem->size, 0x0);
}

static inline void
asan_clear_split_alloczone(struct malloc_elem *elem)
{
	void *ptr = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
	asan_set_zone(ptr, MALLOC_ELEM_OVERHEAD, 0x0);
}

/*
 * When the memory is allocated, the memory boundary is
 * marked in the corresponding range of the shadow area.
 * Requirement: the redzone is at least 16 bytes and a power of two.
 */
static inline void
asan_set_redzone(struct malloc_elem *elem, size_t user_size)
{
	uintptr_t head_redzone;
	uintptr_t tail_redzone;
	void *front_shadow;
	void *tail_shadow;
	uint32_t val;

	if (elem != NULL) {
		if (elem->state != ELEM_PAD)
			elem = RTE_PTR_ADD(elem, elem->pad);

		elem->user_size = user_size;

		/* Set mark before the start of the allocated memory */
		head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
		front_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
		asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);
		front_shadow = ASAN_MEM_TO_SHADOW(head_redzone
			- ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(front_shadow, ASAN_MEM_REDZONE_FLAG);

		/* Set mark after the end of the allocated memory */
		tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN + elem->user_size);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
		val = (tail_redzone % ASAN_SHADOW_GRAIN_SIZE);
		val = (val == 0) ? ASAN_MEM_REDZONE_FLAG : val;
		asan_set_shadow(tail_shadow, val);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
			+ ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(tail_shadow, ASAN_MEM_REDZONE_FLAG);
	}
}

/*
 * When the memory is released, the mark of the memory boundary
 * in the corresponding range of the shadow area is cleared.
 * Requirement: the redzone is at least 16 bytes and a power of two.
 */
static inline void
asan_clear_redzone(struct malloc_elem *elem)
{
	uintptr_t head_redzone;
	uintptr_t tail_redzone;
	void *head_shadow;
	void *tail_shadow;

	if (elem != NULL) {
		elem = RTE_PTR_ADD(elem, elem->pad);

		/* Clear mark before the start of the allocated memory */
		head_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN - ASAN_SHADOW_GRAIN_SIZE);
		head_shadow = ASAN_MEM_TO_SHADOW(head_redzone);
		asan_set_shadow(head_shadow, 0x00);
		head_shadow = ASAN_MEM_TO_SHADOW(head_redzone
			- ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(head_shadow, 0x00);

		/* Clear mark after the end of the allocated memory */
		tail_redzone = (uintptr_t)RTE_PTR_ADD(elem,
			MALLOC_ELEM_HEADER_LEN + elem->user_size);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone);
		asan_set_shadow(tail_shadow, 0x00);
		tail_shadow = ASAN_MEM_TO_SHADOW(tail_redzone
			+ ASAN_SHADOW_GRAIN_SIZE);
		asan_set_shadow(tail_shadow, 0x00);
	}
}

static inline size_t
old_malloc_size(struct malloc_elem *elem)
{
	if (elem->state != ELEM_PAD)
		elem = RTE_PTR_ADD(elem, elem->pad);

	return elem->user_size;
}

#else /* !RTE_MALLOC_ASAN */

static inline void
asan_set_zone(void *ptr __rte_unused, size_t len __rte_unused,
		uint32_t val __rte_unused) { }

static inline void
asan_set_freezone(void *ptr __rte_unused, size_t size __rte_unused) { }

static inline void
asan_clear_alloczone(struct malloc_elem *elem __rte_unused) { }

static inline void
asan_clear_split_alloczone(struct malloc_elem *elem __rte_unused) { }

static inline void
asan_set_redzone(struct malloc_elem *elem __rte_unused,
		size_t user_size __rte_unused) { }

static inline void
asan_clear_redzone(struct malloc_elem *elem __rte_unused) { }

static inline size_t
old_malloc_size(struct malloc_elem *elem)
{
	return elem->size - elem->pad - MALLOC_ELEM_OVERHEAD;
}
#endif /* !RTE_MALLOC_ASAN */

/*
 * Given a pointer to the start of a memory block returned by malloc, get
 * the actual malloc_elem header for that block.
 */
static inline struct malloc_elem *
malloc_elem_from_data(const void *data)
{
	if (data == NULL)
		return NULL;

	struct malloc_elem *elem = RTE_PTR_SUB(data, MALLOC_ELEM_HEADER_LEN);
	if (!malloc_elem_cookies_ok(elem))
		return NULL;
	return elem->state != ELEM_PAD ? elem : RTE_PTR_SUB(elem, elem->pad);
}
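
/*
 * Usage sketch, purely illustrative (this is an internal header, so the
 * snippet assumes it is compiled inside the malloc code): going from a data
 * pointer back to its element. Note that old_malloc_size() returns the
 * originally requested size in ASAN builds and the usable data size of the
 * element otherwise.
 *
 *	void *p = rte_malloc(NULL, 64, 0);
 *	struct malloc_elem *e = malloc_elem_from_data(p);
 *
 *	if (e != NULL) {
 *		size_t data_len = old_malloc_size(e);
 *		RTE_SET_USED(data_len);
 *	}
 *	rte_free(p);
 */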

/*
 * initialise a malloc_elem header
 */
void
malloc_elem_init(struct malloc_elem *elem,
		struct malloc_heap *heap,
		struct rte_memseg_list *msl,
		size_t size,
		struct malloc_elem *orig_elem,
		size_t orig_size,
		bool dirty);

void
malloc_elem_insert(struct malloc_elem *elem);

/*
 * return true if the current malloc_elem can hold a block of data
 * of the requested size and with the requested alignment
 */
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size,
		unsigned int align, size_t bound, bool contig);

/*
 * reserve a block of data in an existing malloc_elem. If the malloc_elem
 * is much larger than the data block requested, we split the element in two.
 */
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size,
		unsigned int align, size_t bound, bool contig);

/*
 * free a malloc_elem block by adding it to the free list. If the
 * blocks either immediately before or immediately after the newly freed
 * block are also free, the blocks are merged together.
 */
struct malloc_elem *
malloc_elem_free(struct malloc_elem *elem);

struct malloc_elem *
malloc_elem_join_adjacent_free(struct malloc_elem *elem);

/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
 */
int
malloc_elem_resize(struct malloc_elem *elem, size_t size);

void
malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len);

void
malloc_elem_free_list_remove(struct malloc_elem *elem);

/*
 * dump contents of malloc elem to a file.
 */
void
malloc_elem_dump(const struct malloc_elem *elem, FILE *f);

/*
 * Given an element size, compute its freelist index.
 */
size_t
malloc_elem_free_list_index(size_t size);

/*
 * Add element to its heap's free list.
 */
void
malloc_elem_free_list_insert(struct malloc_elem *elem);

/*
 * Find biggest IOVA-contiguous zone within an element with specified alignment.
 */
size_t
malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align);

#endif /* MALLOC_ELEM_H_ */