/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <inttypes.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_common.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "malloc_elem.h"
#include "malloc_heap.h"

/*
 * If debugging is enabled, freed memory is set to a poison value
 * to catch buggy programs. Otherwise, freed memory is set to zero
 * to avoid having to zero it again in zmalloc.
 */
#ifdef RTE_MALLOC_DEBUG
#define MALLOC_POISON	       0x6b
#else
#define MALLOC_POISON	       0
#endif

size_t
malloc_elem_find_max_iova_contig(struct malloc_elem *elem, size_t align)
{
	void *cur_page, *contig_seg_start, *page_end, *cur_seg_end;
	void *data_start, *data_end;
	rte_iova_t expected_iova;
	struct rte_memseg *ms;
	size_t page_sz, cur, max;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	page_sz = (size_t)elem->msl->page_sz;
	data_start = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
	data_end = RTE_PTR_ADD(elem, elem->size - MALLOC_ELEM_TRAILER_LEN);
	/* segment must start after header and with specified alignment */
	contig_seg_start = RTE_PTR_ALIGN_CEIL(data_start, align);

	/* return if aligned address is already out of malloc element */
	if (contig_seg_start > data_end)
		return 0;

	/* if we're in IOVA as VA mode, or if we're in legacy mode with
	 * hugepages, all elements are IOVA-contiguous. however, we can only
	 * make these assumptions about internal memory - externally allocated
	 * segments have to be checked.
	 */
	if (!elem->msl->external &&
			(rte_eal_iova_mode() == RTE_IOVA_VA ||
				(internal_conf->legacy_mem &&
					rte_eal_has_hugepages())))
		return RTE_PTR_DIFF(data_end, contig_seg_start);

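	/* Externally allocated memory may not be IOVA-contiguous, so walk the
	 * element page by page and track how long the IOVA addresses of
	 * successive pages stay contiguous.
	 */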
	cur_page = RTE_PTR_ALIGN_FLOOR(contig_seg_start, page_sz);
	ms = rte_mem_virt2memseg(cur_page, elem->msl);

	/* do first iteration outside the loop */
	page_end = RTE_PTR_ADD(cur_page, page_sz);
	cur_seg_end = RTE_MIN(page_end, data_end);
	cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start) -
			MALLOC_ELEM_TRAILER_LEN;
	max = cur;
	expected_iova = ms->iova + page_sz;
	/* memsegs are contiguous in memory */
	ms++;

	cur_page = RTE_PTR_ADD(cur_page, page_sz);

	while (cur_page < data_end) {
		page_end = RTE_PTR_ADD(cur_page, page_sz);
		cur_seg_end = RTE_MIN(page_end, data_end);

		/* reset start of contiguous segment if unexpected iova */
		if (ms->iova != expected_iova) {
			/* next contiguous segment must start at specified
			 * alignment.
			 */
			contig_seg_start = RTE_PTR_ALIGN(cur_page, align);
			/* new segment start may be on a different page, so find
			 * the page and skip to next iteration to make sure
			 * we're not blowing past data end.
			 */
			ms = rte_mem_virt2memseg(contig_seg_start, elem->msl);
			cur_page = ms->addr;
			/* don't trigger another recalculation */
			expected_iova = ms->iova;
			continue;
		}
		/* cur_seg_end ends on a page boundary or on data end. if we're
		 * looking at data end, then malloc trailer is already included
		 * in the calculations. if we're looking at page end, then we
		 * know there's more data past this page and thus there's space
		 * for malloc element trailer, so don't count it here.
		 */
		cur = RTE_PTR_DIFF(cur_seg_end, contig_seg_start);
		/* update max if cur value is bigger */
		if (cur > max)
			max = cur;

		/* move to next page */
		cur_page = page_end;
		expected_iova = ms->iova + page_sz;
		/* memsegs are contiguous in memory */
		ms++;
	}

	return max;
}

/*
 * Initialize a general malloc_elem header structure
 */
void
malloc_elem_init(struct malloc_elem *elem, struct malloc_heap *heap,
		struct rte_memseg_list *msl, size_t size,
		struct malloc_elem *orig_elem, size_t orig_size, bool dirty)
{
	elem->heap = heap;
	elem->msl = msl;
	elem->prev = NULL;
	elem->next = NULL;
	memset(&elem->free_list, 0, sizeof(elem->free_list));
	elem->state = ELEM_FREE;
	elem->dirty = dirty;
	elem->size = size;
	elem->pad = 0;
	elem->orig_elem = orig_elem;
	elem->orig_size = orig_size;
	set_header(elem);
	set_trailer(elem);
}

void
malloc_elem_insert(struct malloc_elem *elem)
{
	struct malloc_elem *prev_elem, *next_elem;
	struct malloc_heap *heap = elem->heap;

	/* first and last elements must be both NULL or both non-NULL */
	if ((heap->first == NULL) != (heap->last == NULL)) {
		EAL_LOG(ERR, "Heap is probably corrupt");
		return;
	}

	if (heap->first == NULL && heap->last == NULL) {
		/* if empty heap */
		heap->first = elem;
		heap->last = elem;
		prev_elem = NULL;
		next_elem = NULL;
	} else if (elem < heap->first) {
		/* if lower than start */
		prev_elem = NULL;
		next_elem = heap->first;
		heap->first = elem;
	} else if (elem > heap->last) {
		/* if higher than end */
		prev_elem = heap->last;
		next_elem = NULL;
		heap->last = elem;
	} else {
		/* the new memory is somewhere between start and end */
		uint64_t dist_from_start, dist_from_end;

		dist_from_end = RTE_PTR_DIFF(heap->last, elem);
		dist_from_start = RTE_PTR_DIFF(elem, heap->first);

		/* check which is closer, and find closest list entries */
		if (dist_from_start < dist_from_end) {
			prev_elem = heap->first;
			while (prev_elem->next < elem)
				prev_elem = prev_elem->next;
			next_elem = prev_elem->next;
		} else {
			next_elem = heap->last;
			while (next_elem->prev > elem)
				next_elem = next_elem->prev;
			prev_elem = next_elem->prev;
		}
	}

	/* insert new element */
	elem->prev = prev_elem;
	elem->next = next_elem;
	if (prev_elem)
		prev_elem->next = elem;
	if (next_elem)
		next_elem->prev = elem;
}

/*
 * Attempt to find enough physically contiguous memory in this block to store
 * our data. Assume that the element has at least enough space to fit the
 * data, so we just check the page addresses.
 */
static bool
elem_check_phys_contig(const struct rte_memseg_list *msl,
		void *start, size_t size)
{
	return eal_memalloc_is_contig(msl, start, size);
}

/*
 * calculate the starting point of where data of the requested size
 * and alignment would fit in the current element. If the data doesn't
 * fit, return NULL.
 */
static void *
elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align,
		size_t bound, bool contig)
{
	size_t elem_size = elem->size;

	/*
	 * we're allocating from the end, so adjust the size of element by
	 * alignment size.
	 */
	while (elem_size >= size) {
		const size_t bmask = ~(bound - 1);
		uintptr_t end_pt = (uintptr_t)elem +
				elem_size - MALLOC_ELEM_TRAILER_LEN;
		uintptr_t new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
				align);
		uintptr_t new_elem_start;

		/* the data must not cross a bound-aligned boundary: if the
		 * start and the last byte fall into different bound-sized
		 * windows, pull the end point back to the previous boundary
		 * and recompute the start.
		 */
		if ((new_data_start & bmask) != ((end_pt - 1) & bmask)) {
			end_pt = RTE_ALIGN_FLOOR(end_pt, bound);
			new_data_start = RTE_ALIGN_FLOOR((end_pt - size),
					align);
			end_pt = new_data_start + size;

			if (((end_pt - 1) & bmask) != (new_data_start & bmask))
				return NULL;
		}

		new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;

		/* if the new start point is before the existing start,
		 * it won't fit
		 */
		if (new_elem_start < (uintptr_t)elem)
			return NULL;

		if (contig) {
			size_t new_data_size = end_pt - new_data_start;

			/*
			 * if physical contiguousness was requested and we
			 * couldn't fit all data into one physically contiguous
			 * block, try again with lower addresses.
			 */
			if (!elem_check_phys_contig(elem->msl,
					(void *)new_data_start,
					new_data_size)) {
				elem_size -= align;
				continue;
			}
		}
		return (void *)new_elem_start;
	}
	return NULL;
}

/*
 * use elem_start_pt to determine if we can meet the size and
 * alignment request from the current element
 */
int
malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align,
		size_t bound, bool contig)
{
	return elem_start_pt(elem, size, align, bound, contig) != NULL;
}

/*
 * split an existing element into two smaller elements at the given
 * split_pt parameter.
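 *
 * Before and after (split_pt points inside elem's data area):
 *   [ elem ............................... ][ next ]
 *   [ elem ......... ][ split_pt .......... ][ next ]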
 */
static void
split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
{
	struct malloc_elem *next_elem = elem->next;
	const size_t old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
	const size_t new_elem_size = elem->size - old_elem_size;

	malloc_elem_init(split_pt, elem->heap, elem->msl, new_elem_size,
			elem->orig_elem, elem->orig_size, elem->dirty);
	split_pt->prev = elem;
	split_pt->next = next_elem;
	if (next_elem)
		next_elem->prev = split_pt;
	else
		elem->heap->last = split_pt;
	elem->next = split_pt;
	elem->size = old_elem_size;
	set_trailer(elem);
	if (elem->pad) {
		/* Update the padded inner element's size. */
		elem = RTE_PTR_ADD(elem, elem->pad);
		elem->size = old_elem_size - elem->pad;
	}
}

/*
 * our malloc heap is a doubly linked list, so unlink the element
 * from both of its neighbours.
 */
static void __rte_unused
remove_elem(struct malloc_elem *elem)
{
	struct malloc_elem *next, *prev;
	next = elem->next;
	prev = elem->prev;

	if (next)
		next->prev = prev;
	else
		elem->heap->last = prev;
	if (prev)
		prev->next = next;
	else
		elem->heap->first = next;

	elem->prev = NULL;
	elem->next = NULL;
}

static int
next_elem_is_adjacent(struct malloc_elem *elem)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	return elem->next == RTE_PTR_ADD(elem, elem->size) &&
			elem->next->msl == elem->msl &&
			(!internal_conf->match_allocations ||
			 elem->orig_elem == elem->next->orig_elem);
}

static int
prev_elem_is_adjacent(struct malloc_elem *elem)
{
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	return elem == RTE_PTR_ADD(elem->prev, elem->prev->size) &&
			elem->prev->msl == elem->msl &&
			(!internal_conf->match_allocations ||
			 elem->orig_elem == elem->prev->orig_elem);
}

/*
 * Given an element size, compute its freelist index.
 * We free an element into the freelist containing similarly-sized elements.
 * We try to allocate elements starting with the freelist containing
 * similarly-sized elements, and if necessary, we search freelists
 * containing larger elements.
 *
 * Example element size ranges for a heap with five free lists:
 *   heap->free_head[0] - (0   , 2^8)
 *   heap->free_head[1] - [2^8 , 2^10)
 *   heap->free_head[2] - [2^10, 2^12)
 *   heap->free_head[3] - [2^12, 2^14)
 *   heap->free_head[4] - [2^14, MAX_SIZE]
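 *
 * For example, a request of size 5000 gives log2 = 13 (the next power of
 * two above 5000 is 2^13), so index = (13 - 8 + 2 - 1) / 2 = 3, i.e.
 * heap->free_head[3], which holds elements in [2^12, 2^14).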
 */
size_t
malloc_elem_free_list_index(size_t size)
{
#define MALLOC_MINSIZE_LOG2   8
#define MALLOC_LOG2_INCREMENT 2

	size_t log2;
	size_t index;

	if (size < (1UL << MALLOC_MINSIZE_LOG2))
		return 0;

	/* Find next power of 2 > size. */
	log2 = sizeof(size) * 8 - rte_clz64(size);

	/* Compute freelist index, based on log2(size). */
	index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
			MALLOC_LOG2_INCREMENT;

	return index <= RTE_HEAP_NUM_FREELISTS - 1 ?
			index : RTE_HEAP_NUM_FREELISTS - 1;
}

/*
 * Add the specified element to its heap's free list.
 */
void
malloc_elem_free_list_insert(struct malloc_elem *elem)
{
	size_t idx;

	idx = malloc_elem_free_list_index(elem->size - MALLOC_ELEM_HEADER_LEN);
	elem->state = ELEM_FREE;
	LIST_INSERT_HEAD(&elem->heap->free_head[idx], elem, free_list);
}

/*
 * Remove the specified element from its heap's free list.
 */
void
malloc_elem_free_list_remove(struct malloc_elem *elem)
{
	LIST_REMOVE(elem, free_list);
}

/*
 * reserve a block of data in an existing malloc_elem. If the malloc_elem
 * is much larger than the data block requested, we split the element in two.
 * This function is only called from malloc_heap_alloc so parameter checking
 * is not done here, as it's done there previously.
 */
struct malloc_elem *
malloc_elem_alloc(struct malloc_elem *elem, size_t size, unsigned align,
		size_t bound, bool contig)
{
	struct malloc_elem *new_elem = elem_start_pt(elem, size, align, bound,
			contig);
	const size_t old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
	const size_t trailer_size = elem->size - old_elem_size - size -
		MALLOC_ELEM_OVERHEAD;

	malloc_elem_free_list_remove(elem);

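	/* The new element is carved out towards the end of the original one:
	 * leftover space after it is split off into a new free element if it
	 * is large enough, and leftover space before it either remains a
	 * (smaller) free element or, if too small to stand alone, is kept as
	 * padding of the allocated element.
	 */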
	if (trailer_size > MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* split it, too much free space after elem */
		struct malloc_elem *new_free_elem =
				RTE_PTR_ADD(new_elem, size + MALLOC_ELEM_OVERHEAD);

		asan_clear_split_alloczone(new_free_elem);

		split_elem(elem, new_free_elem);
		malloc_elem_free_list_insert(new_free_elem);

		if (elem == elem->heap->last)
			elem->heap->last = new_free_elem;
	}

	if (old_elem_size < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
		/* don't split it, pad the element instead */
		elem->state = ELEM_BUSY;
		elem->pad = old_elem_size;

		asan_clear_alloczone(elem);

		/* put a dummy header in padding, to point to real element header */
		if (elem->pad > 0) { /* pad will be at least 64 bytes, as everything
		                     * is cache-line aligned */
			new_elem->pad = elem->pad;
			new_elem->state = ELEM_PAD;
			new_elem->size = elem->size - elem->pad;
			set_header(new_elem);
		}

		return new_elem;
	}

	asan_clear_split_alloczone(new_elem);

	/* we are going to split the element in two. The original element
	 * remains free, and the new element is the one allocated.
	 * Re-insert original element, in case its new size makes it
	 * belong on a different list.
	 */

	split_elem(elem, new_elem);

	asan_clear_alloczone(new_elem);

	new_elem->state = ELEM_BUSY;
	malloc_elem_free_list_insert(elem);

	return new_elem;
}

/*
 * join two struct malloc_elem together. elem1 and elem2 must
 * be contiguous in memory.
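 * elem1 absorbs elem2, so elem1 must be the lower-addressed of the two.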
 */
static inline void
join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
{
	struct malloc_elem *next = elem2->next;
	elem1->size += elem2->size;
	if (next)
		next->prev = elem1;
	else
		elem1->heap->last = elem1;
	elem1->next = next;
	elem1->dirty |= elem2->dirty;
	if (elem1->pad) {
		struct malloc_elem *inner = RTE_PTR_ADD(elem1, elem1->pad);
		inner->size = elem1->size - elem1->pad;
	}
}

struct malloc_elem *
malloc_elem_join_adjacent_free(struct malloc_elem *elem)
{
	/*
	 * check if the next element exists, is adjacent and is free; if so,
	 * join with it and remove it from the free list.
	 */
	if (elem->next != NULL && elem->next->state == ELEM_FREE &&
			next_elem_is_adjacent(elem)) {
		void *erase;
		size_t erase_len;

		/* we will want to erase the trailer and header */
		erase = RTE_PTR_SUB(elem->next, MALLOC_ELEM_TRAILER_LEN);
		erase_len = MALLOC_ELEM_OVERHEAD + elem->next->pad;

		/* remove from free list, join to this one */
		malloc_elem_free_list_remove(elem->next);
		join_elem(elem, elem->next);

		/* erase header, trailer and pad */
		memset(erase, MALLOC_POISON, erase_len);
	}

	/*
	 * check if the previous element exists, is adjacent and is free; if
	 * so, join with it and remove it from the free list.
	 */
	if (elem->prev != NULL && elem->prev->state == ELEM_FREE &&
			prev_elem_is_adjacent(elem)) {
		struct malloc_elem *new_elem;
		void *erase;
		size_t erase_len;

		/* we will want to erase trailer and header */
		erase = RTE_PTR_SUB(elem, MALLOC_ELEM_TRAILER_LEN);
		erase_len = MALLOC_ELEM_OVERHEAD + elem->pad;

		/* remove from free list, join to this one */
		malloc_elem_free_list_remove(elem->prev);

		new_elem = elem->prev;
		join_elem(new_elem, elem);

		/* erase header, trailer and pad */
		memset(erase, MALLOC_POISON, erase_len);

		elem = new_elem;
	}

	return elem;
}

/*
 * free a malloc_elem block by adding it to the free list. If the
 * blocks immediately before or immediately after the newly freed block
 * are also free, the blocks are merged together.
 */
struct malloc_elem *
malloc_elem_free(struct malloc_elem *elem)
{
	void *ptr;
	size_t data_len;

	ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN);
	data_len = elem->size - MALLOC_ELEM_OVERHEAD;

	/*
	 * Consider the element clean for the purposes of joining.
	 * If both neighbors are clean or non-existent,
	 * the joint element will be clean,
	 * which means the memory should be cleared.
	 * There is no need to clear the memory if the joint element is dirty.
	 */
	elem->dirty = false;
	elem = malloc_elem_join_adjacent_free(elem);

	malloc_elem_free_list_insert(elem);

	elem->pad = 0;

	/* decrease heap's count of allocated elements */
	elem->heap->alloc_count--;

#ifndef RTE_MALLOC_DEBUG
	/* Normally clear the memory when needed. */
	if (!elem->dirty)
		memset(ptr, 0, data_len);
#else
	/* Always poison the memory in debug mode. */
	memset(ptr, MALLOC_POISON, data_len);
#endif

	return elem;
}

/*
 * Hide the [start, start + len) region of the element from the heap,
 * assuming all checks were already done by the caller.
 */
void
malloc_elem_hide_region(struct malloc_elem *elem, void *start, size_t len)
{
	struct malloc_elem *hide_start, *hide_end, *prev, *next;
	size_t len_before, len_after;

	hide_start = start;
	hide_end = RTE_PTR_ADD(start, len);

	prev = elem->prev;
	next = elem->next;

	/* we cannot do anything with non-adjacent elements */
	if (next && next_elem_is_adjacent(elem)) {
		len_after = RTE_PTR_DIFF(next, hide_end);
		if (len_after >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
			asan_clear_split_alloczone(hide_end);

			/* split after */
			split_elem(elem, hide_end);

			malloc_elem_free_list_insert(hide_end);
		} else if (len_after > 0) {
			EAL_LOG(ERR, "Unaligned element, heap is probably corrupt");
			return;
		}
	}

	/* we cannot do anything with non-adjacent elements */
	if (prev && prev_elem_is_adjacent(elem)) {
		len_before = RTE_PTR_DIFF(hide_start, elem);
		if (len_before >= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
			asan_clear_split_alloczone(hide_start);

			/* split before */
			split_elem(elem, hide_start);

			prev = elem;
			elem = hide_start;

			malloc_elem_free_list_insert(prev);
		} else if (len_before > 0) {
			EAL_LOG(ERR, "Unaligned element, heap is probably corrupt");
			return;
		}
	}

	asan_clear_alloczone(elem);

	remove_elem(elem);
}

/*
 * attempt to resize a malloc_elem by expanding into any free space
 * immediately after it in memory.
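 * Returns 0 on success, or -1 if the element cannot be grown in place.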
 */
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
	const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;

	/* if we request a smaller size, then always return ok */
	if (elem->size >= new_size) {
		asan_clear_alloczone(elem);
		return 0;
	}

	/* check if there is a next element, it's free and adjacent */
	if (!elem->next || elem->next->state != ELEM_FREE ||
			!next_elem_is_adjacent(elem))
		return -1;
	if (elem->size + elem->next->size < new_size)
		return -1;

	/* we now know the element fits, so remove from free list,
	 * join the two
	 */
	malloc_elem_free_list_remove(elem->next);
	join_elem(elem, elem->next);

	if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
		/* now we have a big block together. Let's cut it down a bit, by splitting */
		struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
		split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);

		asan_clear_split_alloczone(split_pt);

		split_elem(elem, split_pt);
		malloc_elem_free_list_insert(split_pt);
	}

	asan_clear_alloczone(elem);

	return 0;
}

static inline const char *
elem_state_to_str(enum elem_state state)
{
	switch (state) {
	case ELEM_PAD:
		return "PAD";
	case ELEM_BUSY:
		return "BUSY";
	case ELEM_FREE:
		return "FREE";
	}
	return "ERROR";
}

void
malloc_elem_dump(const struct malloc_elem *elem, FILE *f)
{
	fprintf(f, "Malloc element at %p (%s)\n", elem,
			elem_state_to_str(elem->state));
	fprintf(f, "  len: 0x%zx pad: 0x%" PRIx32 "\n", elem->size, elem->pad);
	fprintf(f, "  prev: %p next: %p\n", elem->prev, elem->next);
}