xref: /dpdk/lib/eal/common/malloc_heap.c (revision 30a1de105a5f40d77b344a891c4a68f79e815c43)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 #include <stdint.h>
5 #include <stddef.h>
6 #include <stdlib.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <sys/queue.h>
10 
11 #include <rte_memory.h>
12 #include <rte_errno.h>
13 #include <rte_eal.h>
14 #include <rte_eal_memconfig.h>
15 #include <rte_lcore.h>
16 #include <rte_common.h>
17 #include <rte_string_fns.h>
18 #include <rte_spinlock.h>
19 #include <rte_memzone.h>
20 #include <rte_fbarray.h>
21 
22 #include "eal_internal_cfg.h"
23 #include "eal_memalloc.h"
24 #include "eal_memcfg.h"
25 #include "eal_private.h"
26 #include "malloc_elem.h"
27 #include "malloc_heap.h"
28 #include "malloc_mp.h"
29 
30 /* start external socket IDs at a very high number */
31 #define CONST_MAX(a, b) (a > b ? a : b) /* RTE_MAX is not a constant */
32 #define EXTERNAL_HEAP_MIN_SOCKET_ID (CONST_MAX((1 << 8), RTE_MAX_NUMA_NODES))
33 
34 static unsigned
35 check_hugepage_sz(unsigned flags, uint64_t hugepage_sz)
36 {
37 	unsigned check_flag = 0;
38 
39 	if (!(flags & ~RTE_MEMZONE_SIZE_HINT_ONLY))
40 		return 1;
41 
42 	switch (hugepage_sz) {
43 	case RTE_PGSIZE_256K:
44 		check_flag = RTE_MEMZONE_256KB;
45 		break;
46 	case RTE_PGSIZE_2M:
47 		check_flag = RTE_MEMZONE_2MB;
48 		break;
49 	case RTE_PGSIZE_16M:
50 		check_flag = RTE_MEMZONE_16MB;
51 		break;
52 	case RTE_PGSIZE_256M:
53 		check_flag = RTE_MEMZONE_256MB;
54 		break;
55 	case RTE_PGSIZE_512M:
56 		check_flag = RTE_MEMZONE_512MB;
57 		break;
58 	case RTE_PGSIZE_1G:
59 		check_flag = RTE_MEMZONE_1GB;
60 		break;
61 	case RTE_PGSIZE_4G:
62 		check_flag = RTE_MEMZONE_4GB;
63 		break;
64 	case RTE_PGSIZE_16G:
65 		check_flag = RTE_MEMZONE_16GB;
66 	}
67 
68 	return check_flag & flags;
69 }
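
/*
 * Illustrative sketch (added for clarity, not part of the upstream code):
 * how the memzone size flags interact with a candidate page size. With no
 * size flags set, any page size is acceptable; otherwise only the matching
 * flag counts, and the RTE_MEMZONE_SIZE_HINT_ONLY fallback is handled by
 * the callers of this function.
 *
 *   check_hugepage_sz(0, RTE_PGSIZE_1G);                 // 1: no constraint
 *   check_hugepage_sz(RTE_MEMZONE_2MB, RTE_PGSIZE_2M);   // nonzero: match
 *   check_hugepage_sz(RTE_MEMZONE_2MB, RTE_PGSIZE_1G);   // 0: mismatch
 */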
70 
71 int
72 malloc_socket_to_heap_id(unsigned int socket_id)
73 {
74 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
75 	int i;
76 
77 	for (i = 0; i < RTE_MAX_HEAPS; i++) {
78 		struct malloc_heap *heap = &mcfg->malloc_heaps[i];
79 
80 		if (heap->socket_id == socket_id)
81 			return i;
82 	}
83 	return -1;
84 }
85 
86 /*
87  * Expand the heap with a memory area.
88  */
89 static struct malloc_elem *
90 malloc_heap_add_memory(struct malloc_heap *heap, struct rte_memseg_list *msl,
91 		void *start, size_t len, bool dirty)
92 {
93 	struct malloc_elem *elem = start;
94 
95 	malloc_elem_init(elem, heap, msl, len, elem, len, dirty);
96 
97 	malloc_elem_insert(elem);
98 
99 	elem = malloc_elem_join_adjacent_free(elem);
100 
101 	malloc_elem_free_list_insert(elem);
102 
103 	return elem;
104 }
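
/*
 * Rough layout of the added area after the call above (a sketch inferred
 * from how malloc_elem is used elsewhere in this file, not a normative
 * description):
 *
 *   start                                             start + len
 *   +----------------------+-------------------------+---------+
 *   | struct malloc_elem   |       free space        | trailer |
 *   | header (state FREE)  |                         |         |
 *   +----------------------+-------------------------+---------+
 *
 * The new element is then merged with any adjacent free elements and put
 * on the appropriate free list.
 */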
105 
106 static int
107 malloc_add_seg(const struct rte_memseg_list *msl,
108 		const struct rte_memseg *ms, size_t len, void *arg __rte_unused)
109 {
110 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
111 	struct rte_memseg_list *found_msl;
112 	struct malloc_heap *heap;
113 	int msl_idx, heap_idx;
114 
115 	if (msl->external)
116 		return 0;
117 
118 	heap_idx = malloc_socket_to_heap_id(msl->socket_id);
119 	if (heap_idx < 0) {
120 		RTE_LOG(ERR, EAL, "Memseg list has invalid socket id\n");
121 		return -1;
122 	}
123 	heap = &mcfg->malloc_heaps[heap_idx];
124 
125 	/* msl is const, so find it */
126 	msl_idx = msl - mcfg->memsegs;
127 
128 	if (msl_idx < 0 || msl_idx >= RTE_MAX_MEMSEG_LISTS)
129 		return -1;
130 
131 	found_msl = &mcfg->memsegs[msl_idx];
132 
133 	malloc_heap_add_memory(heap, found_msl, ms->addr, len,
134 			ms->flags & RTE_MEMSEG_FLAG_DIRTY);
135 
136 	heap->total_size += len;
137 
138 	RTE_LOG(DEBUG, EAL, "Added %zuM to heap on socket %i\n", len >> 20,
139 			msl->socket_id);
140 	return 0;
141 }
142 
143 /*
144  * Iterates through the freelist for a heap to find a free element
145  * which can store data of the required size and with the requested alignment.
146  * If size is 0, find the biggest available elem.
147  * Returns null on failure, or pointer to element on success.
148  */
149 static struct malloc_elem *
150 find_suitable_element(struct malloc_heap *heap, size_t size,
151 		unsigned int flags, size_t align, size_t bound, bool contig)
152 {
153 	size_t idx;
154 	struct malloc_elem *elem, *alt_elem = NULL;
155 
156 	for (idx = malloc_elem_free_list_index(size);
157 			idx < RTE_HEAP_NUM_FREELISTS; idx++) {
158 		for (elem = LIST_FIRST(&heap->free_head[idx]);
159 				!!elem; elem = LIST_NEXT(elem, free_list)) {
160 			if (malloc_elem_can_hold(elem, size, align, bound,
161 					contig)) {
162 				if (check_hugepage_sz(flags,
163 						elem->msl->page_sz))
164 					return elem;
165 				if (alt_elem == NULL)
166 					alt_elem = elem;
167 			}
168 		}
169 	}
170 
171 	if ((alt_elem != NULL) && (flags & RTE_MEMZONE_SIZE_HINT_ONLY))
172 		return alt_elem;
173 
174 	return NULL;
175 }
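
/*
 * Note on the scan above (descriptive comment added for clarity): the search
 * starts at the free-list bucket chosen for 'size' and walks towards the
 * buckets holding larger elements. An element that fits but sits on a
 * non-matching page size is remembered in 'alt_elem' and is only returned
 * when RTE_MEMZONE_SIZE_HINT_ONLY was passed, i.e. when the page-size flags
 * are a preference rather than a hard requirement.
 */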
176 
177 /*
178  * Iterates through the freelist for a heap to find a free element with the
179  * biggest size and requested alignment. Will also set size to the size of
180  * the element that was found.
181  * Returns null on failure, or pointer to element on success.
182  */
183 static struct malloc_elem *
184 find_biggest_element(struct malloc_heap *heap, size_t *size,
185 		unsigned int flags, size_t align, bool contig)
186 {
187 	struct malloc_elem *elem, *max_elem = NULL;
188 	size_t idx, max_size = 0;
189 
190 	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
191 		for (elem = LIST_FIRST(&heap->free_head[idx]);
192 				!!elem; elem = LIST_NEXT(elem, free_list)) {
193 			size_t cur_size;
194 			if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) == 0 &&
195 					!check_hugepage_sz(flags,
196 						elem->msl->page_sz))
197 				continue;
198 			if (contig) {
199 				cur_size =
200 					malloc_elem_find_max_iova_contig(elem,
201 							align);
202 			} else {
203 				void *data_start = RTE_PTR_ADD(elem,
204 						MALLOC_ELEM_HEADER_LEN);
205 				void *data_end = RTE_PTR_ADD(elem, elem->size -
206 						MALLOC_ELEM_TRAILER_LEN);
207 				void *aligned = RTE_PTR_ALIGN_CEIL(data_start,
208 						align);
209 				/* check if aligned data start is beyond end */
210 				if (aligned >= data_end)
211 					continue;
212 				cur_size = RTE_PTR_DIFF(data_end, aligned);
213 			}
214 			if (cur_size > max_size) {
215 				max_size = cur_size;
216 				max_elem = elem;
217 			}
218 		}
219 	}
220 
221 	*size = max_size;
222 	return max_elem;
223 }
224 
225 /*
226  * Main allocation routine for a heap. It is called with the heap lock
227  * held: it scans the free lists for a suitable element and carves the
228  * allocation out of it. If the scan fails, the caller may expand the
229  * heap with new memsegs and retry.
230  */
231 static void *
232 heap_alloc(struct malloc_heap *heap, const char *type __rte_unused, size_t size,
233 		unsigned int flags, size_t align, size_t bound, bool contig)
234 {
235 	struct malloc_elem *elem;
236 	size_t user_size = size;
237 
238 	size = RTE_CACHE_LINE_ROUNDUP(size);
239 	align = RTE_CACHE_LINE_ROUNDUP(align);
240 
241 	/* roundup might cause an overflow */
242 	if (size == 0)
243 		return NULL;
244 	elem = find_suitable_element(heap, size, flags, align, bound, contig);
245 	if (elem != NULL) {
246 		elem = malloc_elem_alloc(elem, size, align, bound, contig);
247 
248 		/* increase heap's count of allocated elements */
249 		heap->alloc_count++;
250 
251 		asan_set_redzone(elem, user_size);
252 	}
253 
254 	return elem == NULL ? NULL : (void *)(&elem[1]);
255 }
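
/*
 * Worked example for heap_alloc() (illustrative only, assumes 64-byte cache
 * lines): a request for 100 bytes is rounded up to 128 bytes before the free
 * lists are searched. On success the returned pointer is &elem[1], i.e. the
 * first byte past the malloc_elem header, so the header always immediately
 * precedes the memory handed back to the caller.
 */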
256 
257 static void *
258 heap_alloc_biggest(struct malloc_heap *heap, const char *type __rte_unused,
259 		unsigned int flags, size_t align, bool contig)
260 {
261 	struct malloc_elem *elem;
262 	size_t size;
263 
264 	align = RTE_CACHE_LINE_ROUNDUP(align);
265 
266 	elem = find_biggest_element(heap, &size, flags, align, contig);
267 	if (elem != NULL) {
268 		elem = malloc_elem_alloc(elem, size, align, 0, contig);
269 
270 		/* increase heap's count of allocated elements */
271 		heap->alloc_count++;
272 
273 		asan_set_redzone(elem, size);
274 	}
275 
276 	return elem == NULL ? NULL : (void *)(&elem[1]);
277 }
278 
279 /* this function is exposed in malloc_mp.h */
280 void
281 rollback_expand_heap(struct rte_memseg **ms, int n_segs,
282 		struct malloc_elem *elem, void *map_addr, size_t map_len)
283 {
284 	if (elem != NULL) {
285 		malloc_elem_free_list_remove(elem);
286 		malloc_elem_hide_region(elem, map_addr, map_len);
287 	}
288 
289 	eal_memalloc_free_seg_bulk(ms, n_segs);
290 }
291 
292 /* this function is exposed in malloc_mp.h */
293 struct malloc_elem *
294 alloc_pages_on_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
295 		int socket, unsigned int flags, size_t align, size_t bound,
296 		bool contig, struct rte_memseg **ms, int n_segs)
297 {
298 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
299 	struct rte_memseg_list *msl;
300 	struct malloc_elem *elem = NULL;
301 	size_t alloc_sz;
302 	int allocd_pages, i;
303 	bool dirty = false;
304 	void *ret, *map_addr;
305 
306 	alloc_sz = (size_t)pg_sz * n_segs;
307 
308 	/* first, check if we're allowed to allocate this memory */
309 	if (eal_memalloc_mem_alloc_validate(socket,
310 			heap->total_size + alloc_sz) < 0) {
311 		RTE_LOG(DEBUG, EAL, "User has disallowed allocation\n");
312 		return NULL;
313 	}
314 
315 	allocd_pages = eal_memalloc_alloc_seg_bulk(ms, n_segs, pg_sz,
316 			socket, true);
317 
318 	/* make sure we've allocated our pages... */
319 	if (allocd_pages < 0)
320 		return NULL;
321 
322 	map_addr = ms[0]->addr;
323 	msl = rte_mem_virt2memseg_list(map_addr);
324 
325 	/* check if we wanted contiguous memory but didn't get it */
326 	if (contig && !eal_memalloc_is_contig(msl, map_addr, alloc_sz)) {
327 		RTE_LOG(DEBUG, EAL, "%s(): couldn't allocate physically contiguous space\n",
328 				__func__);
329 		goto fail;
330 	}
331 
332 	/*
333 	 * Once we have all the memseg lists configured, if there is a DMA mask
334 	 * set, check that the IOVA addresses are not out of range. Otherwise the
335 	 * device setting the DMA mask could have problems with the mapped memory.
336 	 *
337 	 * There are two situations when this can happen:
338 	 *	1) memory initialization
339 	 *	2) dynamic memory allocation
340 	 *
341 	 * For 1), an error when checking the DMA mask means the app cannot be
342 	 * executed. For 2), it means the new memory cannot be added.
343 	 */
344 	if (mcfg->dma_maskbits &&
345 	    rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
346 		/*
347 		 * Currently this can only happen if IOMMU is enabled
348 		 * and the address width supported by the IOMMU hw is
349 		 * not enough for using the memory mapped IOVAs.
350 		 *
351 		 * If IOVA is VA, advise trying '--iova-mode pa', which
352 		 * could solve some situations when IOVA as VA is not
353 		 * really needed.
354 		 */
355 		RTE_LOG(ERR, EAL,
356 			"%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask\n",
357 			__func__);
358 
359 		/*
360 		 * If IOVA is VA and it is possible to run with IOVA PA
361 		 * (because the user is root), give advice on solving the
362 		 * problem.
363 		 */
364 		if ((rte_eal_iova_mode() == RTE_IOVA_VA) &&
365 		     rte_eal_using_phys_addrs())
366 			RTE_LOG(ERR, EAL,
367 				"%s(): Please try initializing EAL with --iova-mode=pa parameter\n",
368 				__func__);
369 		goto fail;
370 	}
371 
372 	/* Element is dirty if it contains at least one dirty page. */
373 	for (i = 0; i < allocd_pages; i++)
374 		dirty |= ms[i]->flags & RTE_MEMSEG_FLAG_DIRTY;
375 
376 	/* add newly minted memsegs to malloc heap */
377 	elem = malloc_heap_add_memory(heap, msl, map_addr, alloc_sz, dirty);
378 
379 	/* try once more, as now we have allocated new memory */
380 	ret = find_suitable_element(heap, elt_size, flags, align, bound,
381 			contig);
382 
383 	if (ret == NULL)
384 		goto fail;
385 
386 	return elem;
387 
388 fail:
389 	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
390 	return NULL;
391 }
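
/*
 * Note on the return value above (descriptive comment added for clarity):
 * the new element is returned so that the caller can roll the heap expansion
 * back on failure; the find_suitable_element() call here only verifies that
 * the original request can now be satisfied. The actual allocation is carved
 * out later, when the caller re-runs heap_alloc() on the grown heap.
 */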
392 
393 static int
394 try_expand_heap_primary(struct malloc_heap *heap, uint64_t pg_sz,
395 		size_t elt_size, int socket, unsigned int flags, size_t align,
396 		size_t bound, bool contig)
397 {
398 	struct malloc_elem *elem;
399 	struct rte_memseg **ms;
400 	void *map_addr;
401 	size_t alloc_sz;
402 	int n_segs;
403 	bool callback_triggered = false;
404 
405 	alloc_sz = RTE_ALIGN_CEIL(align + elt_size +
406 			MALLOC_ELEM_TRAILER_LEN, pg_sz);
407 	n_segs = alloc_sz / pg_sz;
408 
409 	/* we can't know in advance how many pages we'll need, so we malloc */
410 	ms = malloc(sizeof(*ms) * n_segs);
411 	if (ms == NULL)
412 		return -1;
413 	memset(ms, 0, sizeof(*ms) * n_segs);
414 
415 	elem = alloc_pages_on_heap(heap, pg_sz, elt_size, socket, flags, align,
416 			bound, contig, ms, n_segs);
417 
418 	if (elem == NULL)
419 		goto free_ms;
420 
421 	map_addr = ms[0]->addr;
422 
423 	/* notify user about changes in memory map */
424 	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC, map_addr, alloc_sz);
425 
426 	/* notify other processes that this has happened */
427 	if (request_sync()) {
428 		/* we couldn't ensure all processes have mapped memory,
429 		 * so free it back and notify everyone that it's been
430 		 * freed back.
431 		 *
432 		 * technically, we could've avoided adding memory addresses to
433 		 * the map, but that would've led to inconsistent behavior
434 		 * between primary and secondary processes, as those get
435 		 * callbacks during sync. therefore, force primary process to
436 		 * do alloc-and-rollback syncs as well.
437 		 */
438 		callback_triggered = true;
439 		goto free_elem;
440 	}
441 	heap->total_size += alloc_sz;
442 
443 	RTE_LOG(DEBUG, EAL, "Heap on socket %d was expanded by %zdMB\n",
444 		socket, alloc_sz >> 20ULL);
445 
446 	free(ms);
447 
448 	return 0;
449 
450 free_elem:
451 	if (callback_triggered)
452 		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
453 				map_addr, alloc_sz);
454 
455 	rollback_expand_heap(ms, n_segs, elem, map_addr, alloc_sz);
456 
457 	request_sync();
458 free_ms:
459 	free(ms);
460 
461 	return -1;
462 }
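
/*
 * Sizing example for the function above (illustrative; assumes 2M pages and
 * an element trailer much smaller than a page): a request for elt_size = 3M
 * with align = 64 gives alloc_sz = RTE_ALIGN_CEIL(64 + 3M + trailer, 2M),
 * i.e. 4M, so n_segs = 2 pages are requested from the page allocator.
 */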
463 
464 static int
465 try_expand_heap_secondary(struct malloc_heap *heap, uint64_t pg_sz,
466 		size_t elt_size, int socket, unsigned int flags, size_t align,
467 		size_t bound, bool contig)
468 {
469 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
470 	struct malloc_mp_req req;
471 	int req_result;
472 
473 	memset(&req, 0, sizeof(req));
474 
475 	req.t = REQ_TYPE_ALLOC;
476 	req.alloc_req.align = align;
477 	req.alloc_req.bound = bound;
478 	req.alloc_req.contig = contig;
479 	req.alloc_req.flags = flags;
480 	req.alloc_req.elt_size = elt_size;
481 	req.alloc_req.page_sz = pg_sz;
482 	req.alloc_req.socket = socket;
483 	req.alloc_req.malloc_heap_idx = heap - mcfg->malloc_heaps;
484 
485 	req_result = request_to_primary(&req);
486 
487 	if (req_result != 0)
488 		return -1;
489 
490 	if (req.result != REQ_RESULT_SUCCESS)
491 		return -1;
492 
493 	return 0;
494 }
495 
496 static int
497 try_expand_heap(struct malloc_heap *heap, uint64_t pg_sz, size_t elt_size,
498 		int socket, unsigned int flags, size_t align, size_t bound,
499 		bool contig)
500 {
501 	int ret;
502 
503 	rte_mcfg_mem_write_lock();
504 
505 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
506 		ret = try_expand_heap_primary(heap, pg_sz, elt_size, socket,
507 				flags, align, bound, contig);
508 	} else {
509 		ret = try_expand_heap_secondary(heap, pg_sz, elt_size, socket,
510 				flags, align, bound, contig);
511 	}
512 
513 	rte_mcfg_mem_write_unlock();
514 	return ret;
515 }
516 
517 static int
518 compare_pagesz(const void *a, const void *b)
519 {
520 	const struct rte_memseg_list * const*mpa = a;
521 	const struct rte_memseg_list * const*mpb = b;
522 	const struct rte_memseg_list *msla = *mpa;
523 	const struct rte_memseg_list *mslb = *mpb;
524 	uint64_t pg_sz_a = msla->page_sz;
525 	uint64_t pg_sz_b = mslb->page_sz;
526 
527 	if (pg_sz_a < pg_sz_b)
528 		return -1;
529 	if (pg_sz_a > pg_sz_b)
530 		return 1;
531 	return 0;
532 }
533 
534 static int
535 alloc_more_mem_on_socket(struct malloc_heap *heap, size_t size, int socket,
536 		unsigned int flags, size_t align, size_t bound, bool contig)
537 {
538 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
539 	struct rte_memseg_list *requested_msls[RTE_MAX_MEMSEG_LISTS];
540 	struct rte_memseg_list *other_msls[RTE_MAX_MEMSEG_LISTS];
541 	uint64_t requested_pg_sz[RTE_MAX_MEMSEG_LISTS];
542 	uint64_t other_pg_sz[RTE_MAX_MEMSEG_LISTS];
543 	uint64_t prev_pg_sz;
544 	int i, n_other_msls, n_other_pg_sz, n_requested_msls, n_requested_pg_sz;
545 	bool size_hint = (flags & RTE_MEMZONE_SIZE_HINT_ONLY) > 0;
546 	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
547 	void *ret;
548 
549 	memset(requested_msls, 0, sizeof(requested_msls));
550 	memset(other_msls, 0, sizeof(other_msls));
551 	memset(requested_pg_sz, 0, sizeof(requested_pg_sz));
552 	memset(other_pg_sz, 0, sizeof(other_pg_sz));
553 
554 	/*
555 	 * go through the memseg lists and take note of all the page sizes
556 	 * available, and whether any of them were specifically requested by the user.
557 	 */
558 	n_requested_msls = 0;
559 	n_other_msls = 0;
560 	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
561 		struct rte_memseg_list *msl = &mcfg->memsegs[i];
562 
563 		if (msl->socket_id != socket)
564 			continue;
565 
566 		if (msl->base_va == NULL)
567 			continue;
568 
569 		/* if pages of specific size were requested */
570 		if (size_flags != 0 && check_hugepage_sz(size_flags,
571 				msl->page_sz))
572 			requested_msls[n_requested_msls++] = msl;
573 		else if (size_flags == 0 || size_hint)
574 			other_msls[n_other_msls++] = msl;
575 	}
576 
577 	/* sort the lists, smallest first */
578 	qsort(requested_msls, n_requested_msls, sizeof(requested_msls[0]),
579 			compare_pagesz);
580 	qsort(other_msls, n_other_msls, sizeof(other_msls[0]),
581 			compare_pagesz);
582 
583 	/* now, extract page sizes we are supposed to try */
584 	prev_pg_sz = 0;
585 	n_requested_pg_sz = 0;
586 	for (i = 0; i < n_requested_msls; i++) {
587 		uint64_t pg_sz = requested_msls[i]->page_sz;
588 
589 		if (prev_pg_sz != pg_sz) {
590 			requested_pg_sz[n_requested_pg_sz++] = pg_sz;
591 			prev_pg_sz = pg_sz;
592 		}
593 	}
594 	prev_pg_sz = 0;
595 	n_other_pg_sz = 0;
596 	for (i = 0; i < n_other_msls; i++) {
597 		uint64_t pg_sz = other_msls[i]->page_sz;
598 
599 		if (prev_pg_sz != pg_sz) {
600 			other_pg_sz[n_other_pg_sz++] = pg_sz;
601 			prev_pg_sz = pg_sz;
602 		}
603 	}
604 
605 	/* finally, try allocating memory of specified page sizes, starting from
606 	 * the smallest sizes
607 	 */
608 	for (i = 0; i < n_requested_pg_sz; i++) {
609 		uint64_t pg_sz = requested_pg_sz[i];
610 
611 		/*
612 		 * do not pass the size hint here, as user expects other page
613 		 * sizes first, before resorting to best effort allocation.
614 		 */
615 		if (!try_expand_heap(heap, pg_sz, size, socket, size_flags,
616 				align, bound, contig))
617 			return 0;
618 	}
619 	if (n_other_pg_sz == 0)
620 		return -1;
621 
622 	/* now, check if we can reserve anything with size hint */
623 	ret = find_suitable_element(heap, size, flags, align, bound, contig);
624 	if (ret != NULL)
625 		return 0;
626 
627 	/*
628 	 * we still couldn't reserve memory, so try expanding heap with other
629 	 * page sizes, if there are any
630 	 */
631 	for (i = 0; i < n_other_pg_sz; i++) {
632 		uint64_t pg_sz = other_pg_sz[i];
633 
634 		if (!try_expand_heap(heap, pg_sz, size, socket, flags,
635 				align, bound, contig))
636 			return 0;
637 	}
638 	return -1;
639 }
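
/*
 * Summary of the strategy above (descriptive comment added for clarity):
 * 1. expand the heap using page sizes the user explicitly asked for,
 *    smallest first;
 * 2. if that fails and the size flags were only a hint (or no size flag was
 *    given at all), re-check the free lists and then fall back to any other
 *    page size available on this socket, again smallest first.
 */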
640 
641 /* this will try lower page sizes first */
642 static void *
643 malloc_heap_alloc_on_heap_id(const char *type, size_t size,
644 		unsigned int heap_id, unsigned int flags, size_t align,
645 		size_t bound, bool contig)
646 {
647 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
648 	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
649 	unsigned int size_flags = flags & ~RTE_MEMZONE_SIZE_HINT_ONLY;
650 	int socket_id;
651 	void *ret;
652 	const struct internal_config *internal_conf =
653 		eal_get_internal_configuration();
654 
655 	rte_spinlock_lock(&(heap->lock));
656 
657 	align = align == 0 ? 1 : align;
658 
659 	/* for legacy mode, try once and with all flags */
660 	if (internal_conf->legacy_mem) {
661 		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
662 		goto alloc_unlock;
663 	}
664 
665 	/*
666 	 * we do not pass the size hint here, because even if allocation fails,
667 	 * we may still be able to allocate memory from appropriate page sizes;
668 	 * we just need to request more memory first.
669 	 */
670 
671 	socket_id = rte_socket_id_by_idx(heap_id);
672 	/*
673 	 * if socket ID is negative, we cannot find a socket ID for this heap -
674 	 * which means it's an external heap. those can have unexpected page
675 	 * sizes, so if the user asked to allocate from there - assume user
676 	 * knows what they're doing, and allow allocating from there with any
677 	 * page size flags.
678 	 */
679 	if (socket_id < 0)
680 		size_flags |= RTE_MEMZONE_SIZE_HINT_ONLY;
681 
682 	ret = heap_alloc(heap, type, size, size_flags, align, bound, contig);
683 	if (ret != NULL)
684 		goto alloc_unlock;
685 
686 	/* if socket ID is invalid, this is an external heap */
687 	if (socket_id < 0)
688 		goto alloc_unlock;
689 
690 	if (!alloc_more_mem_on_socket(heap, size, socket_id, flags, align,
691 			bound, contig)) {
692 		ret = heap_alloc(heap, type, size, flags, align, bound, contig);
693 
694 		/* this should have succeeded */
695 		if (ret == NULL)
696 			RTE_LOG(ERR, EAL, "Error allocating from heap\n");
697 	}
698 alloc_unlock:
699 	rte_spinlock_unlock(&(heap->lock));
700 	return ret;
701 }
702 
703 static unsigned int
704 malloc_get_numa_socket(void)
705 {
706 	const struct internal_config *conf = eal_get_internal_configuration();
707 	unsigned int socket_id = rte_socket_id();
708 	unsigned int idx;
709 
710 	if (socket_id != (unsigned int)SOCKET_ID_ANY)
711 		return socket_id;
712 
713 	/* for control threads, return first socket where memory is available */
714 	for (idx = 0; idx < rte_socket_count(); idx++) {
715 		socket_id = rte_socket_id_by_idx(idx);
716 		if (conf->socket_mem[socket_id] != 0)
717 			return socket_id;
718 	}
719 
720 	return rte_socket_id_by_idx(0);
721 }
722 
723 void *
724 malloc_heap_alloc(const char *type, size_t size, int socket_arg,
725 		unsigned int flags, size_t align, size_t bound, bool contig)
726 {
727 	int socket, heap_id, i;
728 	void *ret;
729 
730 	/* return NULL if size is 0 or alignment is not power-of-2 */
731 	if (size == 0 || (align && !rte_is_power_of_2(align)))
732 		return NULL;
733 
734 	if (!rte_eal_has_hugepages() && socket_arg < RTE_MAX_NUMA_NODES)
735 		socket_arg = SOCKET_ID_ANY;
736 
737 	if (socket_arg == SOCKET_ID_ANY)
738 		socket = malloc_get_numa_socket();
739 	else
740 		socket = socket_arg;
741 
742 	/* turn socket ID into heap ID */
743 	heap_id = malloc_socket_to_heap_id(socket);
744 	/* if heap id is negative, socket ID was invalid */
745 	if (heap_id < 0)
746 		return NULL;
747 
748 	ret = malloc_heap_alloc_on_heap_id(type, size, heap_id, flags, align,
749 			bound, contig);
750 	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
751 		return ret;
752 
753 	/* try other heaps. we are only iterating through native DPDK sockets,
754 	 * so external heaps won't be included.
755 	 */
756 	for (i = 0; i < (int) rte_socket_count(); i++) {
757 		if (i == heap_id)
758 			continue;
759 		ret = malloc_heap_alloc_on_heap_id(type, size, i, flags, align,
760 				bound, contig);
761 		if (ret != NULL)
762 			return ret;
763 	}
764 	return NULL;
765 }
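
/*
 * Hypothetical usage sketch (not taken from any real caller): the public
 * allocation API is expected to funnel into this function roughly like so,
 * with bound = 0 and contig = false for a plain allocation:
 *
 *   void *p = malloc_heap_alloc("my_type", 1024, SOCKET_ID_ANY, 0,
 *                               RTE_CACHE_LINE_SIZE, 0, false);
 */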
766 
767 static void *
768 heap_alloc_biggest_on_heap_id(const char *type, unsigned int heap_id,
769 		unsigned int flags, size_t align, bool contig)
770 {
771 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
772 	struct malloc_heap *heap = &mcfg->malloc_heaps[heap_id];
773 	void *ret;
774 
775 	rte_spinlock_lock(&(heap->lock));
776 
777 	align = align == 0 ? 1 : align;
778 
779 	ret = heap_alloc_biggest(heap, type, flags, align, contig);
780 
781 	rte_spinlock_unlock(&(heap->lock));
782 
783 	return ret;
784 }
785 
786 void *
787 malloc_heap_alloc_biggest(const char *type, int socket_arg, unsigned int flags,
788 		size_t align, bool contig)
789 {
790 	int socket, i, cur_socket, heap_id;
791 	void *ret;
792 
793 	/* return NULL if align is not power-of-2 */
794 	if ((align && !rte_is_power_of_2(align)))
795 		return NULL;
796 
797 	if (!rte_eal_has_hugepages())
798 		socket_arg = SOCKET_ID_ANY;
799 
800 	if (socket_arg == SOCKET_ID_ANY)
801 		socket = malloc_get_numa_socket();
802 	else
803 		socket = socket_arg;
804 
805 	/* turn socket ID into heap ID */
806 	heap_id = malloc_socket_to_heap_id(socket);
807 	/* if heap id is negative, socket ID was invalid */
808 	if (heap_id < 0)
809 		return NULL;
810 
811 	ret = heap_alloc_biggest_on_heap_id(type, heap_id, flags, align,
812 			contig);
813 	if (ret != NULL || socket_arg != SOCKET_ID_ANY)
814 		return ret;
815 
816 	/* try other heaps */
817 	for (i = 0; i < (int) rte_socket_count(); i++) {
818 		cur_socket = rte_socket_id_by_idx(i);
819 		if (cur_socket == socket)
820 			continue;
821 		ret = heap_alloc_biggest_on_heap_id(type, i, flags, align,
822 				contig);
823 		if (ret != NULL)
824 			return ret;
825 	}
826 	return NULL;
827 }
828 
829 /* this function is exposed in malloc_mp.h */
830 int
831 malloc_heap_free_pages(void *aligned_start, size_t aligned_len)
832 {
833 	int n_segs, seg_idx, max_seg_idx;
834 	struct rte_memseg_list *msl;
835 	size_t page_sz;
836 
837 	msl = rte_mem_virt2memseg_list(aligned_start);
838 	if (msl == NULL)
839 		return -1;
840 
841 	page_sz = (size_t)msl->page_sz;
842 	n_segs = aligned_len / page_sz;
843 	seg_idx = RTE_PTR_DIFF(aligned_start, msl->base_va) / page_sz;
844 	max_seg_idx = seg_idx + n_segs;
845 
846 	for (; seg_idx < max_seg_idx; seg_idx++) {
847 		struct rte_memseg *ms;
848 
849 		ms = rte_fbarray_get(&msl->memseg_arr, seg_idx);
850 		eal_memalloc_free_seg(ms);
851 	}
852 	return 0;
853 }
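
/*
 * Index arithmetic example (illustrative, assumes 2M pages): freeing 6M
 * starting 4M past msl->base_va gives seg_idx = 2 and n_segs = 3, so
 * memsegs 2, 3 and 4 are returned to the system one page at a time.
 */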
854 
855 int
856 malloc_heap_free(struct malloc_elem *elem)
857 {
858 	struct malloc_heap *heap;
859 	void *start, *aligned_start, *end, *aligned_end;
860 	size_t len, aligned_len, page_sz;
861 	struct rte_memseg_list *msl;
862 	unsigned int i, n_segs, before_space, after_space;
863 	int ret;
864 	const struct internal_config *internal_conf =
865 		eal_get_internal_configuration();
866 
867 	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
868 		return -1;
869 
870 	asan_clear_redzone(elem);
871 
872 	/* elem may be merged with previous element, so keep heap address */
873 	heap = elem->heap;
874 	msl = elem->msl;
875 	page_sz = (size_t)msl->page_sz;
876 
877 	rte_spinlock_lock(&(heap->lock));
878 
879 	void *asan_ptr = RTE_PTR_ADD(elem, MALLOC_ELEM_HEADER_LEN + elem->pad);
880 	size_t asan_data_len = elem->size - MALLOC_ELEM_OVERHEAD - elem->pad;
881 
882 	/* mark element as free */
883 	elem->state = ELEM_FREE;
884 
885 	elem = malloc_elem_free(elem);
886 
887 	/* anything after this is a bonus */
888 	ret = 0;
889 
890 	/* ...of which we can't avail ourselves if we are in legacy mode, or if
891 	 * this is an externally allocated segment.
892 	 */
893 	if (internal_conf->legacy_mem || (msl->external > 0))
894 		goto free_unlock;
895 
896 	/* check if we can free any memory back to the system */
897 	if (elem->size < page_sz)
898 		goto free_unlock;
899 
900 	/* if user requested to match allocations, the sizes must match - if not,
901 	 * we will defer freeing these hugepages until the entire original allocation
902 	 * can be freed
903 	 */
904 	if (internal_conf->match_allocations && elem->size != elem->orig_size)
905 		goto free_unlock;
906 
907 	/* we probably can, but let's make sure, as we may not be using up a full page */
908 	start = elem;
909 	len = elem->size;
910 	aligned_start = RTE_PTR_ALIGN_CEIL(start, page_sz);
911 	end = RTE_PTR_ADD(elem, len);
912 	aligned_end = RTE_PTR_ALIGN_FLOOR(end, page_sz);
913 
914 	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
915 
916 	/* can't free anything */
917 	if (aligned_len < page_sz)
918 		goto free_unlock;
919 
920 	/* we can free something. however, some of these pages may be marked as
921 	 * unfreeable, so check for that as well
922 	 */
923 	n_segs = aligned_len / page_sz;
924 	for (i = 0; i < n_segs; i++) {
925 		const struct rte_memseg *tmp =
926 				rte_mem_virt2memseg(aligned_start, msl);
927 
928 		if (tmp->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
929 			/* this is an unfreeable segment, so move start */
930 			aligned_start = RTE_PTR_ADD(tmp->addr, tmp->len);
931 		}
932 	}
933 
934 	/* recalculate length and number of segments */
935 	aligned_len = RTE_PTR_DIFF(aligned_end, aligned_start);
936 	n_segs = aligned_len / page_sz;
937 
938 	/* check if we can still free some pages */
939 	if (n_segs == 0)
940 		goto free_unlock;
941 
942 	/* We're not done yet. We also have to check if by freeing space we will
943 	 * be leaving free elements that are too small to store new elements.
944 	 * Check if we have enough space in the beginning and at the end, or if
945 	 * start/end are exactly page aligned.
946 	 */
947 	before_space = RTE_PTR_DIFF(aligned_start, elem);
948 	after_space = RTE_PTR_DIFF(end, aligned_end);
949 	if (before_space != 0 &&
950 			before_space < MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
951 		/* There is not enough space before start, but we may be able to
952 		 * move the start forward by one page.
953 		 */
954 		if (n_segs == 1)
955 			goto free_unlock;
956 
957 		/* move start */
958 		aligned_start = RTE_PTR_ADD(aligned_start, page_sz);
959 		aligned_len -= page_sz;
960 		n_segs--;
961 	}
962 	if (after_space != 0 && after_space <
963 			MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE) {
964 		/* There is not enough space after end, but we may be able to
965 		 * move the end backwards by one page.
966 		 */
967 		if (n_segs == 1)
968 			goto free_unlock;
969 
970 		/* move end */
971 		aligned_end = RTE_PTR_SUB(aligned_end, page_sz);
972 		aligned_len -= page_sz;
973 		n_segs--;
974 	}
975 
976 	/* now we can finally free us some pages */
977 
978 	rte_mcfg_mem_write_lock();
979 
980 	/*
981 	 * we allow secondary processes to clear the heap of this allocated
982 	 * memory because it is safe to do so: even if notifications about
983 	 * unmapped pages don't make it to other processes, the heap is shared
984 	 * across all processes and will become empty of this memory anyway.
985 	 * Nothing can allocate it back unless the primary process is able to
986 	 * deliver the allocation message to every single running process.
987 	 */
988 
989 	malloc_elem_free_list_remove(elem);
990 
991 	malloc_elem_hide_region(elem, (void *) aligned_start, aligned_len);
992 
993 	heap->total_size -= aligned_len;
994 
995 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
996 		/* notify user about changes in memory map */
997 		eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE,
998 				aligned_start, aligned_len);
999 
1000 		/* don't care if any of this fails */
1001 		malloc_heap_free_pages(aligned_start, aligned_len);
1002 
1003 		request_sync();
1004 	} else {
1005 		struct malloc_mp_req req;
1006 
1007 		memset(&req, 0, sizeof(req));
1008 
1009 		req.t = REQ_TYPE_FREE;
1010 		req.free_req.addr = aligned_start;
1011 		req.free_req.len = aligned_len;
1012 
1013 		/*
1014 		 * we request primary to deallocate pages, but we don't do it
1015 		 * in this thread. instead, we notify primary that we would like
1016 		 * to deallocate pages, and this process will receive another
1017 		 * request (in parallel) that will do it for us on another
1018 		 * thread.
1019 		 *
1020 		 * we also don't really care if this succeeds - the data is
1021 		 * already removed from the heap, so it is, for all intents and
1022 		 * purposes, hidden from the rest of DPDK even if some other
1023 		 * process (including this one) may have these pages mapped.
1024 		 *
1025 		 * notifications about deallocated memory happen during sync.
1026 		 */
1027 		request_to_primary(&req);
1028 	}
1029 
1030 	RTE_LOG(DEBUG, EAL, "Heap on socket %d was shrunk by %zdMB\n",
1031 		msl->socket_id, aligned_len >> 20ULL);
1032 
1033 	rte_mcfg_mem_write_unlock();
1034 free_unlock:
1035 	asan_set_freezone(asan_ptr, asan_data_len);
1036 
1037 	rte_spinlock_unlock(&(heap->lock));
1038 	return ret;
1039 }
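
/*
 * Trimming example for the function above (illustrative, assumes 2M pages):
 * if the freed element starts 0.5M into page 0 and ends 0.5M into page 3,
 * aligned_start is the start of page 1 and aligned_end the start of page 3,
 * so at most pages 1 and 2 can be unmapped. The partially used head and tail
 * stay in the heap as free elements, and if either of them would be too
 * small to hold the element overhead plus MIN_DATA_SIZE, one more page is
 * kept mapped instead.
 */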
1040 
1041 int
1042 malloc_heap_resize(struct malloc_elem *elem, size_t size)
1043 {
1044 	int ret;
1045 
1046 	if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
1047 		return -1;
1048 
1049 	rte_spinlock_lock(&(elem->heap->lock));
1050 
1051 	ret = malloc_elem_resize(elem, size);
1052 
1053 	rte_spinlock_unlock(&(elem->heap->lock));
1054 
1055 	return ret;
1056 }
1057 
1058 /*
1059  * Function to retrieve data for a given heap
1060  */
1061 int
1062 malloc_heap_get_stats(struct malloc_heap *heap,
1063 		struct rte_malloc_socket_stats *socket_stats)
1064 {
1065 	size_t idx;
1066 	struct malloc_elem *elem;
1067 
1068 	rte_spinlock_lock(&heap->lock);
1069 
1070 	/* Initialise variables for heap */
1071 	socket_stats->free_count = 0;
1072 	socket_stats->heap_freesz_bytes = 0;
1073 	socket_stats->greatest_free_size = 0;
1074 
1075 	/* Iterate through free list */
1076 	for (idx = 0; idx < RTE_HEAP_NUM_FREELISTS; idx++) {
1077 		for (elem = LIST_FIRST(&heap->free_head[idx]);
1078 			!!elem; elem = LIST_NEXT(elem, free_list))
1079 		{
1080 			socket_stats->free_count++;
1081 			socket_stats->heap_freesz_bytes += elem->size;
1082 			if (elem->size > socket_stats->greatest_free_size)
1083 				socket_stats->greatest_free_size = elem->size;
1084 		}
1085 	}
1086 	/* Get stats on overall heap and allocated memory on this heap */
1087 	socket_stats->heap_totalsz_bytes = heap->total_size;
1088 	socket_stats->heap_allocsz_bytes = (socket_stats->heap_totalsz_bytes -
1089 			socket_stats->heap_freesz_bytes);
1090 	socket_stats->alloc_count = heap->alloc_count;
1091 
1092 	rte_spinlock_unlock(&heap->lock);
1093 	return 0;
1094 }
1095 
1096 /*
1097  * Function to retrieve data for a given heap
1098  */
1099 void
1100 malloc_heap_dump(struct malloc_heap *heap, FILE *f)
1101 {
1102 	struct malloc_elem *elem;
1103 
1104 	rte_spinlock_lock(&heap->lock);
1105 
1106 	fprintf(f, "Heap size: 0x%zx\n", heap->total_size);
1107 	fprintf(f, "Heap alloc count: %u\n", heap->alloc_count);
1108 
1109 	elem = heap->first;
1110 	while (elem) {
1111 		malloc_elem_dump(elem, f);
1112 		elem = elem->next;
1113 	}
1114 
1115 	rte_spinlock_unlock(&heap->lock);
1116 }
1117 
1118 static int
1119 destroy_elem(struct malloc_elem *elem, size_t len)
1120 {
1121 	struct malloc_heap *heap = elem->heap;
1122 
1123 	/* notify all subscribers that a memory area is going to be removed */
1124 	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_FREE, elem, len);
1125 
1126 	/* this element can be removed */
1127 	malloc_elem_free_list_remove(elem);
1128 	malloc_elem_hide_region(elem, elem, len);
1129 
1130 	heap->total_size -= len;
1131 
1132 	memset(elem, 0, sizeof(*elem));
1133 
1134 	return 0;
1135 }
1136 
1137 struct rte_memseg_list *
1138 malloc_heap_create_external_seg(void *va_addr, rte_iova_t iova_addrs[],
1139 		unsigned int n_pages, size_t page_sz, const char *seg_name,
1140 		unsigned int socket_id)
1141 {
1142 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1143 	char fbarray_name[RTE_FBARRAY_NAME_LEN];
1144 	struct rte_memseg_list *msl = NULL;
1145 	struct rte_fbarray *arr;
1146 	size_t seg_len = n_pages * page_sz;
1147 	unsigned int i;
1148 
1149 	/* first, find a free memseg list */
1150 	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1151 		struct rte_memseg_list *tmp = &mcfg->memsegs[i];
1152 		if (tmp->base_va == NULL) {
1153 			msl = tmp;
1154 			break;
1155 		}
1156 	}
1157 	if (msl == NULL) {
1158 		RTE_LOG(ERR, EAL, "Couldn't find empty memseg list\n");
1159 		rte_errno = ENOSPC;
1160 		return NULL;
1161 	}
1162 
1163 	snprintf(fbarray_name, sizeof(fbarray_name), "%s_%p",
1164 			seg_name, va_addr);
1165 
1166 	/* create the backing fbarray */
1167 	if (rte_fbarray_init(&msl->memseg_arr, fbarray_name, n_pages,
1168 			sizeof(struct rte_memseg)) < 0) {
1169 		RTE_LOG(ERR, EAL, "Couldn't create fbarray backing the memseg list\n");
1170 		return NULL;
1171 	}
1172 	arr = &msl->memseg_arr;
1173 
1174 	/* fbarray created, fill it up */
1175 	for (i = 0; i < n_pages; i++) {
1176 		struct rte_memseg *ms;
1177 
1178 		rte_fbarray_set_used(arr, i);
1179 		ms = rte_fbarray_get(arr, i);
1180 		ms->addr = RTE_PTR_ADD(va_addr, i * page_sz);
1181 		ms->iova = iova_addrs == NULL ? RTE_BAD_IOVA : iova_addrs[i];
1182 		ms->hugepage_sz = page_sz;
1183 		ms->len = page_sz;
1184 		ms->nchannel = rte_memory_get_nchannel();
1185 		ms->nrank = rte_memory_get_nrank();
1186 		ms->socket_id = socket_id;
1187 	}
1188 
1189 	/* set up the memseg list */
1190 	msl->base_va = va_addr;
1191 	msl->page_sz = page_sz;
1192 	msl->socket_id = socket_id;
1193 	msl->len = seg_len;
1194 	msl->version = 0;
1195 	msl->external = 1;
1196 
1197 	return msl;
1198 }
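
/*
 * Illustrative call (hypothetical values, not taken from any real caller):
 * describing four externally allocated 2M pages with known IOVAs:
 *
 *   rte_iova_t iovas[4] = { iova0, iova1, iova2, iova3 };
 *   struct rte_memseg_list *msl = malloc_heap_create_external_seg(
 *           va, iovas, 4, RTE_PGSIZE_2M, "ext_seg", ext_socket_id);
 */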
1199 
1200 struct extseg_walk_arg {
1201 	void *va_addr;
1202 	size_t len;
1203 	struct rte_memseg_list *msl;
1204 };
1205 
1206 static int
1207 extseg_walk(const struct rte_memseg_list *msl, void *arg)
1208 {
1209 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1210 	struct extseg_walk_arg *wa = arg;
1211 
1212 	if (msl->base_va == wa->va_addr && msl->len == wa->len) {
1213 		unsigned int found_idx;
1214 
1215 		/* msl is const */
1216 		found_idx = msl - mcfg->memsegs;
1217 		wa->msl = &mcfg->memsegs[found_idx];
1218 		return 1;
1219 	}
1220 	return 0;
1221 }
1222 
1223 struct rte_memseg_list *
1224 malloc_heap_find_external_seg(void *va_addr, size_t len)
1225 {
1226 	struct extseg_walk_arg wa;
1227 	int res;
1228 
1229 	wa.va_addr = va_addr;
1230 	wa.len = len;
1231 
1232 	res = rte_memseg_list_walk_thread_unsafe(extseg_walk, &wa);
1233 
1234 	if (res != 1) {
1235 		/* 0 means nothing was found, -1 shouldn't happen */
1236 		if (res == 0)
1237 			rte_errno = ENOENT;
1238 		return NULL;
1239 	}
1240 	return wa.msl;
1241 }
1242 
1243 int
1244 malloc_heap_destroy_external_seg(struct rte_memseg_list *msl)
1245 {
1246 	/* destroy the fbarray backing this memory */
1247 	if (rte_fbarray_destroy(&msl->memseg_arr) < 0)
1248 		return -1;
1249 
1250 	/* reset the memseg list */
1251 	memset(msl, 0, sizeof(*msl));
1252 
1253 	return 0;
1254 }
1255 
1256 int
1257 malloc_heap_add_external_memory(struct malloc_heap *heap,
1258 		struct rte_memseg_list *msl)
1259 {
1260 	/* erase contents of new memory */
1261 	memset(msl->base_va, 0, msl->len);
1262 
1263 	/* now, add newly minted memory to the malloc heap */
1264 	malloc_heap_add_memory(heap, msl, msl->base_va, msl->len, false);
1265 
1266 	heap->total_size += msl->len;
1267 
1268 	/* all done! */
1269 	RTE_LOG(DEBUG, EAL, "Added segment for heap %s starting at %p\n",
1270 			heap->name, msl->base_va);
1271 
1272 	/* notify all subscribers that a new memory area has been added */
1273 	eal_memalloc_mem_event_notify(RTE_MEM_EVENT_ALLOC,
1274 			msl->base_va, msl->len);
1275 
1276 	return 0;
1277 }
1278 
1279 int
1280 malloc_heap_remove_external_memory(struct malloc_heap *heap, void *va_addr,
1281 		size_t len)
1282 {
1283 	struct malloc_elem *elem = heap->first;
1284 
1285 	/* find element with specified va address */
1286 	while (elem != NULL && elem != va_addr) {
1287 		elem = elem->next;
1288 		/* stop if we've blown past our VA */
1289 		if (elem > (struct malloc_elem *)va_addr) {
1290 			rte_errno = ENOENT;
1291 			return -1;
1292 		}
1293 	}
1294 	/* check if element was found */
1295 	if (elem == NULL || elem->msl->len != len) {
1296 		rte_errno = ENOENT;
1297 		return -1;
1298 	}
1299 	/* if element's size is not equal to segment len, segment is busy */
1300 	if (elem->state == ELEM_BUSY || elem->size != len) {
1301 		rte_errno = EBUSY;
1302 		return -1;
1303 	}
1304 	return destroy_elem(elem, len);
1305 }
1306 
1307 int
1308 malloc_heap_create(struct malloc_heap *heap, const char *heap_name)
1309 {
1310 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1311 	uint32_t next_socket_id = mcfg->next_socket_id;
1312 
1313 	/* prevent overflow. did you really create 2 billion heaps??? */
1314 	if (next_socket_id > INT32_MAX) {
1315 		RTE_LOG(ERR, EAL, "Cannot assign new socket IDs\n");
1316 		rte_errno = ENOSPC;
1317 		return -1;
1318 	}
1319 
1320 	/* initialize empty heap */
1321 	heap->alloc_count = 0;
1322 	heap->first = NULL;
1323 	heap->last = NULL;
1324 	LIST_INIT(heap->free_head);
1325 	rte_spinlock_init(&heap->lock);
1326 	heap->total_size = 0;
1327 	heap->socket_id = next_socket_id;
1328 
1329 	/* we hold a global mem hotplug writelock, so it's safe to increment */
1330 	mcfg->next_socket_id++;
1331 
1332 	/* set up name */
1333 	strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
1334 	return 0;
1335 }
1336 
1337 int
1338 malloc_heap_destroy(struct malloc_heap *heap)
1339 {
1340 	if (heap->alloc_count != 0) {
1341 		RTE_LOG(ERR, EAL, "Heap is still in use\n");
1342 		rte_errno = EBUSY;
1343 		return -1;
1344 	}
1345 	if (heap->first != NULL || heap->last != NULL) {
1346 		RTE_LOG(ERR, EAL, "Heap still contains memory segments\n");
1347 		rte_errno = EBUSY;
1348 		return -1;
1349 	}
1350 	if (heap->total_size != 0)
1351 		RTE_LOG(ERR, EAL, "Total size not zero, heap is likely corrupt\n");
1352 
1353 	/* after this, the lock will be dropped */
1354 	memset(heap, 0, sizeof(*heap));
1355 
1356 	return 0;
1357 }
1358 
1359 int
1360 rte_eal_malloc_heap_init(void)
1361 {
1362 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1363 	unsigned int i;
1364 	const struct internal_config *internal_conf =
1365 		eal_get_internal_configuration();
1366 
1367 	if (internal_conf->match_allocations)
1368 		RTE_LOG(DEBUG, EAL, "Hugepages will be freed exactly as allocated.\n");
1369 
1370 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1371 		/* assign min socket ID to external heaps */
1372 		mcfg->next_socket_id = EXTERNAL_HEAP_MIN_SOCKET_ID;
1373 
1374 		/* assign names to default DPDK heaps */
1375 		for (i = 0; i < rte_socket_count(); i++) {
1376 			struct malloc_heap *heap = &mcfg->malloc_heaps[i];
1377 			char heap_name[RTE_HEAP_NAME_MAX_LEN];
1378 			int socket_id = rte_socket_id_by_idx(i);
1379 
1380 			snprintf(heap_name, sizeof(heap_name),
1381 					"socket_%i", socket_id);
1382 			strlcpy(heap->name, heap_name, RTE_HEAP_NAME_MAX_LEN);
1383 			heap->socket_id = socket_id;
1384 		}
1385 	}
1386 
1387 
1388 	if (register_mp_requests()) {
1389 		RTE_LOG(ERR, EAL, "Couldn't register malloc multiprocess actions\n");
1390 		rte_mcfg_mem_read_unlock();
1391 		return -1;
1392 	}
1393 
1394 	/* unlock mem hotplug here. it's safe for primary as no requests can
1395 	 * even come before primary itself is fully initialized, and secondaries
1396 	 * do not need to initialize the heap.
1397 	 */
1398 	rte_mcfg_mem_read_unlock();
1399 
1400 	/* secondary process does not need to initialize anything */
1401 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1402 		return 0;
1403 
1404 	/* add all IOVA-contiguous areas to the heap */
1405 	return rte_memseg_contig_walk(malloc_add_seg, NULL);
1406 }
1407 
1408 void
1409 rte_eal_malloc_heap_cleanup(void)
1410 {
1411 	unregister_mp_requests();
1412 }
1413