xref: /dpdk/lib/eal/linux/eal_memory.c (revision 51a5a72e2a82986b02244fcdd89c6571bc503de3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright(c) 2013 6WIND S.A.
4  */
5 
6 #include <errno.h>
7 #include <fcntl.h>
8 #include <stdbool.h>
9 #include <stdlib.h>
10 #include <stdio.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <string.h>
14 #include <sys/mman.h>
15 #include <sys/stat.h>
16 #include <sys/file.h>
17 #include <sys/resource.h>
18 #include <unistd.h>
19 #include <limits.h>
20 #include <signal.h>
21 #include <setjmp.h>
22 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
23 #define MEMFD_SUPPORTED
24 #endif
25 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
26 #include <numa.h>
27 #include <numaif.h>
28 #endif
29 
30 #include <rte_errno.h>
31 #include <rte_log.h>
32 #include <rte_memory.h>
33 #include <rte_eal.h>
34 #include <rte_lcore.h>
35 #include <rte_common.h>
36 
37 #include "eal_private.h"
38 #include "eal_memalloc.h"
39 #include "eal_memcfg.h"
40 #include "eal_internal_cfg.h"
41 #include "eal_filesystem.h"
42 #include "eal_hugepages.h"
43 #include "eal_options.h"
44 
45 #define PFN_MASK_SIZE	8
46 
47 /**
48  * @file
49  * Huge page mapping under Linux
50  *
51  * To reserve a large amount of contiguous memory, we use the hugepage
52  * feature of Linux. For that, hugetlbfs must be mounted. This code
53  * creates many files in the hugetlbfs directory (one per page) and maps
54  * them into virtual memory. For each page, we retrieve its physical
55  * address and remap it so as to obtain a zone that is contiguous
56  * both virtually and physically.
57  */
58 
59 static int phys_addrs_available = -1;
60 
61 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
62 
63 uint64_t eal_get_baseaddr(void)
64 {
65 	/*
66 	 * The Linux kernel uses a fairly high starting address when serving
67 	 * mmap calls. If a device has addressing limitations and the IOVA
68 	 * mode is VA, this starting address is likely too high for that
69 	 * device. However, it is possible to use a lower address in the
70 	 * process virtual address space, as with 64 bits there is plenty of
71 	 * available space.
72 	 *
73 	 * Currently known limitations are 39 or 40 bits. Setting the starting
74 	 * address at 4GB leaves 508GB or 1020GB for mapping the available
75 	 * hugepages. This is likely enough for most systems, although a
76 	 * device with addressing limitations should call
77 	 * rte_mem_check_dma_mask to ensure all memory is within the supported
78 	 * range.
79 	 */
80 #if defined(RTE_ARCH_LOONGARCH)
81 	return 0x7000000000ULL;
82 #else
83 	return 0x100000000ULL;
84 #endif
85 }
86 
87 /*
88  * Get physical address of any mapped virtual address in the current process.
89  */
90 phys_addr_t
91 rte_mem_virt2phy(const void *virtaddr)
92 {
93 	int fd, retval;
94 	uint64_t page, physaddr;
95 	unsigned long virt_pfn;
96 	int page_size;
97 	off_t offset;
98 
99 	if (phys_addrs_available == 0)
100 		return RTE_BAD_IOVA;
101 
102 	/* standard page size */
103 	page_size = getpagesize();
104 
105 	fd = open("/proc/self/pagemap", O_RDONLY);
106 	if (fd < 0) {
107 		RTE_LOG(INFO, EAL, "%s(): cannot open /proc/self/pagemap: %s\n",
108 			__func__, strerror(errno));
109 		return RTE_BAD_IOVA;
110 	}
111 
112 	virt_pfn = (unsigned long)virtaddr / page_size;
113 	offset = sizeof(uint64_t) * virt_pfn;
114 	if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
115 		RTE_LOG(INFO, EAL, "%s(): seek error in /proc/self/pagemap: %s\n",
116 				__func__, strerror(errno));
117 		close(fd);
118 		return RTE_BAD_IOVA;
119 	}
120 
121 	retval = read(fd, &page, PFN_MASK_SIZE);
122 	close(fd);
123 	if (retval < 0) {
124 		RTE_LOG(INFO, EAL, "%s(): cannot read /proc/self/pagemap: %s\n",
125 				__func__, strerror(errno));
126 		return RTE_BAD_IOVA;
127 	} else if (retval != PFN_MASK_SIZE) {
128 		RTE_LOG(INFO, EAL, "%s(): read %d bytes from /proc/self/pagemap "
129 				"but expected %d\n",
130 				__func__, retval, PFN_MASK_SIZE);
131 		return RTE_BAD_IOVA;
132 	}
133 
134 	/*
135 	 * the PFN (page frame number) is bits 0-54 (see
136 	 * pagemap.txt in the Linux kernel documentation)
137 	 */
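	/*
	 * Note: bit 63 of a pagemap entry indicates "page present"; on recent
	 * kernels the PFN field reads as zero for callers without
	 * CAP_SYS_ADMIN, so a zero PFN here can also mean insufficient
	 * privileges rather than an unmapped page.
	 */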
138 	if ((page & 0x7fffffffffffffULL) == 0)
139 		return RTE_BAD_IOVA;
140 
141 	physaddr = ((page & 0x7fffffffffffffULL) * page_size)
142 		+ ((unsigned long)virtaddr % page_size);
143 
144 	return physaddr;
145 }
146 
147 rte_iova_t
148 rte_mem_virt2iova(const void *virtaddr)
149 {
150 	if (rte_eal_iova_mode() == RTE_IOVA_VA)
151 		return (uintptr_t)virtaddr;
152 	return rte_mem_virt2phy(virtaddr);
153 }
154 
155 /*
156  * For each hugepage in hugepg_tbl, fill the physaddr value. We find
157  * it by browsing the /proc/self/pagemap special file.
158  */
159 static int
160 find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
161 {
162 	unsigned int i;
163 	phys_addr_t addr;
164 
165 	for (i = 0; i < hpi->num_pages[0]; i++) {
166 		addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
167 		if (addr == RTE_BAD_PHYS_ADDR)
168 			return -1;
169 		hugepg_tbl[i].physaddr = addr;
170 	}
171 	return 0;
172 }
173 
174 /*
175  * For each hugepage in hugepg_tbl, fill the physaddr value with a sequentially increasing placeholder (used when real physical addresses are unavailable).
176  */
177 static int
178 set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
179 {
180 	unsigned int i;
181 	static phys_addr_t addr;
182 
183 	for (i = 0; i < hpi->num_pages[0]; i++) {
184 		hugepg_tbl[i].physaddr = addr;
185 		addr += hugepg_tbl[i].size;
186 	}
187 	return 0;
188 }
189 
190 /*
191  * Check whether address-space layout randomization is enabled in
192  * the kernel. This is important for multi-process as it can prevent
193  * two processes from mapping data at the same virtual address.
194  * Returns:
195  *    0 - address space randomization disabled
196  *    1/2 - address space randomization enabled
197  *    negative error code on error
198  */
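/* (value 1 randomizes mmap base, stack and VDSO placement; 2 additionally randomizes the heap) */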
199 static int
200 aslr_enabled(void)
201 {
202 	char c;
203 	int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
204 	if (fd < 0)
205 		return -errno;
206 	retval = read(fd, &c, 1);
207 	close(fd);
208 	if (retval < 0)
209 		return -errno;
210 	if (retval == 0)
211 		return -EIO;
212 	switch (c) {
213 		case '0' : return 0;
214 		case '1' : return 1;
215 		case '2' : return 2;
216 		default: return -EINVAL;
217 	}
218 }
219 
220 static sigjmp_buf huge_jmpenv;
221 
222 static void huge_sigbus_handler(int signo __rte_unused)
223 {
224 	siglongjmp(huge_jmpenv, 1);
225 }
226 
227 /* Put sigsetjmp into a wrapper function to avoid a compile error. Any non-volatile,
228  * non-static local variable in the stack frame calling sigsetjmp might be
229  * clobbered by a call to longjmp.
230  */
231 static int huge_wrap_sigsetjmp(void)
232 {
233 	return sigsetjmp(huge_jmpenv, 1);
234 }
235 
236 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
237 /* Callback for numa library. */
238 void numa_error(char *where)
239 {
240 	RTE_LOG(ERR, EAL, "%s failed: %s\n", where, strerror(errno));
241 }
242 #endif
243 
244 /*
245  * Mmap all hugepages of the hugepage table: it first opens a file in
246  * hugetlbfs, then mmap()s hugepage_sz bytes of it. The virtual address of
247  * each page is stored in hugepg_tbl[i].orig_va; the pages are remapped into
248  * their final, contiguous locations later (see remap_segment()), at which
249  * point final_va is filled in.
250  */
251 static unsigned
252 map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
253 		  uint64_t *essential_memory __rte_unused)
254 {
255 	int fd;
256 	unsigned i;
257 	void *virtaddr;
258 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
259 	int node_id = -1;
260 	int essential_prev = 0;
261 	int oldpolicy;
262 	struct bitmask *oldmask = NULL;
263 	bool have_numa = true;
264 	unsigned long maxnode = 0;
265 	const struct internal_config *internal_conf =
266 		eal_get_internal_configuration();
267 
268 	/* Check if kernel supports NUMA. */
269 	if (numa_available() != 0) {
270 		RTE_LOG(DEBUG, EAL, "NUMA is not supported.\n");
271 		have_numa = false;
272 	}
273 
274 	if (have_numa) {
275 		RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
276 		oldmask = numa_allocate_nodemask();
277 		if (get_mempolicy(&oldpolicy, oldmask->maskp,
278 				  oldmask->size + 1, 0, 0) < 0) {
279 			RTE_LOG(ERR, EAL,
280 				"Failed to get current mempolicy: %s. "
281 				"Assuming MPOL_DEFAULT.\n", strerror(errno));
282 			oldpolicy = MPOL_DEFAULT;
283 		}
284 		for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
285 			if (internal_conf->socket_mem[i])
286 				maxnode = i + 1;
287 	}
288 #endif
289 
290 	for (i = 0; i < hpi->num_pages[0]; i++) {
291 		struct hugepage_file *hf = &hugepg_tbl[i];
292 		uint64_t hugepage_sz = hpi->hugepage_sz;
293 
294 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
295 		if (maxnode) {
296 			unsigned int j;
297 
298 			for (j = 0; j < maxnode; j++)
299 				if (essential_memory[j])
300 					break;
301 
302 			if (j == maxnode) {
303 				node_id = (node_id + 1) % maxnode;
304 				while (!internal_conf->socket_mem[node_id]) {
305 					node_id++;
306 					node_id %= maxnode;
307 				}
308 				essential_prev = 0;
309 			} else {
310 				node_id = j;
311 				essential_prev = essential_memory[j];
312 
313 				if (essential_memory[j] < hugepage_sz)
314 					essential_memory[j] = 0;
315 				else
316 					essential_memory[j] -= hugepage_sz;
317 			}
318 
319 			RTE_LOG(DEBUG, EAL,
320 				"Setting policy MPOL_PREFERRED for socket %d\n",
321 				node_id);
322 			numa_set_preferred(node_id);
323 		}
324 #endif
325 
326 		hf->file_id = i;
327 		hf->size = hugepage_sz;
328 		eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
329 				hpi->hugedir, hf->file_id);
330 		hf->filepath[sizeof(hf->filepath) - 1] = '\0';
331 
332 		/* try to create hugepage file */
333 		fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
334 		if (fd < 0) {
335 			RTE_LOG(DEBUG, EAL, "%s(): open failed: %s\n", __func__,
336 					strerror(errno));
337 			goto out;
338 		}
339 
340 		/* map the segment, and populate page tables,
341 		 * the kernel fills this segment with zeros. we don't care where
342 		 * this gets mapped - we already have contiguous memory areas
343 		 * ready for us to map into.
344 		 */
345 		virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
346 				MAP_SHARED | MAP_POPULATE, fd, 0);
347 		if (virtaddr == MAP_FAILED) {
348 			RTE_LOG(DEBUG, EAL, "%s(): mmap failed: %s\n", __func__,
349 					strerror(errno));
350 			close(fd);
351 			goto out;
352 		}
353 
354 		hf->orig_va = virtaddr;
355 
356 		/* In Linux, hugetlb limits (e.g. cgroup limits) are
357 		 * enforced at fault time rather than at mmap() time,
358 		 * even with MAP_POPULATE: the kernel sends a SIGBUS
359 		 * signal instead. To avoid being killed, save the stack
360 		 * environment here; if SIGBUS is raised, we can jump
361 		 * back to this point.
362 		 */
363 		if (huge_wrap_sigsetjmp()) {
364 			RTE_LOG(DEBUG, EAL, "SIGBUS: Cannot mmap more "
365 				"hugepages of size %u MB\n",
366 				(unsigned int)(hugepage_sz / 0x100000));
367 			munmap(virtaddr, hugepage_sz);
368 			close(fd);
369 			unlink(hugepg_tbl[i].filepath);
370 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
371 			if (maxnode)
372 				essential_memory[node_id] =
373 					essential_prev;
374 #endif
375 			goto out;
376 		}
377 		*(int *)virtaddr = 0;
378 
379 		/* set shared lock on the file. */
380 		if (flock(fd, LOCK_SH) < 0) {
381 			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
382 				__func__, strerror(errno));
383 			close(fd);
384 			goto out;
385 		}
386 
387 		close(fd);
388 	}
389 
390 out:
391 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
392 	if (maxnode) {
393 		RTE_LOG(DEBUG, EAL,
394 			"Restoring previous memory policy: %d\n", oldpolicy);
395 		if (oldpolicy == MPOL_DEFAULT) {
396 			numa_set_localalloc();
397 		} else if (set_mempolicy(oldpolicy, oldmask->maskp,
398 					 oldmask->size + 1) < 0) {
399 			RTE_LOG(ERR, EAL, "Failed to restore mempolicy: %s\n",
400 				strerror(errno));
401 			numa_set_localalloc();
402 		}
403 	}
404 	if (oldmask != NULL)
405 		numa_free_cpumask(oldmask);
406 #endif
407 	return i;
408 }
409 
410 /*
411  * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
412  * page.
413  */
414 static int
415 find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
416 {
417 	int socket_id;
418 	char *end, *nodestr;
419 	unsigned i, hp_count = 0;
420 	uint64_t virt_addr;
421 	char buf[BUFSIZ];
422 	char hugedir_str[PATH_MAX];
423 	FILE *f;
424 
425 	f = fopen("/proc/self/numa_maps", "r");
426 	if (f == NULL) {
427 		RTE_LOG(NOTICE, EAL, "NUMA support not available"
428 			", assuming all memory is on socket_id 0\n");
429 		return 0;
430 	}
431 
432 	snprintf(hugedir_str, sizeof(hugedir_str),
433 			"%s/%s", hpi->hugedir, eal_get_hugefile_prefix());
434 
435 	/* parse numa map */
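	/* a hugepage entry in numa_maps typically looks something like:
	 *   7f2840000000 prefer:0 file=/dev/hugepages/rtemap_0 huge dirty=1 N0=1 kernelpagesize_kB=2048
	 * (the exact fields vary with kernel version and mempolicy)
	 */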
436 	while (fgets(buf, sizeof(buf), f) != NULL) {
437 
438 		/* ignore non huge page */
439 		/* ignore non-hugepage mappings */
440 				strstr(buf, hugedir_str) == NULL)
441 			continue;
442 
443 		/* get zone addr */
444 		virt_addr = strtoull(buf, &end, 16);
445 		if (virt_addr == 0 || end == buf) {
446 			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
447 			goto error;
448 		}
449 
450 		/* get node id (socket id) */
451 		nodestr = strstr(buf, " N");
452 		if (nodestr == NULL) {
453 			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
454 			goto error;
455 		}
456 		nodestr += 2;
457 		end = strstr(nodestr, "=");
458 		if (end == NULL) {
459 			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
460 			goto error;
461 		}
462 		end[0] = '\0';
463 		end = NULL;
464 
465 		socket_id = strtoul(nodestr, &end, 0);
466 		if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
467 			RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
468 			goto error;
469 		}
470 
471 		/* if we find this page in our mappings, set socket_id */
472 		for (i = 0; i < hpi->num_pages[0]; i++) {
473 			void *va = (void *)(unsigned long)virt_addr;
474 			if (hugepg_tbl[i].orig_va == va) {
475 				hugepg_tbl[i].socket_id = socket_id;
476 				hp_count++;
477 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
478 				RTE_LOG(DEBUG, EAL,
479 					"Hugepage %s is on socket %d\n",
480 					hugepg_tbl[i].filepath, socket_id);
481 #endif
482 			}
483 		}
484 	}
485 
486 	if (hp_count < hpi->num_pages[0])
487 		goto error;
488 
489 	fclose(f);
490 	return 0;
491 
492 error:
493 	fclose(f);
494 	return -1;
495 }
496 
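/*
 * Comparator for qsort(): order hugepage files by physical address so that
 * physically contiguous pages end up adjacent in the table.
 */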
497 static int
498 cmp_physaddr(const void *a, const void *b)
499 {
500 #ifndef RTE_ARCH_PPC_64
501 	const struct hugepage_file *p1 = a;
502 	const struct hugepage_file *p2 = b;
503 #else
504 	/* PowerPC needs memory sorted in reverse order from x86 */
505 	const struct hugepage_file *p1 = b;
506 	const struct hugepage_file *p2 = a;
507 #endif
508 	if (p1->physaddr < p2->physaddr)
509 		return -1;
510 	else if (p1->physaddr > p2->physaddr)
511 		return 1;
512 	else
513 		return 0;
514 }
515 
516 /*
517  * Uses mmap to create a shared memory area for storage of data.
518  * Used in this file to store the hugepage file map on disk.
519  */
520 static void *
521 create_shared_memory(const char *filename, const size_t mem_size)
522 {
523 	void *retval;
524 	int fd;
525 	const struct internal_config *internal_conf =
526 		eal_get_internal_configuration();
527 
528 	/* if no shared files mode is used, create anonymous memory instead */
529 	if (internal_conf->no_shconf) {
530 		retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
531 				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
532 		if (retval == MAP_FAILED)
533 			return NULL;
534 		return retval;
535 	}
536 
537 	fd = open(filename, O_CREAT | O_RDWR, 0600);
538 	if (fd < 0)
539 		return NULL;
540 	if (ftruncate(fd, mem_size) < 0) {
541 		close(fd);
542 		return NULL;
543 	}
544 	retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
545 	close(fd);
546 	if (retval == MAP_FAILED)
547 		return NULL;
548 	return retval;
549 }
550 
551 /*
552  * this copies *active* hugepages from one hugepage table to another.
553  * destination is typically the shared memory.
554  */
555 static int
556 copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
557 		const struct hugepage_file * src, int src_size)
558 {
559 	int src_pos, dst_pos = 0;
560 
561 	for (src_pos = 0; src_pos < src_size; src_pos++) {
562 		if (src[src_pos].orig_va != NULL) {
563 			/* error on overflow attempt */
564 			if (dst_pos == dest_size)
565 				return -1;
566 			memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
567 			dst_pos++;
568 		}
569 	}
570 	return 0;
571 }
572 
573 static int
574 unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
575 		unsigned num_hp_info)
576 {
577 	unsigned socket, size;
578 	int page, nrpages = 0;
579 	const struct internal_config *internal_conf =
580 		eal_get_internal_configuration();
581 
582 	/* get total number of hugepages */
583 	for (size = 0; size < num_hp_info; size++)
584 		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
585 			nrpages +=
586 			internal_conf->hugepage_info[size].num_pages[socket];
587 
588 	for (page = 0; page < nrpages; page++) {
589 		struct hugepage_file *hp = &hugepg_tbl[page];
590 
591 		if (hp->orig_va != NULL && unlink(hp->filepath)) {
592 			RTE_LOG(WARNING, EAL, "%s(): Removing %s failed: %s\n",
593 				__func__, hp->filepath, strerror(errno));
594 		}
595 	}
596 	return 0;
597 }
598 
599 /*
600  * unmaps hugepages that are not going to be used. since we originally allocate
601  * ALL hugepages (not just those we need), additional unmapping needs to be done.
602  */
603 static int
604 unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
605 		struct hugepage_info *hpi,
606 		unsigned num_hp_info)
607 {
608 	unsigned socket, size;
609 	int page, nrpages = 0;
610 	const struct internal_config *internal_conf =
611 		eal_get_internal_configuration();
612 
613 	/* get total number of hugepages */
614 	for (size = 0; size < num_hp_info; size++)
615 		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
616 			nrpages += internal_conf->hugepage_info[size].num_pages[socket];
617 
618 	for (size = 0; size < num_hp_info; size++) {
619 		for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
620 			unsigned pages_found = 0;
621 
622 			/* traverse until we have unmapped all the unused pages */
623 			for (page = 0; page < nrpages; page++) {
624 				struct hugepage_file *hp = &hugepg_tbl[page];
625 
626 				/* find a page that matches the criteria */
627 				if ((hp->size == hpi[size].hugepage_sz) &&
628 						(hp->socket_id == (int) socket)) {
629 
630 					/* if we skipped enough pages, unmap the rest */
631 					if (pages_found == hpi[size].num_pages[socket]) {
632 						uint64_t unmap_len;
633 
634 						unmap_len = hp->size;
635 
636 						/* get start addr and len of the remaining segment */
637 						munmap(hp->orig_va,
638 							(size_t)unmap_len);
639 
640 						hp->orig_va = NULL;
641 						if (unlink(hp->filepath) == -1) {
642 							RTE_LOG(ERR, EAL, "%s(): Removing %s failed: %s\n",
643 									__func__, hp->filepath, strerror(errno));
644 							return -1;
645 						}
646 					} else {
647 						/* this page is needed - count it and move on */
648 						pages_found++;
649 					}
650 
651 				} /* match page */
652 			} /* foreach page */
653 		} /* foreach socket */
654 	} /* foreach pagesize */
655 
656 	return 0;
657 }
658 
659 static int
660 remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
661 {
662 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
663 	struct rte_memseg_list *msl;
664 	struct rte_fbarray *arr;
665 	int cur_page, seg_len;
666 	unsigned int msl_idx;
667 	int ms_idx;
668 	uint64_t page_sz;
669 	size_t memseg_len;
670 	int socket_id;
671 #ifndef RTE_ARCH_64
672 	const struct internal_config *internal_conf =
673 		eal_get_internal_configuration();
674 #endif
675 	page_sz = hugepages[seg_start].size;
676 	socket_id = hugepages[seg_start].socket_id;
677 	seg_len = seg_end - seg_start;
678 
679 	RTE_LOG(DEBUG, EAL, "Attempting to map %" PRIu64 "M on socket %i\n",
680 			(seg_len * page_sz) >> 20ULL, socket_id);
681 
682 	/* find free space in memseg lists */
683 	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
684 		int free_len;
685 		bool empty;
686 		msl = &mcfg->memsegs[msl_idx];
687 		arr = &msl->memseg_arr;
688 
689 		if (msl->page_sz != page_sz)
690 			continue;
691 		if (msl->socket_id != socket_id)
692 			continue;
693 
694 		/* leave space for a hole if array is not empty */
695 		empty = arr->count == 0;
696 		/* find start of the biggest contiguous block and its size */
697 		ms_idx = rte_fbarray_find_biggest_free(arr, 0);
698 		if (ms_idx < 0)
699 			continue;
700 		/* the hole is 1 segment long, so we need at least 2 free segments */
701 		free_len = rte_fbarray_find_contig_free(arr, ms_idx);
702 		if (free_len < 2)
703 			continue;
704 		/* leave some space between memsegs, they are not IOVA
705 		 * contiguous, so they shouldn't be VA contiguous either.
706 		 */
707 		if (!empty) {
708 			ms_idx++;
709 			free_len--;
710 		}
711 
712 		/* we might not get all of the space we wanted */
713 		free_len = RTE_MIN(seg_len, free_len);
714 		seg_end = seg_start + free_len;
715 		seg_len = seg_end - seg_start;
716 		break;
717 	}
718 	if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
719 		RTE_LOG(ERR, EAL, "Could not find space for memseg. Please increase %s and/or %s in configuration.\n",
720 				RTE_STR(RTE_MAX_MEMSEG_PER_TYPE),
721 				RTE_STR(RTE_MAX_MEM_MB_PER_TYPE));
722 		return -1;
723 	}
724 
725 #ifdef RTE_ARCH_PPC_64
726 	/* for PPC64 we go through the list backwards */
727 	for (cur_page = seg_end - 1; cur_page >= seg_start;
728 			cur_page--, ms_idx++) {
729 #else
730 	for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
731 #endif
732 		struct hugepage_file *hfile = &hugepages[cur_page];
733 		struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
734 		void *addr;
735 		int fd;
736 
737 		fd = open(hfile->filepath, O_RDWR);
738 		if (fd < 0) {
739 			RTE_LOG(ERR, EAL, "Could not open '%s': %s\n",
740 					hfile->filepath, strerror(errno));
741 			return -1;
742 		}
743 		/* set shared lock on the file. */
744 		if (flock(fd, LOCK_SH) < 0) {
745 			RTE_LOG(DEBUG, EAL, "Could not lock '%s': %s\n",
746 					hfile->filepath, strerror(errno));
747 			close(fd);
748 			return -1;
749 		}
750 		memseg_len = (size_t)page_sz;
751 		addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);
752 
753 		/* we know this address is already mmapped by memseg list, so
754 		 * using MAP_FIXED here is safe
755 		 */
756 		addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
757 				MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
758 		if (addr == MAP_FAILED) {
759 			RTE_LOG(ERR, EAL, "Couldn't remap '%s': %s\n",
760 					hfile->filepath, strerror(errno));
761 			close(fd);
762 			return -1;
763 		}
764 
765 		/* we have a new address, so unmap previous one */
766 #ifndef RTE_ARCH_64
767 		/* in 32-bit legacy mode, we have already unmapped the page */
768 		if (!internal_conf->legacy_mem)
769 			munmap(hfile->orig_va, page_sz);
770 #else
771 		munmap(hfile->orig_va, page_sz);
772 #endif
773 
774 		hfile->orig_va = NULL;
775 		hfile->final_va = addr;
776 
777 		/* in IOVA-as-VA mode, rewrite the physical address to be the VA */
778 		if (rte_eal_iova_mode() == RTE_IOVA_VA)
779 			hfile->physaddr = (uintptr_t)addr;
780 
781 		/* set up memseg data */
782 		ms->addr = addr;
783 		ms->hugepage_sz = page_sz;
784 		ms->len = memseg_len;
785 		ms->iova = hfile->physaddr;
786 		ms->socket_id = hfile->socket_id;
787 		ms->nchannel = rte_memory_get_nchannel();
788 		ms->nrank = rte_memory_get_nrank();
789 
790 		rte_fbarray_set_used(arr, ms_idx);
791 
792 		/* store segment fd internally */
793 		if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
794 			RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
795 				rte_strerror(rte_errno));
796 	}
797 	RTE_LOG(DEBUG, EAL, "Allocated %" PRIu64 "M on socket %i\n",
798 			(seg_len * page_sz) >> 20, socket_id);
799 	return seg_len;
800 }
801 
802 static uint64_t
803 get_mem_amount(uint64_t page_sz, uint64_t max_mem)
804 {
805 	uint64_t area_sz, max_pages;
806 
807 	/* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
808 	max_pages = RTE_MAX_MEMSEG_PER_LIST;
809 	max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
810 
811 	area_sz = RTE_MIN(page_sz * max_pages, max_mem);
812 
813 	/* make sure the list isn't smaller than the page size */
814 	area_sz = RTE_MAX(area_sz, page_sz);
815 
816 	return RTE_ALIGN(area_sz, page_sz);
817 }
818 
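/* tear down an unused memseg list: destroy its fbarray and clear the descriptor */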
819 static int
820 memseg_list_free(struct rte_memseg_list *msl)
821 {
822 	if (rte_fbarray_destroy(&msl->memseg_arr)) {
823 		RTE_LOG(ERR, EAL, "Cannot destroy memseg list\n");
824 		return -1;
825 	}
826 	memset(msl, 0, sizeof(*msl));
827 	return 0;
828 }
829 
830 /*
831  * Our VA space is not preallocated yet, so preallocate it here. We need to know
832  * how many segments there are in order to map all pages into one address space,
833  * and leave appropriate holes between segments so that rte_malloc does not
834  * concatenate them into one big segment.
835  *
836  * We also need to unmap the original pages to free up address space.
837  */
838 static int __rte_unused
839 prealloc_segments(struct hugepage_file *hugepages, int n_pages)
840 {
841 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
842 	int cur_page, seg_start_page, end_seg, new_memseg;
843 	unsigned int hpi_idx, socket, i;
844 	int n_contig_segs, n_segs;
845 	int msl_idx;
846 	const struct internal_config *internal_conf =
847 		eal_get_internal_configuration();
848 
849 	/* before we preallocate segments, we need to free up our VA space.
850 	 * we're not removing files, and we already have information about
851 	 * PA-contiguousness, so it is safe to unmap everything.
852 	 */
853 	for (cur_page = 0; cur_page < n_pages; cur_page++) {
854 		struct hugepage_file *hpi = &hugepages[cur_page];
855 		munmap(hpi->orig_va, hpi->size);
856 		hpi->orig_va = NULL;
857 	}
858 
859 	/* we do not know in advance which page sizes and sockets hold our pages,
860 	 * so loop over all of them
861 	 */
862 	for (hpi_idx = 0; hpi_idx < internal_conf->num_hugepage_sizes;
863 			hpi_idx++) {
864 		uint64_t page_sz =
865 			internal_conf->hugepage_info[hpi_idx].hugepage_sz;
866 
867 		for (i = 0; i < rte_socket_count(); i++) {
868 			struct rte_memseg_list *msl;
869 
870 			socket = rte_socket_id_by_idx(i);
871 			n_contig_segs = 0;
872 			n_segs = 0;
873 			seg_start_page = -1;
874 
875 			for (cur_page = 0; cur_page < n_pages; cur_page++) {
876 				struct hugepage_file *prev, *cur;
877 				int prev_seg_start_page = -1;
878 
879 				cur = &hugepages[cur_page];
880 				prev = cur_page == 0 ? NULL :
881 						&hugepages[cur_page - 1];
882 
883 				new_memseg = 0;
884 				end_seg = 0;
885 
886 				if (cur->size == 0)
887 					end_seg = 1;
888 				else if (cur->socket_id != (int) socket)
889 					end_seg = 1;
890 				else if (cur->size != page_sz)
891 					end_seg = 1;
892 				else if (cur_page == 0)
893 					new_memseg = 1;
894 #ifdef RTE_ARCH_PPC_64
895 				/* On the PPC64 architecture, mmap always starts
896 				 * from a higher address and works downwards. Here,
897 				 * physical addresses are in descending order.
898 				 */
899 				else if ((prev->physaddr - cur->physaddr) !=
900 						cur->size)
901 					new_memseg = 1;
902 #else
903 				else if ((cur->physaddr - prev->physaddr) !=
904 						cur->size)
905 					new_memseg = 1;
906 #endif
907 				if (new_memseg) {
908 					/* if we're already inside a segment,
909 					 * new segment means end of current one
910 					 */
911 					if (seg_start_page != -1) {
912 						end_seg = 1;
913 						prev_seg_start_page =
914 								seg_start_page;
915 					}
916 					seg_start_page = cur_page;
917 				}
918 
919 				if (end_seg) {
920 					if (prev_seg_start_page != -1) {
921 						/* we've found a new segment */
922 						n_contig_segs++;
923 						n_segs += cur_page -
924 							prev_seg_start_page;
925 					} else if (seg_start_page != -1) {
926 						/* we didn't find new segment,
927 						 * but did end current one
928 						 */
929 						n_contig_segs++;
930 						n_segs += cur_page -
931 								seg_start_page;
932 						seg_start_page = -1;
933 						continue;
934 					} else {
935 						/* we're skipping this page */
936 						continue;
937 					}
938 				}
939 				/* segment continues */
940 			}
941 			/* check if we missed last segment */
942 			if (seg_start_page != -1) {
943 				n_contig_segs++;
944 				n_segs += cur_page - seg_start_page;
945 			}
946 
947 			/* if no segments were found, do not preallocate */
948 			if (n_segs == 0)
949 				continue;
950 
951 			/* we now have total number of pages that we will
952 			 * allocate for this segment list. add separator pages
953 			 * to the total count, and preallocate VA space.
954 			 */
955 			n_segs += n_contig_segs - 1;
956 
957 			/* now, preallocate VA space for these segments */
958 
959 			/* first, find suitable memseg list for this */
960 			for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
961 					msl_idx++) {
962 				msl = &mcfg->memsegs[msl_idx];
963 
964 				if (msl->base_va != NULL)
965 					continue;
966 				break;
967 			}
968 			if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
969 				RTE_LOG(ERR, EAL, "Not enough space in memseg lists, please increase %s\n",
970 					RTE_STR(RTE_MAX_MEMSEG_LISTS));
971 				return -1;
972 			}
973 
974 			/* now, allocate fbarray itself */
975 			if (eal_memseg_list_init(msl, page_sz, n_segs,
976 					socket, msl_idx, true) < 0)
977 				return -1;
978 
979 			/* finally, allocate VA space */
980 			if (eal_memseg_list_alloc(msl, 0) < 0) {
981 				RTE_LOG(ERR, EAL, "Cannot preallocate 0x%"PRIx64"kB hugepages\n",
982 					page_sz >> 10);
983 				return -1;
984 			}
985 		}
986 	}
987 	return 0;
988 }
989 
990 /*
991  * We cannot reallocate memseg lists on the fly because PPC64 stores pages
992  * backwards; therefore, we have to process an entire memseg before
993  * remapping it into memseg list VA space.
994  */
995 static int
996 remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
997 {
998 	int cur_page, seg_start_page, new_memseg, ret;
999 
1000 	seg_start_page = 0;
1001 	for (cur_page = 0; cur_page < n_pages; cur_page++) {
1002 		struct hugepage_file *prev, *cur;
1003 
1004 		new_memseg = 0;
1005 
1006 		cur = &hugepages[cur_page];
1007 		prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];
1008 
1009 		/* if size is zero, no more pages left */
1010 		if (cur->size == 0)
1011 			break;
1012 
1013 		if (cur_page == 0)
1014 			new_memseg = 1;
1015 		else if (cur->socket_id != prev->socket_id)
1016 			new_memseg = 1;
1017 		else if (cur->size != prev->size)
1018 			new_memseg = 1;
1019 #ifdef RTE_ARCH_PPC_64
1020 		/* On the PPC64 architecture, mmap always starts from a higher
1021 		 * address and works downwards. Here, physical addresses are in
1022 		 * descending order.
1023 		 */
1024 		else if ((prev->physaddr - cur->physaddr) != cur->size)
1025 			new_memseg = 1;
1026 #else
1027 		else if ((cur->physaddr - prev->physaddr) != cur->size)
1028 			new_memseg = 1;
1029 #endif
1030 
1031 		if (new_memseg) {
1032 			/* if this isn't the first time, remap segment */
1033 			if (cur_page != 0) {
1034 				int n_remapped = 0;
1035 				int n_needed = cur_page - seg_start_page;
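				/* remap_segment() may map fewer pages than requested
				 * if the chosen memseg list runs out of room, so keep
				 * calling it until this contiguous run is fully mapped.
				 */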
1036 				while (n_remapped < n_needed) {
1037 					ret = remap_segment(hugepages, seg_start_page,
1038 							cur_page);
1039 					if (ret < 0)
1040 						return -1;
1041 					n_remapped += ret;
1042 					seg_start_page += ret;
1043 				}
1044 			}
1045 			/* remember where we started */
1046 			seg_start_page = cur_page;
1047 		}
1048 		/* continuation of previous memseg */
1049 	}
1050 	/* the loop has ended, but the last segment has not been remapped yet - do it now */
1051 	if (cur_page != 0) {
1052 		int n_remapped = 0;
1053 		int n_needed = cur_page - seg_start_page;
1054 		while (n_remapped < n_needed) {
1055 			ret = remap_segment(hugepages, seg_start_page,
1056 					cur_page);
1057 			if (ret < 0)
1058 				return -1;
1059 			n_remapped += ret;
1060 			seg_start_page += ret;
1061 		}
1062 	}
1063 	return 0;
1064 }
1065 
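/*
 * Total amount of hugepage memory discovered across all page sizes and
 * sockets, clamped to SIZE_MAX.
 */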
1066 static inline size_t
1067 eal_get_hugepage_mem_size(void)
1068 {
1069 	uint64_t size = 0;
1070 	unsigned i, j;
1071 	struct internal_config *internal_conf =
1072 		eal_get_internal_configuration();
1073 
1074 	for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
1075 		struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
1076 		if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
1077 			for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1078 				size += hpi->hugepage_sz * hpi->num_pages[j];
1079 			}
1080 		}
1081 	}
1082 
1083 	return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
1084 }
1085 
1086 static struct sigaction huge_action_old;
1087 static int huge_need_recover;
1088 
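/*
 * Temporarily install a SIGBUS handler so that a fault raised while touching
 * hugepages in map_all_hugepages() can be caught and recovered from instead
 * of killing the process; the previous handler is saved and later restored by
 * huge_recover_sigbus().
 */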
1089 static void
1090 huge_register_sigbus(void)
1091 {
1092 	sigset_t mask;
1093 	struct sigaction action;
1094 
1095 	sigemptyset(&mask);
1096 	sigaddset(&mask, SIGBUS);
1097 	action.sa_flags = 0;
1098 	action.sa_mask = mask;
1099 	action.sa_handler = huge_sigbus_handler;
1100 
1101 	huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
1102 }
1103 
1104 static void
1105 huge_recover_sigbus(void)
1106 {
1107 	if (huge_need_recover) {
1108 		sigaction(SIGBUS, &huge_action_old, NULL);
1109 		huge_need_recover = 0;
1110 	}
1111 }
1112 
1113 /*
1114  * Prepare the physical memory mapping: fill the configuration structure with
1115  * this information and return 0 on success.
1116  *  1. map N huge pages in separate files in hugetlbfs
1117  *  2. find associated physical addr
1118  *  3. find associated NUMA socket ID
1119  *  4. sort all huge pages by physical address
1120  *  5. remap these N huge pages in the correct order
1121  *  6. unmap the first mapping
1122  *  7. fill memsegs in configuration with contiguous zones
1123  */
1124 static int
1125 eal_legacy_hugepage_init(void)
1126 {
1127 	struct rte_mem_config *mcfg;
1128 	struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
1129 	struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1130 	struct internal_config *internal_conf =
1131 		eal_get_internal_configuration();
1132 
1133 	uint64_t memory[RTE_MAX_NUMA_NODES];
1134 
1135 	unsigned hp_offset;
1136 	int i, j;
1137 	int nr_hugefiles, nr_hugepages = 0;
1138 	void *addr;
1139 
1140 	memset(used_hp, 0, sizeof(used_hp));
1141 
1142 	/* get pointer to global configuration */
1143 	mcfg = rte_eal_get_configuration()->mem_config;
1144 
1145 	/* hugetlbfs can be disabled */
1146 	if (internal_conf->no_hugetlbfs) {
1147 		void *prealloc_addr;
1148 		size_t mem_sz;
1149 		struct rte_memseg_list *msl;
1150 		int n_segs, fd, flags;
1151 #ifdef MEMFD_SUPPORTED
1152 		int memfd;
1153 #endif
1154 		uint64_t page_sz;
1155 
1156 		/* nohuge mode is legacy mode */
1157 		internal_conf->legacy_mem = 1;
1158 
1159 		/* nohuge mode is single-file segments mode */
1160 		internal_conf->single_file_segments = 1;
1161 
1162 		/* create a memseg list */
1163 		msl = &mcfg->memsegs[0];
1164 
1165 		mem_sz = internal_conf->memory;
1166 		page_sz = RTE_PGSIZE_4K;
1167 		n_segs = mem_sz / page_sz;
1168 
1169 		if (eal_memseg_list_init_named(
1170 				msl, "nohugemem", page_sz, n_segs, 0, true)) {
1171 			return -1;
1172 		}
1173 
1174 		/* set up parameters for anonymous mmap */
1175 		fd = -1;
1176 		flags = MAP_PRIVATE | MAP_ANONYMOUS;
1177 
1178 #ifdef MEMFD_SUPPORTED
1179 		/* create a memfd and store it in the segment fd table */
1180 		memfd = memfd_create("nohuge", 0);
1181 		if (memfd < 0) {
1182 			RTE_LOG(DEBUG, EAL, "Cannot create memfd: %s\n",
1183 					strerror(errno));
1184 			RTE_LOG(DEBUG, EAL, "Falling back to anonymous map\n");
1185 		} else {
1186 			/* we got an fd - now resize it */
1187 			if (ftruncate(memfd, internal_conf->memory) < 0) {
1188 				RTE_LOG(ERR, EAL, "Cannot resize memfd: %s\n",
1189 						strerror(errno));
1190 				RTE_LOG(ERR, EAL, "Falling back to anonymous map\n");
1191 				close(memfd);
1192 			} else {
1193 				/* creating memfd-backed file was successful.
1194 				 * we want changes to memfd to be visible to
1195 				 * other processes (such as vhost backend), so
1196 				 * map it as shared memory.
1197 				 */
1198 				RTE_LOG(DEBUG, EAL, "Using memfd for anonymous memory\n");
1199 				fd = memfd;
1200 				flags = MAP_SHARED;
1201 			}
1202 		}
1203 #endif
1204 		/* preallocate address space for the memory, so that it can
1205 		 * fit within the DMA mask.
1206 		 */
1207 		if (eal_memseg_list_alloc(msl, 0)) {
1208 			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
1209 			return -1;
1210 		}
1211 
1212 		prealloc_addr = msl->base_va;
1213 		addr = mmap(prealloc_addr, mem_sz, PROT_READ | PROT_WRITE,
1214 				flags | MAP_FIXED, fd, 0);
1215 		if (addr == MAP_FAILED || addr != prealloc_addr) {
1216 			RTE_LOG(ERR, EAL, "%s: mmap() failed: %s\n", __func__,
1217 					strerror(errno));
1218 			munmap(prealloc_addr, mem_sz);
1219 			return -1;
1220 		}
1221 
1222 		/* we're in single-file segments mode, so only the segment list
1223 		 * fd needs to be set up.
1224 		 */
1225 		if (fd != -1) {
1226 			if (eal_memalloc_set_seg_list_fd(0, fd) < 0) {
1227 				RTE_LOG(ERR, EAL, "Cannot set up segment list fd\n");
1228 				/* not a serious error, proceed */
1229 			}
1230 		}
1231 
1232 		eal_memseg_list_populate(msl, addr, n_segs);
1233 
1234 		if (mcfg->dma_maskbits &&
1235 		    rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1236 			RTE_LOG(ERR, EAL,
1237 				"%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1238 				__func__);
1239 			if (rte_eal_iova_mode() == RTE_IOVA_VA &&
1240 			    rte_eal_using_phys_addrs())
1241 				RTE_LOG(ERR, EAL,
1242 					"%s(): Please try initializing EAL with --iova-mode=pa parameter.\n",
1243 					__func__);
1244 			goto fail;
1245 		}
1246 		return 0;
1247 	}
1248 
1249 	/* calculate total number of hugepages available. at this point we haven't
1250 	 * yet started sorting them so they all are on socket 0 */
1251 	for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
1252 		/* meanwhile, also initialize used_hp hugepage sizes in used_hp */
1253 		used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
1254 
1255 		nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
1256 	}
1257 
1258 	/*
1259 	 * allocate a memory area for hugepage table.
1260 	 * this isn't shared memory yet. due to the fact that we need some
1261 	 * processing done on these pages, shared memory will be created
1262 	 * at a later stage.
1263 	 */
1264 	tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1265 	if (tmp_hp == NULL)
1266 		goto fail;
1267 
1268 	memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
1269 
1270 	hp_offset = 0; /* where we start the current page size entries */
1271 
1272 	huge_register_sigbus();
1273 
1274 	/* make a copy of socket_mem, needed for balanced allocation. */
1275 	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1276 		memory[i] = internal_conf->socket_mem[i];
1277 
1278 	/* map all hugepages and sort them */
1279 	for (i = 0; i < (int)internal_conf->num_hugepage_sizes; i++) {
1280 		unsigned pages_old, pages_new;
1281 		struct hugepage_info *hpi;
1282 
1283 		/*
1284 		 * we don't yet mark hugepages as used at this stage, so
1285 		 * we just map all hugepages available to the system;
1286 		 * all hugepages are still located on socket 0
1287 		 */
1288 		hpi = &internal_conf->hugepage_info[i];
1289 
1290 		if (hpi->num_pages[0] == 0)
1291 			continue;
1292 
1293 		/* map all hugepages available */
1294 		pages_old = hpi->num_pages[0];
1295 		pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
1296 		if (pages_new < pages_old) {
1297 			RTE_LOG(DEBUG, EAL,
1298 				"%d not %d hugepages of size %u MB allocated\n",
1299 				pages_new, pages_old,
1300 				(unsigned)(hpi->hugepage_sz / 0x100000));
1301 
1302 			int pages = pages_old - pages_new;
1303 
1304 			nr_hugepages -= pages;
1305 			hpi->num_pages[0] = pages_new;
1306 			if (pages_new == 0)
1307 				continue;
1308 		}
1309 
1310 		if (rte_eal_using_phys_addrs() &&
1311 				rte_eal_iova_mode() != RTE_IOVA_VA) {
1312 			/* find physical addresses for each hugepage */
1313 			if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1314 				RTE_LOG(DEBUG, EAL, "Failed to find phys addr "
1315 					"for %u MB pages\n",
1316 					(unsigned int)(hpi->hugepage_sz / 0x100000));
1317 				goto fail;
1318 			}
1319 		} else {
1320 			/* set physical addresses for each hugepage */
1321 			if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1322 				RTE_LOG(DEBUG, EAL, "Failed to set phys addr "
1323 					"for %u MB pages\n",
1324 					(unsigned int)(hpi->hugepage_sz / 0x100000));
1325 				goto fail;
1326 			}
1327 		}
1328 
1329 		if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
1330 			RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
1331 					(unsigned)(hpi->hugepage_sz / 0x100000));
1332 			goto fail;
1333 		}
1334 
1335 		qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
1336 		      sizeof(struct hugepage_file), cmp_physaddr);
1337 
1338 		/* we have processed a number of hugepages of this size, so advance the offset */
1339 		hp_offset += hpi->num_pages[0];
1340 	}
1341 
1342 	huge_recover_sigbus();
1343 
1344 	if (internal_conf->memory == 0 && internal_conf->force_sockets == 0)
1345 		internal_conf->memory = eal_get_hugepage_mem_size();
1346 
1347 	nr_hugefiles = nr_hugepages;
1348 
1349 
1350 	/* clean out the numbers of pages */
1351 	for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++)
1352 		for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1353 			internal_conf->hugepage_info[i].num_pages[j] = 0;
1354 
1355 	/* get hugepages for each socket */
1356 	for (i = 0; i < nr_hugefiles; i++) {
1357 		int socket = tmp_hp[i].socket_id;
1358 
1359 		/* find a hugepage info with right size and increment num_pages */
1360 		const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1361 				(int)internal_conf->num_hugepage_sizes);
1362 		for (j = 0; j < nb_hpsizes; j++) {
1363 			if (tmp_hp[i].size ==
1364 					internal_conf->hugepage_info[j].hugepage_sz) {
1365 				internal_conf->hugepage_info[j].num_pages[socket]++;
1366 			}
1367 		}
1368 	}
1369 
1370 	/* make a copy of socket_mem, needed for number of pages calculation */
1371 	for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1372 		memory[i] = internal_conf->socket_mem[i];
1373 
1374 	/* calculate final number of pages */
1375 	nr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,
1376 			internal_conf->hugepage_info, used_hp,
1377 			internal_conf->num_hugepage_sizes);
1378 
1379 	/* error if not enough memory available */
1380 	if (nr_hugepages < 0)
1381 		goto fail;
1382 
1383 	/* reporting in! */
1384 	for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
1385 		for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1386 			if (used_hp[i].num_pages[j] > 0) {
1387 				RTE_LOG(DEBUG, EAL,
1388 					"Requesting %u pages of size %uMB"
1389 					" from socket %i\n",
1390 					used_hp[i].num_pages[j],
1391 					(unsigned)
1392 					(used_hp[i].hugepage_sz / 0x100000),
1393 					j);
1394 			}
1395 		}
1396 	}
1397 
1398 	/* create shared memory */
1399 	hugepage = create_shared_memory(eal_hugepage_data_path(),
1400 			nr_hugefiles * sizeof(struct hugepage_file));
1401 
1402 	if (hugepage == NULL) {
1403 		RTE_LOG(ERR, EAL, "Failed to create shared memory!\n");
1404 		goto fail;
1405 	}
1406 	memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
1407 
1408 	/*
1409 	 * unmap pages that we won't need (looks at used_hp).
1410 	 * also, sets final_va to NULL on pages that were unmapped.
1411 	 */
1412 	if (unmap_unneeded_hugepages(tmp_hp, used_hp,
1413 			internal_conf->num_hugepage_sizes) < 0) {
1414 		RTE_LOG(ERR, EAL, "Unmapping and locking hugepages failed!\n");
1415 		goto fail;
1416 	}
1417 
1418 	/*
1419 	 * copy stuff from malloc'd hugepage* to the actual shared memory.
1420 	 * this procedure only copies those hugepages that have orig_va
1421 	 * not NULL. has overflow protection.
1422 	 */
1423 	if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
1424 			tmp_hp, nr_hugefiles) < 0) {
1425 		RTE_LOG(ERR, EAL, "Copying tables to shared memory failed!\n");
1426 		goto fail;
1427 	}
1428 
1429 #ifndef RTE_ARCH_64
1430 	/* for legacy 32-bit mode, we did not preallocate VA space, so do it */
1431 	if (internal_conf->legacy_mem &&
1432 			prealloc_segments(hugepage, nr_hugefiles)) {
1433 		RTE_LOG(ERR, EAL, "Could not preallocate VA space for hugepages\n");
1434 		goto fail;
1435 	}
1436 #endif
1437 
1438 	/* remap all pages we do need into memseg list VA space, so that those
1439 	 * pages become first-class citizens in DPDK memory subsystem
1440 	 */
1441 	if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
1442 		RTE_LOG(ERR, EAL, "Couldn't remap hugepage files into memseg lists\n");
1443 		goto fail;
1444 	}
1445 
1446 	/* free the hugepage backing files */
1447 	if (internal_conf->hugepage_file.unlink_before_mapping &&
1448 		unlink_hugepage_files(tmp_hp, internal_conf->num_hugepage_sizes) < 0) {
1449 		RTE_LOG(ERR, EAL, "Unlinking hugepage files failed!\n");
1450 		goto fail;
1451 	}
1452 
1453 	/* free the temporary hugepage table */
1454 	free(tmp_hp);
1455 	tmp_hp = NULL;
1456 
1457 	munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1458 	hugepage = NULL;
1459 
1460 	/* we're not going to allocate more pages, so release VA space for
1461 	 * unused memseg lists
1462 	 */
1463 	for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1464 		struct rte_memseg_list *msl = &mcfg->memsegs[i];
1465 		size_t mem_sz;
1466 
1467 		/* skip inactive lists */
1468 		if (msl->base_va == NULL)
1469 			continue;
1470 		/* skip lists where there is at least one page allocated */
1471 		if (msl->memseg_arr.count > 0)
1472 			continue;
1473 		/* this is an unused list, deallocate it */
1474 		mem_sz = msl->len;
1475 		munmap(msl->base_va, mem_sz);
1476 		msl->base_va = NULL;
1477 		msl->heap = 0;
1478 
1479 		/* destroy backing fbarray */
1480 		rte_fbarray_destroy(&msl->memseg_arr);
1481 	}
1482 
1483 	if (mcfg->dma_maskbits &&
1484 	    rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1485 		RTE_LOG(ERR, EAL,
1486 			"%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
1487 			__func__);
1488 		goto fail;
1489 	}
1490 
1491 	return 0;
1492 
1493 fail:
1494 	huge_recover_sigbus();
1495 	free(tmp_hp);
1496 	if (hugepage != NULL)
1497 		munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1498 
1499 	return -1;
1500 }
1501 
1502 /*
1503  * uses fstat to report the size of a file on disk
1504  */
1505 static off_t
1506 getFileSize(int fd)
1507 {
1508 	struct stat st;
1509 	if (fstat(fd, &st) < 0)
1510 		return 0;
1511 	return st.st_size;
1512 }
1513 
1514 /*
1515  * This creates the memory mappings in the secondary process to match those of
1516  * the primary process. It goes through each memory segment in the DPDK runtime
1517  * configuration, finds the hugepages which form that segment, and maps them
1518  * in order to form a contiguous block in virtual memory.
1519  */
1520 static int
1521 eal_legacy_hugepage_attach(void)
1522 {
1523 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1524 	struct hugepage_file *hp = NULL;
1525 	unsigned int num_hp = 0;
1526 	unsigned int i = 0;
1527 	unsigned int cur_seg;
1528 	off_t size = 0;
1529 	int fd, fd_hugepage = -1;
1530 
1531 	if (aslr_enabled() > 0) {
1532 		RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
1533 				"(ASLR) is enabled in the kernel.\n");
1534 		RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
1535 				"into secondary processes\n");
1536 	}
1537 
1538 	fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
1539 	if (fd_hugepage < 0) {
1540 		RTE_LOG(ERR, EAL, "Could not open %s\n",
1541 				eal_hugepage_data_path());
1542 		goto error;
1543 	}
1544 
1545 	size = getFileSize(fd_hugepage);
1546 	hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
1547 	if (hp == MAP_FAILED) {
1548 		RTE_LOG(ERR, EAL, "Could not mmap %s\n",
1549 				eal_hugepage_data_path());
1550 		goto error;
1551 	}
1552 
1553 	num_hp = size / sizeof(struct hugepage_file);
1554 	RTE_LOG(DEBUG, EAL, "Analysing %u files\n", num_hp);
1555 
1556 	/* map all segments into memory to make sure we get the addrs. the
1557 	 * segments themselves are already in memseg list (which is shared and
1558 	 * has its VA space already preallocated), so we just need to map
1559 	 * everything into correct addresses.
1560 	 */
1561 	for (i = 0; i < num_hp; i++) {
1562 		struct hugepage_file *hf = &hp[i];
1563 		size_t map_sz = hf->size;
1564 		void *map_addr = hf->final_va;
1565 		int msl_idx, ms_idx;
1566 		struct rte_memseg_list *msl;
1567 		struct rte_memseg *ms;
1568 
1569 		/* if size is zero, no more pages left */
1570 		if (map_sz == 0)
1571 			break;
1572 
1573 		fd = open(hf->filepath, O_RDWR);
1574 		if (fd < 0) {
1575 			RTE_LOG(ERR, EAL, "Could not open %s: %s\n",
1576 				hf->filepath, strerror(errno));
1577 			goto error;
1578 		}
1579 
1580 		map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
1581 				MAP_SHARED | MAP_FIXED, fd, 0);
1582 		if (map_addr == MAP_FAILED) {
1583 			RTE_LOG(ERR, EAL, "Could not map %s: %s\n",
1584 				hf->filepath, strerror(errno));
1585 			goto fd_error;
1586 		}
1587 
1588 		/* set shared lock on the file. */
1589 		if (flock(fd, LOCK_SH) < 0) {
1590 			RTE_LOG(DEBUG, EAL, "%s(): Locking file failed: %s\n",
1591 				__func__, strerror(errno));
1592 			goto mmap_error;
1593 		}
1594 
1595 		/* find segment data */
1596 		msl = rte_mem_virt2memseg_list(map_addr);
1597 		if (msl == NULL) {
1598 			RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg list\n",
1599 				__func__);
1600 			goto mmap_error;
1601 		}
1602 		ms = rte_mem_virt2memseg(map_addr, msl);
1603 		if (ms == NULL) {
1604 			RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg\n",
1605 				__func__);
1606 			goto mmap_error;
1607 		}
1608 
1609 		msl_idx = msl - mcfg->memsegs;
1610 		ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
1611 		if (ms_idx < 0) {
1612 			RTE_LOG(DEBUG, EAL, "%s(): Cannot find memseg idx\n",
1613 				__func__);
1614 			goto mmap_error;
1615 		}
1616 
1617 		/* store segment fd internally */
1618 		if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
1619 			RTE_LOG(ERR, EAL, "Could not store segment fd: %s\n",
1620 				rte_strerror(rte_errno));
1621 	}
1622 	/* unmap the hugepage config file, since we are done using it */
1623 	munmap(hp, size);
1624 	close(fd_hugepage);
1625 	return 0;
1626 
1627 mmap_error:
1628 	munmap(hp[i].final_va, hp[i].size);
1629 fd_error:
1630 	close(fd);
1631 error:
1632 	/* unwind mmap's done so far */
1633 	for (cur_seg = 0; cur_seg < i; cur_seg++)
1634 		munmap(hp[cur_seg].final_va, hp[cur_seg].size);
1635 
1636 	if (hp != NULL && hp != MAP_FAILED)
1637 		munmap(hp, size);
1638 	if (fd_hugepage >= 0)
1639 		close(fd_hugepage);
1640 	return -1;
1641 }
1642 
1643 static int
1644 eal_hugepage_attach(void)
1645 {
1646 	if (eal_memalloc_sync_with_primary()) {
1647 		RTE_LOG(ERR, EAL, "Could not map memory from primary process\n");
1648 		if (aslr_enabled() > 0)
1649 			RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes\n");
1650 		return -1;
1651 	}
1652 	return 0;
1653 }
1654 
1655 int
1656 rte_eal_hugepage_init(void)
1657 {
1658 	const struct internal_config *internal_conf =
1659 		eal_get_internal_configuration();
1660 
1661 	return internal_conf->legacy_mem ?
1662 			eal_legacy_hugepage_init() :
1663 			eal_dynmem_hugepage_init();
1664 }
1665 
1666 int
1667 rte_eal_hugepage_attach(void)
1668 {
1669 	const struct internal_config *internal_conf =
1670 		eal_get_internal_configuration();
1671 
1672 	return internal_conf->legacy_mem ?
1673 			eal_legacy_hugepage_attach() :
1674 			eal_hugepage_attach();
1675 }
1676 
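/*
 * Lazily determine (and cache) whether physical addresses are usable: this
 * requires hugepages to be in use and /proc/self/pagemap to yield a valid
 * PFN for the current process.
 */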
1677 int
1678 rte_eal_using_phys_addrs(void)
1679 {
1680 	if (phys_addrs_available == -1) {
1681 		uint64_t tmp = 0;
1682 
1683 		if (rte_eal_has_hugepages() != 0 &&
1684 		    rte_mem_virt2phy(&tmp) != RTE_BAD_PHYS_ADDR)
1685 			phys_addrs_available = 1;
1686 		else
1687 			phys_addrs_available = 0;
1688 	}
1689 	return phys_addrs_available;
1690 }
1691 
1692 static int __rte_unused
1693 memseg_primary_init_32(void)
1694 {
1695 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1696 	int active_sockets, hpi_idx, msl_idx = 0;
1697 	unsigned int socket_id, i;
1698 	struct rte_memseg_list *msl;
1699 	uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
1700 	uint64_t max_mem;
1701 	struct internal_config *internal_conf =
1702 		eal_get_internal_configuration();
1703 
1704 	/* no-huge does not need this at all */
1705 	if (internal_conf->no_hugetlbfs)
1706 		return 0;
1707 
1708 	/* this is a giant hack, but desperate times call for desperate
1709 	 * measures. in legacy 32-bit mode, we cannot preallocate VA space,
1710 	 * because having upwards of 2 gigabytes of VA space already mapped will
1711 	 * interfere with our ability to map and sort hugepages.
1712 	 *
1713 	 * therefore, in legacy 32-bit mode, we will be initializing memseg
1714 	 * lists much later - in eal_memory.c, right after we unmap all the
1715 	 * unneeded pages. this will not affect secondary processes, as those
1716 	 * should be able to mmap the space without (too many) problems.
1717 	 */
1718 	if (internal_conf->legacy_mem)
1719 		return 0;
1720 
1721 	/* 32-bit mode is a very special case. we cannot know in advance where
1722 	 * the user will want to allocate their memory, so we have to do some
1723 	 * heuristics.
1724 	 */
1725 	active_sockets = 0;
1726 	total_requested_mem = 0;
1727 	if (internal_conf->force_sockets)
1728 		for (i = 0; i < rte_socket_count(); i++) {
1729 			uint64_t mem;
1730 
1731 			socket_id = rte_socket_id_by_idx(i);
1732 			mem = internal_conf->socket_mem[socket_id];
1733 
1734 			if (mem == 0)
1735 				continue;
1736 
1737 			active_sockets++;
1738 			total_requested_mem += mem;
1739 		}
1740 	else
1741 		total_requested_mem = internal_conf->memory;
1742 
1743 	max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
1744 	if (total_requested_mem > max_mem) {
1745 		RTE_LOG(ERR, EAL, "Invalid parameters: 32-bit process can at most use %uM of memory\n",
1746 				(unsigned int)(max_mem >> 20));
1747 		return -1;
1748 	}
1749 	total_extra_mem = max_mem - total_requested_mem;
1750 	extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
1751 			total_extra_mem / active_sockets;
1752 
1753 	/* the allocation logic is a little bit convoluted, but here's how it
1754 	 * works, in a nutshell:
1755 	 *  - if user hasn't specified on which sockets to allocate memory via
1756 	 *    --socket-mem, we allocate all of our memory on main core socket.
1757 	 *  - if user has specified sockets to allocate memory on, there may be
1758 	 *    some "unused" memory left (e.g. if user has specified --socket-mem
1759 	 *    such that not all memory adds up to 2 gigabytes), so add it to all
1760 	 *    sockets that are in use equally.
1761 	 *
1762 	 * page sizes are sorted by size in descending order, so we can safely
1763 	 * assume that we dispense with bigger page sizes first.
1764 	 */
1765 
1766 	/* create memseg lists */
1767 	for (i = 0; i < rte_socket_count(); i++) {
1768 		int hp_sizes = (int) internal_conf->num_hugepage_sizes;
1769 		uint64_t max_socket_mem, cur_socket_mem;
1770 		unsigned int main_lcore_socket;
1771 		struct rte_config *cfg = rte_eal_get_configuration();
1772 		bool skip;
1773 
1774 		socket_id = rte_socket_id_by_idx(i);
1775 
1776 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1777 		/* we can still sort pages by socket in legacy mode */
1778 		if (!internal_conf->legacy_mem && socket_id > 0)
1779 			break;
1780 #endif
1781 
1782 		/* if we didn't specifically request memory on this socket */
1783 		skip = active_sockets != 0 &&
1784 				internal_conf->socket_mem[socket_id] == 0;
1785 		/* ...or if we didn't specifically request memory on *any*
1786 		 * socket, and this is not main lcore
1787 		 */
1788 		main_lcore_socket = rte_lcore_to_socket_id(cfg->main_lcore);
1789 		skip |= active_sockets == 0 && socket_id != main_lcore_socket;
1790 
1791 		if (skip) {
1792 			RTE_LOG(DEBUG, EAL, "Will not preallocate memory on socket %u\n",
1793 					socket_id);
1794 			continue;
1795 		}
1796 
1797 		/* max amount of memory on this socket */
1798 		max_socket_mem = (active_sockets != 0 ?
1799 					internal_conf->socket_mem[socket_id] :
1800 					internal_conf->memory) +
1801 					extra_mem_per_socket;
1802 		cur_socket_mem = 0;
1803 
1804 		for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
1805 			uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
1806 			uint64_t hugepage_sz;
1807 			struct hugepage_info *hpi;
1808 			int type_msl_idx, max_segs, total_segs = 0;
1809 
1810 			hpi = &internal_conf->hugepage_info[hpi_idx];
1811 			hugepage_sz = hpi->hugepage_sz;
1812 
1813 			/* check if pages are actually available */
1814 			if (hpi->num_pages[socket_id] == 0)
1815 				continue;
1816 
1817 			max_segs = RTE_MAX_MEMSEG_PER_TYPE;
1818 			max_pagesz_mem = max_socket_mem - cur_socket_mem;
1819 
1820 			/* make it multiple of page size */
1821 			max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
1822 					hugepage_sz);
1823 
1824 			RTE_LOG(DEBUG, EAL, "Attempting to preallocate "
1825 					"%" PRIu64 "M on socket %i\n",
1826 					max_pagesz_mem >> 20, socket_id);
1827 
1828 			type_msl_idx = 0;
1829 			while (cur_pagesz_mem < max_pagesz_mem &&
1830 					total_segs < max_segs) {
1831 				uint64_t cur_mem;
1832 				unsigned int n_segs;
1833 
1834 				if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
1835 					RTE_LOG(ERR, EAL,
1836 						"No more space in memseg lists, please increase %s\n",
1837 						RTE_STR(RTE_MAX_MEMSEG_LISTS));
1838 					return -1;
1839 				}
1840 
1841 				msl = &mcfg->memsegs[msl_idx];
1842 
1843 				cur_mem = get_mem_amount(hugepage_sz,
1844 						max_pagesz_mem);
1845 				n_segs = cur_mem / hugepage_sz;
1846 
1847 				if (eal_memseg_list_init(msl, hugepage_sz,
1848 						n_segs, socket_id, type_msl_idx,
1849 						true)) {
1850 					/* failing to allocate a memseg list is
1851 					 * a serious error.
1852 					 */
1853 					RTE_LOG(ERR, EAL, "Cannot allocate memseg list\n");
1854 					return -1;
1855 				}
1856 
1857 				if (eal_memseg_list_alloc(msl, 0)) {
1858 					/* if we couldn't allocate VA space, we
1859 					 * can try with smaller page sizes.
1860 					 */
1861 					RTE_LOG(ERR, EAL, "Cannot allocate VA space for memseg list, retrying with different page size\n");
1862 					/* deallocate memseg list */
1863 					if (memseg_list_free(msl))
1864 						return -1;
1865 					break;
1866 				}
1867 
1868 				total_segs += msl->memseg_arr.len;
1869 				cur_pagesz_mem = total_segs * hugepage_sz;
1870 				type_msl_idx++;
1871 				msl_idx++;
1872 			}
1873 			cur_socket_mem += cur_pagesz_mem;
1874 		}
1875 		if (cur_socket_mem == 0) {
1876 			RTE_LOG(ERR, EAL, "Cannot allocate VA space on socket %u\n",
1877 				socket_id);
1878 			return -1;
1879 		}
1880 	}
1881 
1882 	return 0;
1883 }
1884 
1885 static int __rte_unused
1886 memseg_primary_init(void)
1887 {
1888 	return eal_dynmem_memseg_lists_init();
1889 }
1890 
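/*
 * In a secondary process, attach to the memseg list fbarrays created by the
 * primary and preallocate matching VA space for each non-empty, non-external
 * list.
 */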
1891 static int
1892 memseg_secondary_init(void)
1893 {
1894 	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1895 	int msl_idx = 0;
1896 	struct rte_memseg_list *msl;
1897 
1898 	for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
1899 
1900 		msl = &mcfg->memsegs[msl_idx];
1901 
1902 		/* skip empty and external memseg lists */
1903 		if (msl->memseg_arr.len == 0 || msl->external)
1904 			continue;
1905 
1906 		if (rte_fbarray_attach(&msl->memseg_arr)) {
1907 			RTE_LOG(ERR, EAL, "Cannot attach to primary process memseg lists\n");
1908 			return -1;
1909 		}
1910 
1911 		/* preallocate VA space */
1912 		if (eal_memseg_list_alloc(msl, 0)) {
1913 			RTE_LOG(ERR, EAL, "Cannot preallocate VA space for hugepage memory\n");
1914 			return -1;
1915 		}
1916 	}
1917 
1918 	return 0;
1919 }
1920 
1921 int
1922 rte_eal_memseg_init(void)
1923 {
1924 	/* increase rlimit to maximum */
1925 	struct rlimit lim;
1926 
1927 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1928 	const struct internal_config *internal_conf =
1929 		eal_get_internal_configuration();
1930 #endif
1931 	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
1932 		/* set limit to maximum */
1933 		lim.rlim_cur = lim.rlim_max;
1934 
1935 		if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
1936 			RTE_LOG(DEBUG, EAL, "Setting maximum number of open files failed: %s\n",
1937 					strerror(errno));
1938 		} else {
1939 			RTE_LOG(DEBUG, EAL, "Setting maximum number of open files to %"
1940 					PRIu64 "\n",
1941 					(uint64_t)lim.rlim_cur);
1942 		}
1943 	} else {
1944 		RTE_LOG(ERR, EAL, "Cannot get current resource limits\n");
1945 	}
1946 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1947 	if (!internal_conf->legacy_mem && rte_socket_count() > 1) {
1948 		RTE_LOG(WARNING, EAL, "DPDK is running on a NUMA system, but is compiled without NUMA support.\n");
1949 		RTE_LOG(WARNING, EAL, "This will have adverse consequences for performance and usability.\n");
1950 		RTE_LOG(WARNING, EAL, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.\n");
1951 	}
1952 #endif
1953 
1954 	return rte_eal_process_type() == RTE_PROC_PRIMARY ?
1955 #ifndef RTE_ARCH_64
1956 			memseg_primary_init_32() :
1957 #else
1958 			memseg_primary_init() :
1959 #endif
1960 			memseg_secondary_init();
1961 }
1962