1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright(c) 2013 6WIND S.A.
4 */
5
6 #include <errno.h>
7 #include <fcntl.h>
8 #include <stdbool.h>
9 #include <stdlib.h>
10 #include <stdio.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <string.h>
14 #include <sys/mman.h>
15 #include <sys/stat.h>
16 #include <sys/file.h>
17 #include <sys/resource.h>
18 #include <unistd.h>
19 #include <limits.h>
20 #include <signal.h>
21 #include <setjmp.h>
22 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
23 #define MEMFD_SUPPORTED
24 #endif
25 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
26 #include <numa.h>
27 #include <numaif.h>
28 #endif
29
30 #include <rte_errno.h>
31 #include <rte_log.h>
32 #include <rte_memory.h>
33 #include <rte_eal.h>
34 #include <rte_lcore.h>
35 #include <rte_common.h>
36
37 #include "eal_private.h"
38 #include "eal_memalloc.h"
39 #include "eal_memcfg.h"
40 #include "eal_internal_cfg.h"
41 #include "eal_filesystem.h"
42 #include "eal_hugepages.h"
43 #include "eal_options.h"
44
45 #define PFN_MASK_SIZE 8
46
47 /**
48 * @file
49  * Huge page mapping under Linux
50  *
51  * To reserve a large contiguous amount of memory, we use the hugepage
52  * feature of Linux, which requires hugetlbfs to be mounted. This code
53  * creates one file per hugepage in that mount directory and maps each
54  * file into virtual memory. For each page, we then retrieve its physical
55  * address and remap it so as to obtain a zone that is contiguous both
56  * virtually and physically.
57 */
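/*
 * For example, with a hugetlbfs mount such as
 *     mount -t hugetlbfs nodev /mnt/huge
 * (the mount point here is only an example), this code creates one file per
 * hugepage under that directory via eal_get_hugefile_path() and mmap()s each
 * of those files.
 */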
58
59 static int phys_addrs_available = -1;
60
61 #define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
62
63 uint64_t eal_get_baseaddr(void)
64 {
65 /*
66 	 * The Linux kernel uses a very high address as the starting address
67 	 * for serving mmap calls. If a device has addressing limitations and
68 	 * IOVA mode is VA, this starting address is likely too high for that
69 	 * device. However, it is possible to use a lower address in the
70 	 * process virtual address space, as with 64 bits there is a lot of
71 	 * available space.
72 	 *
73 	 * Currently known limitations are 39 or 40 address bits. Setting the
74 	 * starting address at 4GB leaves 508GB or 1020GB, respectively, for
75 	 * mapping the available hugepages. This is likely enough for most
76 	 * systems, although a device with addressing limitations should call
77 	 * rte_mem_check_dma_mask to ensure all memory is within the supported
78 	 * range.
79 */
80 #if defined(RTE_ARCH_LOONGARCH)
81 return 0x7000000000ULL;
82 #else
83 return 0x100000000ULL;
84 #endif
85 }
86
87 /*
88 * Get physical address of any mapped virtual address in the current process.
89 */
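/*
 * /proc/self/pagemap holds one 64-bit entry per virtual page; bits 0-54 of
 * an entry are the page frame number (PFN). A sketch of the computation
 * performed below:
 *
 *     entry    = pagemap[virtaddr / page_size]
 *     physaddr = (entry & ((1ULL << 55) - 1)) * page_size
 *                + (virtaddr % page_size)
 */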
90 phys_addr_t
91 rte_mem_virt2phy(const void *virtaddr)
92 {
93 int fd, retval;
94 uint64_t page, physaddr;
95 unsigned long virt_pfn;
96 int page_size;
97 off_t offset;
98
99 if (phys_addrs_available == 0)
100 return RTE_BAD_IOVA;
101
102 /* standard page size */
103 page_size = getpagesize();
104
105 fd = open("/proc/self/pagemap", O_RDONLY);
106 if (fd < 0) {
107 EAL_LOG(INFO, "%s(): cannot open /proc/self/pagemap: %s",
108 __func__, strerror(errno));
109 return RTE_BAD_IOVA;
110 }
111
112 virt_pfn = (unsigned long)virtaddr / page_size;
113 offset = sizeof(uint64_t) * virt_pfn;
114 if (lseek(fd, offset, SEEK_SET) == (off_t) -1) {
115 EAL_LOG(INFO, "%s(): seek error in /proc/self/pagemap: %s",
116 __func__, strerror(errno));
117 close(fd);
118 return RTE_BAD_IOVA;
119 }
120
121 retval = read(fd, &page, PFN_MASK_SIZE);
122 close(fd);
123 if (retval < 0) {
124 EAL_LOG(INFO, "%s(): cannot read /proc/self/pagemap: %s",
125 __func__, strerror(errno));
126 return RTE_BAD_IOVA;
127 } else if (retval != PFN_MASK_SIZE) {
128 EAL_LOG(INFO, "%s(): read %d bytes from /proc/self/pagemap "
129 "but expected %d:",
130 __func__, retval, PFN_MASK_SIZE);
131 return RTE_BAD_IOVA;
132 }
133
134 /*
135 * the pfn (page frame number) are bits 0-54 (see
136 * pagemap.txt in linux Documentation)
137 */
138 if ((page & 0x7fffffffffffffULL) == 0)
139 return RTE_BAD_IOVA;
140
141 physaddr = ((page & 0x7fffffffffffffULL) * page_size)
142 + ((unsigned long)virtaddr % page_size);
143
144 return physaddr;
145 }
146
147 rte_iova_t
148 rte_mem_virt2iova(const void *virtaddr)
149 {
150 if (rte_eal_iova_mode() == RTE_IOVA_VA)
151 return (uintptr_t)virtaddr;
152 return rte_mem_virt2phy(virtaddr);
153 }
154
155 /*
156 * For each hugepage in hugepg_tbl, fill the physaddr value. We find
157 * it by browsing the /proc/self/pagemap special file.
158 */
159 static int
160 find_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
161 {
162 unsigned int i;
163 phys_addr_t addr;
164
165 for (i = 0; i < hpi->num_pages[0]; i++) {
166 addr = rte_mem_virt2phy(hugepg_tbl[i].orig_va);
167 if (addr == RTE_BAD_PHYS_ADDR)
168 return -1;
169 hugepg_tbl[i].physaddr = addr;
170 }
171 return 0;
172 }
173
174 /*
175 * For each hugepage in hugepg_tbl, fill the physaddr value sequentially.
176 */
177 static int
178 set_physaddrs(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
179 {
180 unsigned int i;
181 static phys_addr_t addr;
182
183 for (i = 0; i < hpi->num_pages[0]; i++) {
184 hugepg_tbl[i].physaddr = addr;
185 addr += hugepg_tbl[i].size;
186 }
187 return 0;
188 }
189
190 /*
191 * Check whether address-space layout randomization is enabled in
192  * the kernel. This is important for multi-process, as ASLR can prevent
193  * two processes from mapping data to the same virtual address.
194 * Returns:
195 * 0 - address space randomization disabled
196 * 1/2 - address space randomization enabled
197 * negative error code on error
198 */
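/*
 * For multi-process use, ASLR can be disabled at run time, for example:
 *     sysctl -w kernel.randomize_va_space=0
 * (equivalently, write '0' to RANDOMIZE_VA_SPACE_FILE).
 */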
199 static int
200 aslr_enabled(void)
201 {
202 char c;
203 int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
204 if (fd < 0)
205 return -errno;
206 retval = read(fd, &c, 1);
207 close(fd);
208 if (retval < 0)
209 return -errno;
210 if (retval == 0)
211 return -EIO;
212 switch (c) {
213 case '0' : return 0;
214 case '1' : return 1;
215 case '2' : return 2;
216 default: return -EINVAL;
217 }
218 }
219
220 static sigjmp_buf huge_jmpenv;
221
222 static void huge_sigbus_handler(int signo __rte_unused)
223 {
224 siglongjmp(huge_jmpenv, 1);
225 }
226
227 /* Wrap sigsetjmp in its own function to avoid compiler warnings: any non-volatile,
228 * non-static local variable in the stack frame calling sigsetjmp might be
229 * clobbered by a call to longjmp.
230 */
231 static int huge_wrap_sigsetjmp(void)
232 {
233 return sigsetjmp(huge_jmpenv, 1);
234 }
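/*
 * Intended usage (see map_all_hugepages() below): call huge_wrap_sigsetjmp()
 * just before touching a freshly mapped hugepage. If the touch raises SIGBUS,
 * huge_sigbus_handler() longjmps back, and the resulting non-zero return
 * value is used to back out of that mapping.
 */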
235
236 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
237 /* Callback for numa library. */
238 void numa_error(char *where)
239 {
240 EAL_LOG(ERR, "%s failed: %s", where, strerror(errno));
241 }
242 #endif
243
244 /*
245  * Mmap all hugepages of the hugepage table: for each page, it first opens
246  * a file in hugetlbfs, then mmap()s hugepage_sz bytes of it. The resulting
247  * virtual address is stored in hugepg_tbl[i].orig_va; the pages are later
248  * remapped to their final location (hugepg_tbl[i].final_va) by
249  * remap_segment().
250 */
251 static unsigned
252 map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
253 uint64_t *essential_memory __rte_unused)
254 {
255 int fd;
256 unsigned i;
257 void *virtaddr;
258 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
259 int node_id = -1;
260 int essential_prev = 0;
261 int oldpolicy;
262 struct bitmask *oldmask = NULL;
263 bool have_numa = true;
264 unsigned long maxnode = 0;
265 const struct internal_config *internal_conf =
266 eal_get_internal_configuration();
267
268 /* Check if kernel supports NUMA. */
269 if (numa_available() != 0) {
270 EAL_LOG(DEBUG, "NUMA is not supported.");
271 have_numa = false;
272 }
273
274 if (have_numa) {
275 EAL_LOG(DEBUG, "Trying to obtain current memory policy.");
276 oldmask = numa_allocate_nodemask();
277 if (get_mempolicy(&oldpolicy, oldmask->maskp,
278 oldmask->size + 1, 0, 0) < 0) {
279 EAL_LOG(ERR,
280 "Failed to get current mempolicy: %s. "
281 "Assuming MPOL_DEFAULT.", strerror(errno));
282 oldpolicy = MPOL_DEFAULT;
283 }
284 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
285 if (internal_conf->socket_mem[i])
286 maxnode = i + 1;
287 }
288 #endif
289
290 for (i = 0; i < hpi->num_pages[0]; i++) {
291 struct hugepage_file *hf = &hugepg_tbl[i];
292 uint64_t hugepage_sz = hpi->hugepage_sz;
293
294 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
295 if (maxnode) {
296 unsigned int j;
297
298 for (j = 0; j < maxnode; j++)
299 if (essential_memory[j])
300 break;
301
302 if (j == maxnode) {
303 node_id = (node_id + 1) % maxnode;
304 while (!internal_conf->socket_mem[node_id]) {
305 node_id++;
306 node_id %= maxnode;
307 }
308 essential_prev = 0;
309 } else {
310 node_id = j;
311 essential_prev = essential_memory[j];
312
313 if (essential_memory[j] < hugepage_sz)
314 essential_memory[j] = 0;
315 else
316 essential_memory[j] -= hugepage_sz;
317 }
318
319 EAL_LOG(DEBUG,
320 "Setting policy MPOL_PREFERRED for socket %d",
321 node_id);
322 numa_set_preferred(node_id);
323 }
324 #endif
325
326 hf->file_id = i;
327 hf->size = hugepage_sz;
328 eal_get_hugefile_path(hf->filepath, sizeof(hf->filepath),
329 hpi->hugedir, hf->file_id);
330 hf->filepath[sizeof(hf->filepath) - 1] = '\0';
331
332 /* try to create hugepage file */
333 fd = open(hf->filepath, O_CREAT | O_RDWR, 0600);
334 if (fd < 0) {
335 EAL_LOG(DEBUG, "%s(): open failed: %s", __func__,
336 strerror(errno));
337 goto out;
338 }
339
340 		/* Map the segment and populate page tables;
341 		 * the kernel fills this segment with zeros. We don't care where
342 * this gets mapped - we already have contiguous memory areas
343 * ready for us to map into.
344 */
345 virtaddr = mmap(NULL, hugepage_sz, PROT_READ | PROT_WRITE,
346 MAP_SHARED | MAP_POPULATE, fd, 0);
347 if (virtaddr == MAP_FAILED) {
348 EAL_LOG(DEBUG, "%s(): mmap failed: %s", __func__,
349 strerror(errno));
350 close(fd);
351 goto out;
352 }
353
354 hf->orig_va = virtaddr;
355
356 		/* In Linux, hugetlb limits (such as cgroup limits) are
357 		 * enforced at fault time instead of at mmap() time, even
358 		 * with MAP_POPULATE; the kernel then sends a SIGBUS
359 		 * signal. To avoid being killed, save the stack
360 		 * environment here so that, if SIGBUS happens, we can
361 		 * jump back to this point.
362 */
363 if (huge_wrap_sigsetjmp()) {
364 EAL_LOG(DEBUG, "SIGBUS: Cannot mmap more "
365 "hugepages of size %u MB",
366 (unsigned int)(hugepage_sz / 0x100000));
367 munmap(virtaddr, hugepage_sz);
368 close(fd);
369 unlink(hugepg_tbl[i].filepath);
370 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
371 if (maxnode)
372 essential_memory[node_id] =
373 essential_prev;
374 #endif
375 goto out;
376 }
377 *(int *)virtaddr = 0;
378
379 /* set shared lock on the file. */
380 if (flock(fd, LOCK_SH) < 0) {
381 			EAL_LOG(DEBUG, "%s(): Locking file failed: %s",
382 __func__, strerror(errno));
383 close(fd);
384 goto out;
385 }
386
387 close(fd);
388 }
389
390 out:
391 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
392 if (maxnode) {
393 EAL_LOG(DEBUG,
394 "Restoring previous memory policy: %d", oldpolicy);
395 if (oldpolicy == MPOL_DEFAULT) {
396 numa_set_localalloc();
397 } else if (set_mempolicy(oldpolicy, oldmask->maskp,
398 oldmask->size + 1) < 0) {
399 EAL_LOG(ERR, "Failed to restore mempolicy: %s",
400 strerror(errno));
401 numa_set_localalloc();
402 }
403 }
404 if (oldmask != NULL)
405 numa_free_cpumask(oldmask);
406 #endif
407 return i;
408 }
409
410 /*
411 * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
412 * page.
413 */
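/*
 * A /proc/self/numa_maps entry for a hugepage mapping looks roughly like
 * this (values are illustrative only):
 *     7f2a40000000 prefer:0 file=/mnt/huge/rtemap_0 huge dirty=1 N0=1 kernelpagesize_kB=2048
 * The leading hexadecimal number is the mapping's start VA and "N<node>=<pages>"
 * gives the NUMA node; both are extracted below.
 */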
414 static int
415 find_numasocket(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi)
416 {
417 int socket_id;
418 char *end, *nodestr;
419 unsigned i, hp_count = 0;
420 uint64_t virt_addr;
421 char buf[BUFSIZ];
422 char hugedir_str[PATH_MAX];
423 FILE *f;
424
425 f = fopen("/proc/self/numa_maps", "r");
426 if (f == NULL) {
427 		EAL_LOG(NOTICE, "NUMA support not available;"
428 			" assuming all memory is on socket 0");
429 return 0;
430 }
431
432 snprintf(hugedir_str, sizeof(hugedir_str),
433 "%s/%s", hpi->hugedir, eal_get_hugefile_prefix());
434
435 /* parse numa map */
436 while (fgets(buf, sizeof(buf), f) != NULL) {
437
438 /* ignore non huge page */
439 if (strstr(buf, " huge ") == NULL &&
440 strstr(buf, hugedir_str) == NULL)
441 continue;
442
443 /* get zone addr */
444 virt_addr = strtoull(buf, &end, 16);
445 if (virt_addr == 0 || end == buf) {
446 EAL_LOG(ERR, "%s(): error in numa_maps parsing", __func__);
447 goto error;
448 }
449
450 /* get node id (socket id) */
451 nodestr = strstr(buf, " N");
452 if (nodestr == NULL) {
453 EAL_LOG(ERR, "%s(): error in numa_maps parsing", __func__);
454 goto error;
455 }
456 nodestr += 2;
457 end = strstr(nodestr, "=");
458 if (end == NULL) {
459 EAL_LOG(ERR, "%s(): error in numa_maps parsing", __func__);
460 goto error;
461 }
462 end[0] = '\0';
463 end = NULL;
464
465 socket_id = strtoul(nodestr, &end, 0);
466 if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
467 EAL_LOG(ERR, "%s(): error in numa_maps parsing", __func__);
468 goto error;
469 }
470
471 /* if we find this page in our mappings, set socket_id */
472 for (i = 0; i < hpi->num_pages[0]; i++) {
473 void *va = (void *)(unsigned long)virt_addr;
474 if (hugepg_tbl[i].orig_va == va) {
475 hugepg_tbl[i].socket_id = socket_id;
476 hp_count++;
477 #ifdef RTE_EAL_NUMA_AWARE_HUGEPAGES
478 EAL_LOG(DEBUG,
479 "Hugepage %s is on socket %d",
480 hugepg_tbl[i].filepath, socket_id);
481 #endif
482 }
483 }
484 }
485
486 if (hp_count < hpi->num_pages[0])
487 goto error;
488
489 fclose(f);
490 return 0;
491
492 error:
493 fclose(f);
494 return -1;
495 }
496
497 static int
498 cmp_physaddr(const void *a, const void *b)
499 {
500 #ifndef RTE_ARCH_PPC_64
501 const struct hugepage_file *p1 = a;
502 const struct hugepage_file *p2 = b;
503 #else
504 /* PowerPC needs memory sorted in reverse order from x86 */
505 const struct hugepage_file *p1 = b;
506 const struct hugepage_file *p2 = a;
507 #endif
508 if (p1->physaddr < p2->physaddr)
509 return -1;
510 else if (p1->physaddr > p2->physaddr)
511 return 1;
512 else
513 return 0;
514 }
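/*
 * Used with qsort() in eal_legacy_hugepage_init() to order the hugepage
 * table by physical address; on PPC64 the comparison is inverted so that
 * later remapping walks from higher to lower addresses.
 */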
515
516 /*
517  * Uses mmap to create a shared memory area for storage of data.
518  * Used in this file to store the hugepage file map on disk.
519 */
520 static void *
521 create_shared_memory(const char *filename, const size_t mem_size)
522 {
523 void *retval;
524 int fd;
525 const struct internal_config *internal_conf =
526 eal_get_internal_configuration();
527
528 /* if no shared files mode is used, create anonymous memory instead */
529 if (internal_conf->no_shconf) {
530 retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE,
531 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
532 if (retval == MAP_FAILED)
533 return NULL;
534 return retval;
535 }
536
537 fd = open(filename, O_CREAT | O_RDWR, 0600);
538 if (fd < 0)
539 return NULL;
540 if (ftruncate(fd, mem_size) < 0) {
541 close(fd);
542 return NULL;
543 }
544 retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
545 close(fd);
546 if (retval == MAP_FAILED)
547 return NULL;
548 return retval;
549 }
550
551 /*
552  * This copies *active* hugepages from one hugepage table to another.
553  * The destination is typically the shared memory.
554 */
555 static int
556 copy_hugepages_to_shared_mem(struct hugepage_file * dst, int dest_size,
557 const struct hugepage_file * src, int src_size)
558 {
559 int src_pos, dst_pos = 0;
560
561 for (src_pos = 0; src_pos < src_size; src_pos++) {
562 if (src[src_pos].orig_va != NULL) {
563 /* error on overflow attempt */
564 if (dst_pos == dest_size)
565 return -1;
566 memcpy(&dst[dst_pos], &src[src_pos], sizeof(struct hugepage_file));
567 dst_pos++;
568 }
569 }
570 return 0;
571 }
572
573 static int
574 unlink_hugepage_files(struct hugepage_file *hugepg_tbl,
575 unsigned num_hp_info)
576 {
577 unsigned socket, size;
578 int page, nrpages = 0;
579 const struct internal_config *internal_conf =
580 eal_get_internal_configuration();
581
582 /* get total number of hugepages */
583 for (size = 0; size < num_hp_info; size++)
584 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
585 nrpages +=
586 internal_conf->hugepage_info[size].num_pages[socket];
587
588 for (page = 0; page < nrpages; page++) {
589 struct hugepage_file *hp = &hugepg_tbl[page];
590
591 if (hp->orig_va != NULL && unlink(hp->filepath)) {
592 EAL_LOG(WARNING, "%s(): Removing %s failed: %s",
593 __func__, hp->filepath, strerror(errno));
594 }
595 }
596 return 0;
597 }
598
599 /*
600  * Unmaps hugepages that are not going to be used. Since we originally allocate
601  * ALL hugepages (not just those we need), additional unmapping needs to be done.
602 */
603 static int
604 unmap_unneeded_hugepages(struct hugepage_file *hugepg_tbl,
605 struct hugepage_info *hpi,
606 unsigned num_hp_info)
607 {
608 unsigned socket, size;
609 int page, nrpages = 0;
610 const struct internal_config *internal_conf =
611 eal_get_internal_configuration();
612
613 /* get total number of hugepages */
614 for (size = 0; size < num_hp_info; size++)
615 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++)
616 nrpages += internal_conf->hugepage_info[size].num_pages[socket];
617
618 for (size = 0; size < num_hp_info; size++) {
619 for (socket = 0; socket < RTE_MAX_NUMA_NODES; socket++) {
620 unsigned pages_found = 0;
621
622 /* traverse until we have unmapped all the unused pages */
623 for (page = 0; page < nrpages; page++) {
624 struct hugepage_file *hp = &hugepg_tbl[page];
625
626 /* find a page that matches the criteria */
627 if ((hp->size == hpi[size].hugepage_sz) &&
628 (hp->socket_id == (int) socket)) {
629
630 /* if we skipped enough pages, unmap the rest */
631 if (pages_found == hpi[size].num_pages[socket]) {
632 uint64_t unmap_len;
633
634 unmap_len = hp->size;
635
636 /* get start addr and len of the remaining segment */
637 munmap(hp->orig_va,
638 (size_t)unmap_len);
639
640 hp->orig_va = NULL;
641 if (unlink(hp->filepath) == -1) {
642 EAL_LOG(ERR, "%s(): Removing %s failed: %s",
643 __func__, hp->filepath, strerror(errno));
644 return -1;
645 }
646 } else {
647 					/* this page is needed - count it and keep it mapped */
648 pages_found++;
649 }
650
651 } /* match page */
652 } /* foreach page */
653 } /* foreach socket */
654 } /* foreach pagesize */
655
656 return 0;
657 }
658
659 static int
660 remap_segment(struct hugepage_file *hugepages, int seg_start, int seg_end)
661 {
662 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
663 struct rte_memseg_list *msl;
664 struct rte_fbarray *arr;
665 int cur_page, seg_len;
666 unsigned int msl_idx;
667 int ms_idx;
668 uint64_t page_sz;
669 size_t memseg_len;
670 int socket_id;
671 #ifndef RTE_ARCH_64
672 const struct internal_config *internal_conf =
673 eal_get_internal_configuration();
674 #endif
675 page_sz = hugepages[seg_start].size;
676 socket_id = hugepages[seg_start].socket_id;
677 seg_len = seg_end - seg_start;
678
679 EAL_LOG(DEBUG, "Attempting to map %" PRIu64 "M on socket %i",
680 (seg_len * page_sz) >> 20ULL, socket_id);
681
682 /* find free space in memseg lists */
683 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
684 int free_len;
685 bool empty;
686 msl = &mcfg->memsegs[msl_idx];
687 arr = &msl->memseg_arr;
688
689 if (msl->page_sz != page_sz)
690 continue;
691 if (msl->socket_id != socket_id)
692 continue;
693
694 /* leave space for a hole if array is not empty */
695 empty = arr->count == 0;
696 /* find start of the biggest contiguous block and its size */
697 ms_idx = rte_fbarray_find_biggest_free(arr, 0);
698 if (ms_idx < 0)
699 continue;
700 		/* the hole we leave is one segment long, so the free block must be at least two segments long */
701 free_len = rte_fbarray_find_contig_free(arr, ms_idx);
702 if (free_len < 2)
703 continue;
704 /* leave some space between memsegs, they are not IOVA
705 * contiguous, so they shouldn't be VA contiguous either.
706 */
707 if (!empty) {
708 ms_idx++;
709 free_len--;
710 }
711
712 /* we might not get all of the space we wanted */
713 free_len = RTE_MIN(seg_len, free_len);
714 seg_end = seg_start + free_len;
715 seg_len = seg_end - seg_start;
716 break;
717 }
718 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
719 EAL_LOG(ERR, "Could not find space for memseg. Please increase RTE_MAX_MEMSEG_PER_LIST "
720 "RTE_MAX_MEMSEG_PER_TYPE and/or RTE_MAX_MEM_MB_PER_TYPE in configuration.");
721 return -1;
722 }
723
724 #ifdef RTE_ARCH_PPC_64
725 /* for PPC64 we go through the list backwards */
726 for (cur_page = seg_end - 1; cur_page >= seg_start;
727 cur_page--, ms_idx++) {
728 #else
729 for (cur_page = seg_start; cur_page < seg_end; cur_page++, ms_idx++) {
730 #endif
731 struct hugepage_file *hfile = &hugepages[cur_page];
732 struct rte_memseg *ms = rte_fbarray_get(arr, ms_idx);
733 void *addr;
734 int fd;
735
736 fd = open(hfile->filepath, O_RDWR);
737 if (fd < 0) {
738 EAL_LOG(ERR, "Could not open '%s': %s",
739 hfile->filepath, strerror(errno));
740 return -1;
741 }
742 /* set shared lock on the file. */
743 if (flock(fd, LOCK_SH) < 0) {
744 EAL_LOG(DEBUG, "Could not lock '%s': %s",
745 hfile->filepath, strerror(errno));
746 close(fd);
747 return -1;
748 }
749 memseg_len = (size_t)page_sz;
750 addr = RTE_PTR_ADD(msl->base_va, ms_idx * memseg_len);
751
752 /* we know this address is already mmapped by memseg list, so
753 * using MAP_FIXED here is safe
754 */
755 addr = mmap(addr, page_sz, PROT_READ | PROT_WRITE,
756 MAP_SHARED | MAP_POPULATE | MAP_FIXED, fd, 0);
757 if (addr == MAP_FAILED) {
758 EAL_LOG(ERR, "Couldn't remap '%s': %s",
759 hfile->filepath, strerror(errno));
760 close(fd);
761 return -1;
762 }
763
764 /* we have a new address, so unmap previous one */
765 #ifndef RTE_ARCH_64
766 /* in 32-bit legacy mode, we have already unmapped the page */
767 if (!internal_conf->legacy_mem)
768 munmap(hfile->orig_va, page_sz);
769 #else
770 munmap(hfile->orig_va, page_sz);
771 #endif
772
773 hfile->orig_va = NULL;
774 hfile->final_va = addr;
775
776 /* rewrite physical addresses in IOVA as VA mode */
777 if (rte_eal_iova_mode() == RTE_IOVA_VA)
778 hfile->physaddr = (uintptr_t)addr;
779
780 /* set up memseg data */
781 ms->addr = addr;
782 ms->hugepage_sz = page_sz;
783 ms->len = memseg_len;
784 ms->iova = hfile->physaddr;
785 ms->socket_id = hfile->socket_id;
786 ms->nchannel = rte_memory_get_nchannel();
787 ms->nrank = rte_memory_get_nrank();
788
789 rte_fbarray_set_used(arr, ms_idx);
790
791 /* store segment fd internally */
792 if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
793 EAL_LOG(ERR, "Could not store segment fd: %s",
794 rte_strerror(rte_errno));
795 }
796 EAL_LOG(DEBUG, "Allocated %" PRIu64 "M on socket %i",
797 (seg_len * page_sz) >> 20, socket_id);
798 return seg_len;
799 }
800
801 static uint64_t
802 get_mem_amount(uint64_t page_sz, uint64_t max_mem)
803 {
804 uint64_t area_sz, max_pages;
805
806 /* limit to RTE_MAX_MEMSEG_PER_LIST pages or RTE_MAX_MEM_MB_PER_LIST */
807 max_pages = RTE_MAX_MEMSEG_PER_LIST;
808 max_mem = RTE_MIN((uint64_t)RTE_MAX_MEM_MB_PER_LIST << 20, max_mem);
809
810 area_sz = RTE_MIN(page_sz * max_pages, max_mem);
811
812 /* make sure the list isn't smaller than the page size */
813 area_sz = RTE_MAX(area_sz, page_sz);
814
815 return RTE_ALIGN(area_sz, page_sz);
816 }
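/*
 * Worked example, assuming hypothetical build-time defaults of
 * RTE_MAX_MEMSEG_PER_LIST = 8192 and RTE_MAX_MEM_MB_PER_LIST = 32768:
 * with 2 MB pages and an unconstrained max_mem, the list size is
 * min(8192 * 2 MB, 32 GB) = 16 GB; with 1 GB pages it is
 * min(8192 GB, 32 GB) = 32 GB.
 */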
817
818 static int
819 memseg_list_free(struct rte_memseg_list *msl)
820 {
821 if (rte_fbarray_destroy(&msl->memseg_arr)) {
822 EAL_LOG(ERR, "Cannot destroy memseg list");
823 return -1;
824 }
825 memset(msl, 0, sizeof(*msl));
826 return 0;
827 }
828
829 /*
830 * Our VA space is not preallocated yet, so preallocate it here. We need to know
831 * how many segments there are in order to map all pages into one address space,
832 * and leave appropriate holes between segments so that rte_malloc does not
833 * concatenate them into one big segment.
834 *
835  * We also need to unmap the original pages to free up address space.
836 */
837 static int __rte_unused
838 prealloc_segments(struct hugepage_file *hugepages, int n_pages)
839 {
840 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
841 int cur_page, seg_start_page, end_seg, new_memseg;
842 unsigned int hpi_idx, socket, i;
843 int n_contig_segs, n_segs;
844 int msl_idx;
845 const struct internal_config *internal_conf =
846 eal_get_internal_configuration();
847
848 /* before we preallocate segments, we need to free up our VA space.
849 * we're not removing files, and we already have information about
850 * PA-contiguousness, so it is safe to unmap everything.
851 */
852 for (cur_page = 0; cur_page < n_pages; cur_page++) {
853 struct hugepage_file *hpi = &hugepages[cur_page];
854 munmap(hpi->orig_va, hpi->size);
855 hpi->orig_va = NULL;
856 }
857
858 	/* we do not know in advance which page sizes and sockets the discovered
859 	 * pages belong to, so loop over all of them
860 */
861 for (hpi_idx = 0; hpi_idx < internal_conf->num_hugepage_sizes;
862 hpi_idx++) {
863 uint64_t page_sz =
864 internal_conf->hugepage_info[hpi_idx].hugepage_sz;
865
866 for (i = 0; i < rte_socket_count(); i++) {
867 struct rte_memseg_list *msl;
868
869 socket = rte_socket_id_by_idx(i);
870 n_contig_segs = 0;
871 n_segs = 0;
872 seg_start_page = -1;
873
874 for (cur_page = 0; cur_page < n_pages; cur_page++) {
875 struct hugepage_file *prev, *cur;
876 int prev_seg_start_page = -1;
877
878 cur = &hugepages[cur_page];
879 prev = cur_page == 0 ? NULL :
880 &hugepages[cur_page - 1];
881
882 new_memseg = 0;
883 end_seg = 0;
884
885 if (cur->size == 0)
886 end_seg = 1;
887 else if (cur->socket_id != (int) socket)
888 end_seg = 1;
889 else if (cur->size != page_sz)
890 end_seg = 1;
891 else if (cur_page == 0)
892 new_memseg = 1;
893 #ifdef RTE_ARCH_PPC_64
894 				/* On the PPC64 architecture, mmap always proceeds
895 				 * from higher addresses to lower addresses. Here,
896 * physical addresses are in descending order.
897 */
898 else if ((prev->physaddr - cur->physaddr) !=
899 cur->size)
900 new_memseg = 1;
901 #else
902 else if ((cur->physaddr - prev->physaddr) !=
903 cur->size)
904 new_memseg = 1;
905 #endif
906 if (new_memseg) {
907 /* if we're already inside a segment,
908 * new segment means end of current one
909 */
910 if (seg_start_page != -1) {
911 end_seg = 1;
912 prev_seg_start_page =
913 seg_start_page;
914 }
915 seg_start_page = cur_page;
916 }
917
918 if (end_seg) {
919 if (prev_seg_start_page != -1) {
920 /* we've found a new segment */
921 n_contig_segs++;
922 n_segs += cur_page -
923 prev_seg_start_page;
924 } else if (seg_start_page != -1) {
925 /* we didn't find new segment,
926 * but did end current one
927 */
928 n_contig_segs++;
929 n_segs += cur_page -
930 seg_start_page;
931 seg_start_page = -1;
932 continue;
933 } else {
934 /* we're skipping this page */
935 continue;
936 }
937 }
938 /* segment continues */
939 }
940 /* check if we missed last segment */
941 if (seg_start_page != -1) {
942 n_contig_segs++;
943 n_segs += cur_page - seg_start_page;
944 }
945
946 /* if no segments were found, do not preallocate */
947 if (n_segs == 0)
948 continue;
949
950 /* we now have total number of pages that we will
951 * allocate for this segment list. add separator pages
952 * to the total count, and preallocate VA space.
953 */
954 n_segs += n_contig_segs - 1;
955
956 /* now, preallocate VA space for these segments */
957
958 /* first, find suitable memseg list for this */
959 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS;
960 msl_idx++) {
961 msl = &mcfg->memsegs[msl_idx];
962
963 if (msl->base_va != NULL)
964 continue;
965 break;
966 }
967 if (msl_idx == RTE_MAX_MEMSEG_LISTS) {
968 EAL_LOG(ERR, "Not enough space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS");
969 return -1;
970 }
971
972 /* now, allocate fbarray itself */
973 if (eal_memseg_list_init(msl, page_sz, n_segs,
974 socket, msl_idx, true) < 0)
975 return -1;
976
977 /* finally, allocate VA space */
978 if (eal_memseg_list_alloc(msl, 0) < 0) {
979 EAL_LOG(ERR, "Cannot preallocate 0x%"PRIx64"kB hugepages",
980 page_sz >> 10);
981 return -1;
982 }
983 }
984 }
985 return 0;
986 }
987
988 /*
989 * We cannot reallocate memseg lists on the fly because PPC64 stores pages
990  * backwards; therefore, we have to process the entire memseg before
991 * remapping it into memseg list VA space.
992 */
993 static int
994 remap_needed_hugepages(struct hugepage_file *hugepages, int n_pages)
995 {
996 int cur_page, seg_start_page, new_memseg, ret;
997
998 seg_start_page = 0;
999 for (cur_page = 0; cur_page < n_pages; cur_page++) {
1000 struct hugepage_file *prev, *cur;
1001
1002 new_memseg = 0;
1003
1004 cur = &hugepages[cur_page];
1005 prev = cur_page == 0 ? NULL : &hugepages[cur_page - 1];
1006
1007 /* if size is zero, no more pages left */
1008 if (cur->size == 0)
1009 break;
1010
1011 if (cur_page == 0)
1012 new_memseg = 1;
1013 else if (cur->socket_id != prev->socket_id)
1014 new_memseg = 1;
1015 else if (cur->size != prev->size)
1016 new_memseg = 1;
1017 #ifdef RTE_ARCH_PPC_64
1018 	/* On the PPC64 architecture, mmap always proceeds from higher
1019 	 * addresses to lower addresses. Here, physical addresses are in
1020 * descending order.
1021 */
1022 else if ((prev->physaddr - cur->physaddr) != cur->size)
1023 new_memseg = 1;
1024 #else
1025 else if ((cur->physaddr - prev->physaddr) != cur->size)
1026 new_memseg = 1;
1027 #endif
1028
1029 if (new_memseg) {
1030 /* if this isn't the first time, remap segment */
1031 if (cur_page != 0) {
1032 int n_remapped = 0;
1033 int n_needed = cur_page - seg_start_page;
1034 while (n_remapped < n_needed) {
1035 ret = remap_segment(hugepages, seg_start_page,
1036 cur_page);
1037 if (ret < 0)
1038 return -1;
1039 n_remapped += ret;
1040 seg_start_page += ret;
1041 }
1042 }
1043 /* remember where we started */
1044 seg_start_page = cur_page;
1045 }
1046 /* continuation of previous memseg */
1047 }
1048 	/* we have reached the end of the table, but we didn't remap the last segment; do it now */
1049 if (cur_page != 0) {
1050 int n_remapped = 0;
1051 int n_needed = cur_page - seg_start_page;
1052 while (n_remapped < n_needed) {
1053 ret = remap_segment(hugepages, seg_start_page,
1054 cur_page);
1055 if (ret < 0)
1056 return -1;
1057 n_remapped += ret;
1058 seg_start_page += ret;
1059 }
1060 }
1061 return 0;
1062 }
1063
1064 static inline size_t
1065 eal_get_hugepage_mem_size(void)
1066 {
1067 uint64_t size = 0;
1068 unsigned i, j;
1069 struct internal_config *internal_conf =
1070 eal_get_internal_configuration();
1071
1072 for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
1073 struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
1074 if (strnlen(hpi->hugedir, sizeof(hpi->hugedir)) != 0) {
1075 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1076 size += hpi->hugepage_sz * hpi->num_pages[j];
1077 }
1078 }
1079 }
1080
1081 return (size < SIZE_MAX) ? (size_t)(size) : SIZE_MAX;
1082 }
1083
1084 static struct sigaction huge_action_old;
1085 static int huge_need_recover;
1086
1087 static void
1088 huge_register_sigbus(void)
1089 {
1090 sigset_t mask;
1091 struct sigaction action;
1092
1093 sigemptyset(&mask);
1094 sigaddset(&mask, SIGBUS);
1095 action.sa_flags = 0;
1096 action.sa_mask = mask;
1097 action.sa_handler = huge_sigbus_handler;
1098
1099 huge_need_recover = !sigaction(SIGBUS, &action, &huge_action_old);
1100 }
1101
1102 static void
1103 huge_recover_sigbus(void)
1104 {
1105 if (huge_need_recover) {
1106 sigaction(SIGBUS, &huge_action_old, NULL);
1107 huge_need_recover = 0;
1108 }
1109 }
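/*
 * huge_register_sigbus() and huge_recover_sigbus() are paired around the
 * mapping loop in eal_legacy_hugepage_init(): the handler is installed before
 * map_all_hugepages() touches any page, and it is restored once all page
 * sizes have been processed (or on failure).
 */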
1110
1111 /*
1112  * Prepare physical memory mapping: fill the configuration structure with
1113  * this information and return 0 on success.
1114 * 1. map N huge pages in separate files in hugetlbfs
1115 * 2. find associated physical addr
1116 * 3. find associated NUMA socket ID
1117 * 4. sort all huge pages by physical address
1118 * 5. remap these N huge pages in the correct order
1119 * 6. unmap the first mapping
1120 * 7. fill memsegs in configuration with contiguous zones
1121 */
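/*
 * This path is reached from rte_eal_hugepage_init() only when legacy memory
 * mode is in use (--legacy-mem, or --no-huge, which forces legacy mode);
 * otherwise eal_dynmem_hugepage_init() is used instead.
 */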
1122 static int
1123 eal_legacy_hugepage_init(void)
1124 {
1125 struct rte_mem_config *mcfg;
1126 struct hugepage_file *hugepage = NULL, *tmp_hp = NULL;
1127 struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
1128 struct internal_config *internal_conf =
1129 eal_get_internal_configuration();
1130
1131 uint64_t memory[RTE_MAX_NUMA_NODES];
1132
1133 unsigned hp_offset;
1134 int i, j;
1135 int nr_hugefiles, nr_hugepages = 0;
1136 void *addr;
1137
1138 memset(used_hp, 0, sizeof(used_hp));
1139
1140 /* get pointer to global configuration */
1141 mcfg = rte_eal_get_configuration()->mem_config;
1142
1143 /* hugetlbfs can be disabled */
1144 if (internal_conf->no_hugetlbfs) {
1145 void *prealloc_addr;
1146 size_t mem_sz;
1147 struct rte_memseg_list *msl;
1148 int n_segs, fd, flags;
1149 #ifdef MEMFD_SUPPORTED
1150 int memfd;
1151 #endif
1152 uint64_t page_sz;
1153
1154 /* nohuge mode is legacy mode */
1155 internal_conf->legacy_mem = 1;
1156
1157 /* nohuge mode is single-file segments mode */
1158 internal_conf->single_file_segments = 1;
1159
1160 /* create a memseg list */
1161 msl = &mcfg->memsegs[0];
1162
1163 mem_sz = internal_conf->memory;
1164 page_sz = RTE_PGSIZE_4K;
1165 n_segs = mem_sz / page_sz;
1166
1167 if (eal_memseg_list_init_named(
1168 msl, "nohugemem", page_sz, n_segs, 0, true)) {
1169 return -1;
1170 }
1171
1172 /* set up parameters for anonymous mmap */
1173 fd = -1;
1174 flags = MAP_PRIVATE | MAP_ANONYMOUS;
1175
1176 #ifdef MEMFD_SUPPORTED
1177 /* create a memfd and store it in the segment fd table */
1178 memfd = memfd_create("nohuge", 0);
1179 if (memfd < 0) {
1180 EAL_LOG(DEBUG, "Cannot create memfd: %s",
1181 strerror(errno));
1182 EAL_LOG(DEBUG, "Falling back to anonymous map");
1183 } else {
1184 /* we got an fd - now resize it */
1185 if (ftruncate(memfd, internal_conf->memory) < 0) {
1186 EAL_LOG(ERR, "Cannot resize memfd: %s",
1187 strerror(errno));
1188 EAL_LOG(ERR, "Falling back to anonymous map");
1189 close(memfd);
1190 } else {
1191 /* creating memfd-backed file was successful.
1192 * we want changes to memfd to be visible to
1193 * other processes (such as vhost backend), so
1194 * map it as shared memory.
1195 */
1196 EAL_LOG(DEBUG, "Using memfd for anonymous memory");
1197 fd = memfd;
1198 flags = MAP_SHARED;
1199 }
1200 }
1201 #endif
1202 /* preallocate address space for the memory, so that it can be
1203 * fit into the DMA mask.
1204 */
1205 if (eal_memseg_list_alloc(msl, 0)) {
1206 EAL_LOG(ERR, "Cannot preallocate VA space for hugepage memory");
1207 return -1;
1208 }
1209
1210 prealloc_addr = msl->base_va;
1211 addr = mmap(prealloc_addr, mem_sz, PROT_READ | PROT_WRITE,
1212 flags | MAP_FIXED, fd, 0);
1213 if (addr == MAP_FAILED || addr != prealloc_addr) {
1214 EAL_LOG(ERR, "%s: mmap() failed: %s", __func__,
1215 strerror(errno));
1216 munmap(prealloc_addr, mem_sz);
1217 return -1;
1218 }
1219
1220 /* we're in single-file segments mode, so only the segment list
1221 * fd needs to be set up.
1222 */
1223 if (fd != -1) {
1224 if (eal_memalloc_set_seg_list_fd(0, fd) < 0) {
1225 EAL_LOG(ERR, "Cannot set up segment list fd");
1226 /* not a serious error, proceed */
1227 }
1228 }
1229
1230 eal_memseg_list_populate(msl, addr, n_segs);
1231
1232 if (mcfg->dma_maskbits &&
1233 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1234 EAL_LOG(ERR,
1235 "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.",
1236 __func__);
1237 if (rte_eal_iova_mode() == RTE_IOVA_VA &&
1238 rte_eal_using_phys_addrs())
1239 EAL_LOG(ERR,
1240 "%s(): Please try initializing EAL with --iova-mode=pa parameter.",
1241 __func__);
1242 goto fail;
1243 }
1244 return 0;
1245 }
1246
1247 	/* Calculate the total number of hugepages available. At this point we
1248 	 * haven't yet started sorting them, so they are all counted on socket 0 */
1249 for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
1250 /* meanwhile, also initialize used_hp hugepage sizes in used_hp */
1251 used_hp[i].hugepage_sz = internal_conf->hugepage_info[i].hugepage_sz;
1252
1253 nr_hugepages += internal_conf->hugepage_info[i].num_pages[0];
1254 }
1255
1256 /*
1257 	 * Allocate a memory area for the hugepage table.
1258 	 * This isn't shared memory yet; since we still need to do some
1259 	 * processing on these pages, the shared memory copy will be created
1260 	 * at a later stage.
1261 */
1262 tmp_hp = malloc(nr_hugepages * sizeof(struct hugepage_file));
1263 if (tmp_hp == NULL)
1264 goto fail;
1265
1266 memset(tmp_hp, 0, nr_hugepages * sizeof(struct hugepage_file));
1267
1268 hp_offset = 0; /* where we start the current page size entries */
1269
1270 huge_register_sigbus();
1271
1272 /* make a copy of socket_mem, needed for balanced allocation. */
1273 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1274 memory[i] = internal_conf->socket_mem[i];
1275
1276 /* map all hugepages and sort them */
1277 for (i = 0; i < (int)internal_conf->num_hugepage_sizes; i++) {
1278 unsigned pages_old, pages_new;
1279 struct hugepage_info *hpi;
1280
1281 /*
1282 * we don't yet mark hugepages as used at this stage, so
1283 * we just map all hugepages available to the system
1284 * all hugepages are still located on socket 0
1285 */
1286 hpi = &internal_conf->hugepage_info[i];
1287
1288 if (hpi->num_pages[0] == 0)
1289 continue;
1290
1291 /* map all hugepages available */
1292 pages_old = hpi->num_pages[0];
1293 pages_new = map_all_hugepages(&tmp_hp[hp_offset], hpi, memory);
1294 if (pages_new < pages_old) {
1295 EAL_LOG(DEBUG,
1296 "%d not %d hugepages of size %u MB allocated",
1297 pages_new, pages_old,
1298 (unsigned)(hpi->hugepage_sz / 0x100000));
1299
1300 int pages = pages_old - pages_new;
1301
1302 nr_hugepages -= pages;
1303 hpi->num_pages[0] = pages_new;
1304 if (pages_new == 0)
1305 continue;
1306 }
1307
1308 if (rte_eal_using_phys_addrs() &&
1309 rte_eal_iova_mode() != RTE_IOVA_VA) {
1310 /* find physical addresses for each hugepage */
1311 if (find_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1312 EAL_LOG(DEBUG, "Failed to find phys addr "
1313 "for %u MB pages",
1314 (unsigned int)(hpi->hugepage_sz / 0x100000));
1315 goto fail;
1316 }
1317 } else {
1318 /* set physical addresses for each hugepage */
1319 if (set_physaddrs(&tmp_hp[hp_offset], hpi) < 0) {
1320 EAL_LOG(DEBUG, "Failed to set phys addr "
1321 "for %u MB pages",
1322 (unsigned int)(hpi->hugepage_sz / 0x100000));
1323 goto fail;
1324 }
1325 }
1326
1327 if (find_numasocket(&tmp_hp[hp_offset], hpi) < 0){
1328 EAL_LOG(DEBUG, "Failed to find NUMA socket for %u MB pages",
1329 (unsigned)(hpi->hugepage_sz / 0x100000));
1330 goto fail;
1331 }
1332
1333 qsort(&tmp_hp[hp_offset], hpi->num_pages[0],
1334 sizeof(struct hugepage_file), cmp_physaddr);
1335
1336 /* we have processed a num of hugepages of this size, so inc offset */
1337 hp_offset += hpi->num_pages[0];
1338 }
1339
1340 huge_recover_sigbus();
1341
1342 if (internal_conf->memory == 0 && internal_conf->force_sockets == 0)
1343 internal_conf->memory = eal_get_hugepage_mem_size();
1344
1345 nr_hugefiles = nr_hugepages;
1346
1347
1348 /* clean out the numbers of pages */
1349 for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++)
1350 for (j = 0; j < RTE_MAX_NUMA_NODES; j++)
1351 internal_conf->hugepage_info[i].num_pages[j] = 0;
1352
1353 /* get hugepages for each socket */
1354 for (i = 0; i < nr_hugefiles; i++) {
1355 int socket = tmp_hp[i].socket_id;
1356
1357 /* find a hugepage info with right size and increment num_pages */
1358 const int nb_hpsizes = RTE_MIN(MAX_HUGEPAGE_SIZES,
1359 (int)internal_conf->num_hugepage_sizes);
1360 for (j = 0; j < nb_hpsizes; j++) {
1361 if (tmp_hp[i].size ==
1362 internal_conf->hugepage_info[j].hugepage_sz) {
1363 internal_conf->hugepage_info[j].num_pages[socket]++;
1364 }
1365 }
1366 }
1367
1368 /* make a copy of socket_mem, needed for number of pages calculation */
1369 for (i = 0; i < RTE_MAX_NUMA_NODES; i++)
1370 memory[i] = internal_conf->socket_mem[i];
1371
1372 /* calculate final number of pages */
1373 nr_hugepages = eal_dynmem_calc_num_pages_per_socket(memory,
1374 internal_conf->hugepage_info, used_hp,
1375 internal_conf->num_hugepage_sizes);
1376
1377 /* error if not enough memory available */
1378 if (nr_hugepages < 0)
1379 goto fail;
1380
1381 /* reporting in! */
1382 for (i = 0; i < (int) internal_conf->num_hugepage_sizes; i++) {
1383 for (j = 0; j < RTE_MAX_NUMA_NODES; j++) {
1384 if (used_hp[i].num_pages[j] > 0) {
1385 EAL_LOG(DEBUG,
1386 "Requesting %u pages of size %uMB"
1387 " from socket %i",
1388 used_hp[i].num_pages[j],
1389 (unsigned)
1390 (used_hp[i].hugepage_sz / 0x100000),
1391 j);
1392 }
1393 }
1394 }
1395
1396 /* create shared memory */
1397 hugepage = create_shared_memory(eal_hugepage_data_path(),
1398 nr_hugefiles * sizeof(struct hugepage_file));
1399
1400 if (hugepage == NULL) {
1401 EAL_LOG(ERR, "Failed to create shared memory!");
1402 goto fail;
1403 }
1404 memset(hugepage, 0, nr_hugefiles * sizeof(struct hugepage_file));
1405
1406 /*
1407 * unmap pages that we won't need (looks at used_hp).
1408 	 * also, sets orig_va to NULL on pages that were unmapped.
1409 */
1410 if (unmap_unneeded_hugepages(tmp_hp, used_hp,
1411 internal_conf->num_hugepage_sizes) < 0) {
1412 EAL_LOG(ERR, "Unmapping and locking hugepages failed!");
1413 goto fail;
1414 }
1415
1416 /*
1417 	 * Copy data from the malloc'd hugepage table to the actual shared memory.
1418 	 * This procedure only copies those hugepages whose orig_va is
1419 	 * not NULL, and has overflow protection.
1420 */
1421 if (copy_hugepages_to_shared_mem(hugepage, nr_hugefiles,
1422 tmp_hp, nr_hugefiles) < 0) {
1423 EAL_LOG(ERR, "Copying tables to shared memory failed!");
1424 goto fail;
1425 }
1426
1427 #ifndef RTE_ARCH_64
1428 /* for legacy 32-bit mode, we did not preallocate VA space, so do it */
1429 if (internal_conf->legacy_mem &&
1430 prealloc_segments(hugepage, nr_hugefiles)) {
1431 EAL_LOG(ERR, "Could not preallocate VA space for hugepages");
1432 goto fail;
1433 }
1434 #endif
1435
1436 /* remap all pages we do need into memseg list VA space, so that those
1437 * pages become first-class citizens in DPDK memory subsystem
1438 */
1439 if (remap_needed_hugepages(hugepage, nr_hugefiles)) {
1440 EAL_LOG(ERR, "Couldn't remap hugepage files into memseg lists");
1441 goto fail;
1442 }
1443
1444 /* free the hugepage backing files */
1445 if (internal_conf->hugepage_file.unlink_before_mapping &&
1446 unlink_hugepage_files(tmp_hp, internal_conf->num_hugepage_sizes) < 0) {
1447 EAL_LOG(ERR, "Unlinking hugepage files failed!");
1448 goto fail;
1449 }
1450
1451 /* free the temporary hugepage table */
1452 free(tmp_hp);
1453 tmp_hp = NULL;
1454
1455 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1456 hugepage = NULL;
1457
1458 /* we're not going to allocate more pages, so release VA space for
1459 * unused memseg lists
1460 */
1461 for (i = 0; i < RTE_MAX_MEMSEG_LISTS; i++) {
1462 struct rte_memseg_list *msl = &mcfg->memsegs[i];
1463 size_t mem_sz;
1464
1465 /* skip inactive lists */
1466 if (msl->base_va == NULL)
1467 continue;
1468 /* skip lists where there is at least one page allocated */
1469 if (msl->memseg_arr.count > 0)
1470 continue;
1471 /* this is an unused list, deallocate it */
1472 mem_sz = msl->len;
1473 munmap(msl->base_va, mem_sz);
1474 msl->base_va = NULL;
1475 msl->heap = 0;
1476
1477 /* destroy backing fbarray */
1478 rte_fbarray_destroy(&msl->memseg_arr);
1479 }
1480
1481 if (mcfg->dma_maskbits &&
1482 rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
1483 EAL_LOG(ERR,
1484 "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.",
1485 __func__);
1486 goto fail;
1487 }
1488
1489 return 0;
1490
1491 fail:
1492 huge_recover_sigbus();
1493 free(tmp_hp);
1494 if (hugepage != NULL)
1495 munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file));
1496
1497 return -1;
1498 }
1499
1500 /*
1501 * uses fstat to report the size of a file on disk
1502 */
1503 static off_t
1504 getFileSize(int fd)
1505 {
1506 struct stat st;
1507 if (fstat(fd, &st) < 0)
1508 return 0;
1509 return st.st_size;
1510 }
1511
1512 /*
1513  * This creates the memory mappings in the secondary process to match those of
1514  * the primary process. It goes through each memory segment in the DPDK runtime
1515  * configuration, finds the hugepages which form that segment, and maps them
1516  * so as to form a contiguous block in the virtual memory space.
1517 */
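/*
 * The list of hugepage files is read back from the data file written by the
 * primary process in eal_legacy_hugepage_init() (see create_shared_memory()
 * with eal_hugepage_data_path() above).
 */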
1518 static int
1519 eal_legacy_hugepage_attach(void)
1520 {
1521 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1522 struct hugepage_file *hp = NULL;
1523 unsigned int num_hp = 0;
1524 unsigned int i = 0;
1525 unsigned int cur_seg;
1526 off_t size = 0;
1527 int fd, fd_hugepage = -1;
1528
1529 if (aslr_enabled() > 0) {
1530 EAL_LOG(WARNING, "WARNING: Address Space Layout Randomization "
1531 "(ASLR) is enabled in the kernel.");
1532 EAL_LOG(WARNING, " This may cause issues with mapping memory "
1533 "into secondary processes");
1534 }
1535
1536 fd_hugepage = open(eal_hugepage_data_path(), O_RDONLY);
1537 if (fd_hugepage < 0) {
1538 EAL_LOG(ERR, "Could not open %s",
1539 eal_hugepage_data_path());
1540 goto error;
1541 }
1542
1543 size = getFileSize(fd_hugepage);
1544 hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
1545 if (hp == MAP_FAILED) {
1546 EAL_LOG(ERR, "Could not mmap %s",
1547 eal_hugepage_data_path());
1548 goto error;
1549 }
1550
1551 num_hp = size / sizeof(struct hugepage_file);
1552 EAL_LOG(DEBUG, "Analysing %u files", num_hp);
1553
1554 /* map all segments into memory to make sure we get the addrs. the
1555 * segments themselves are already in memseg list (which is shared and
1556 * has its VA space already preallocated), so we just need to map
1557 * everything into correct addresses.
1558 */
1559 for (i = 0; i < num_hp; i++) {
1560 struct hugepage_file *hf = &hp[i];
1561 size_t map_sz = hf->size;
1562 void *map_addr = hf->final_va;
1563 int msl_idx, ms_idx;
1564 struct rte_memseg_list *msl;
1565 struct rte_memseg *ms;
1566
1567 /* if size is zero, no more pages left */
1568 if (map_sz == 0)
1569 break;
1570
1571 fd = open(hf->filepath, O_RDWR);
1572 if (fd < 0) {
1573 EAL_LOG(ERR, "Could not open %s: %s",
1574 hf->filepath, strerror(errno));
1575 goto error;
1576 }
1577
1578 map_addr = mmap(map_addr, map_sz, PROT_READ | PROT_WRITE,
1579 MAP_SHARED | MAP_FIXED, fd, 0);
1580 if (map_addr == MAP_FAILED) {
1581 EAL_LOG(ERR, "Could not map %s: %s",
1582 hf->filepath, strerror(errno));
1583 goto fd_error;
1584 }
1585
1586 /* set shared lock on the file. */
1587 if (flock(fd, LOCK_SH) < 0) {
1588 EAL_LOG(DEBUG, "%s(): Locking file failed: %s",
1589 __func__, strerror(errno));
1590 goto mmap_error;
1591 }
1592
1593 /* find segment data */
1594 msl = rte_mem_virt2memseg_list(map_addr);
1595 if (msl == NULL) {
1596 EAL_LOG(DEBUG, "%s(): Cannot find memseg list",
1597 __func__);
1598 goto mmap_error;
1599 }
1600 ms = rte_mem_virt2memseg(map_addr, msl);
1601 if (ms == NULL) {
1602 EAL_LOG(DEBUG, "%s(): Cannot find memseg",
1603 __func__);
1604 goto mmap_error;
1605 }
1606
1607 msl_idx = msl - mcfg->memsegs;
1608 ms_idx = rte_fbarray_find_idx(&msl->memseg_arr, ms);
1609 if (ms_idx < 0) {
1610 EAL_LOG(DEBUG, "%s(): Cannot find memseg idx",
1611 __func__);
1612 goto mmap_error;
1613 }
1614
1615 /* store segment fd internally */
1616 if (eal_memalloc_set_seg_fd(msl_idx, ms_idx, fd) < 0)
1617 EAL_LOG(ERR, "Could not store segment fd: %s",
1618 rte_strerror(rte_errno));
1619 }
1620 /* unmap the hugepage config file, since we are done using it */
1621 munmap(hp, size);
1622 close(fd_hugepage);
1623 return 0;
1624
1625 mmap_error:
1626 munmap(hp[i].final_va, hp[i].size);
1627 fd_error:
1628 close(fd);
1629 error:
1630 /* unwind mmap's done so far */
1631 for (cur_seg = 0; cur_seg < i; cur_seg++)
1632 munmap(hp[cur_seg].final_va, hp[cur_seg].size);
1633
1634 if (hp != NULL && hp != MAP_FAILED)
1635 munmap(hp, size);
1636 if (fd_hugepage >= 0)
1637 close(fd_hugepage);
1638 return -1;
1639 }
1640
1641 static int
1642 eal_hugepage_attach(void)
1643 {
1644 if (eal_memalloc_sync_with_primary()) {
1645 EAL_LOG(ERR, "Could not map memory from primary process");
1646 if (aslr_enabled() > 0)
1647 EAL_LOG(ERR, "It is recommended to disable ASLR in the kernel and retry running both primary and secondary processes");
1648 return -1;
1649 }
1650 return 0;
1651 }
1652
1653 int
1654 rte_eal_hugepage_init(void)
1655 {
1656 const struct internal_config *internal_conf =
1657 eal_get_internal_configuration();
1658
1659 return internal_conf->legacy_mem ?
1660 eal_legacy_hugepage_init() :
1661 eal_dynmem_hugepage_init();
1662 }
1663
1664 int
1665 rte_eal_hugepage_attach(void)
1666 {
1667 const struct internal_config *internal_conf =
1668 eal_get_internal_configuration();
1669
1670 return internal_conf->legacy_mem ?
1671 eal_legacy_hugepage_attach() :
1672 eal_hugepage_attach();
1673 }
1674
1675 int
1676 rte_eal_using_phys_addrs(void)
1677 {
1678 if (phys_addrs_available == -1) {
1679 uint64_t tmp = 0;
1680
1681 if (rte_eal_has_hugepages() != 0 &&
1682 rte_mem_virt2phy(&tmp) != RTE_BAD_PHYS_ADDR)
1683 phys_addrs_available = 1;
1684 else
1685 phys_addrs_available = 0;
1686 }
1687 return phys_addrs_available;
1688 }
1689
1690 static int __rte_unused
1691 memseg_primary_init_32(void)
1692 {
1693 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1694 int active_sockets, hpi_idx, msl_idx = 0;
1695 unsigned int socket_id, i;
1696 struct rte_memseg_list *msl;
1697 uint64_t extra_mem_per_socket, total_extra_mem, total_requested_mem;
1698 uint64_t max_mem;
1699 struct internal_config *internal_conf =
1700 eal_get_internal_configuration();
1701
1702 /* no-huge does not need this at all */
1703 if (internal_conf->no_hugetlbfs)
1704 return 0;
1705
1706 /* this is a giant hack, but desperate times call for desperate
1707 * measures. in legacy 32-bit mode, we cannot preallocate VA space,
1708 * because having upwards of 2 gigabytes of VA space already mapped will
1709 * interfere with our ability to map and sort hugepages.
1710 *
1711 * therefore, in legacy 32-bit mode, we will be initializing memseg
1712 * lists much later - in eal_memory.c, right after we unmap all the
1713 * unneeded pages. this will not affect secondary processes, as those
1714 * should be able to mmap the space without (too many) problems.
1715 */
1716 if (internal_conf->legacy_mem)
1717 return 0;
1718
1719 /* 32-bit mode is a very special case. we cannot know in advance where
1720 * the user will want to allocate their memory, so we have to do some
1721 * heuristics.
1722 */
1723 active_sockets = 0;
1724 total_requested_mem = 0;
1725 if (internal_conf->force_sockets)
1726 for (i = 0; i < rte_socket_count(); i++) {
1727 uint64_t mem;
1728
1729 socket_id = rte_socket_id_by_idx(i);
1730 mem = internal_conf->socket_mem[socket_id];
1731
1732 if (mem == 0)
1733 continue;
1734
1735 active_sockets++;
1736 total_requested_mem += mem;
1737 }
1738 else
1739 total_requested_mem = internal_conf->memory;
1740
1741 max_mem = (uint64_t)RTE_MAX_MEM_MB << 20;
1742 if (total_requested_mem > max_mem) {
1743 EAL_LOG(ERR, "Invalid parameters: 32-bit process can at most use %uM of memory",
1744 (unsigned int)(max_mem >> 20));
1745 return -1;
1746 }
1747 total_extra_mem = max_mem - total_requested_mem;
1748 extra_mem_per_socket = active_sockets == 0 ? total_extra_mem :
1749 total_extra_mem / active_sockets;
1750
1751 /* the allocation logic is a little bit convoluted, but here's how it
1752 * works, in a nutshell:
1753 * - if user hasn't specified on which sockets to allocate memory via
1754 * --socket-mem, we allocate all of our memory on main core socket.
1755 * - if user has specified sockets to allocate memory on, there may be
1756 * some "unused" memory left (e.g. if user has specified --socket-mem
1757 * such that not all memory adds up to 2 gigabytes), so add it to all
1758 * sockets that are in use equally.
1759 *
1760 * page sizes are sorted by size in descending order, so we can safely
1761 * assume that we dispense with bigger page sizes first.
1762 */
1763
1764 /* create memseg lists */
1765 for (i = 0; i < rte_socket_count(); i++) {
1766 int hp_sizes = (int) internal_conf->num_hugepage_sizes;
1767 uint64_t max_socket_mem, cur_socket_mem;
1768 unsigned int main_lcore_socket;
1769 struct rte_config *cfg = rte_eal_get_configuration();
1770 bool skip;
1771
1772 socket_id = rte_socket_id_by_idx(i);
1773
1774 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1775 /* we can still sort pages by socket in legacy mode */
1776 if (!internal_conf->legacy_mem && socket_id > 0)
1777 break;
1778 #endif
1779
1780 /* if we didn't specifically request memory on this socket */
1781 skip = active_sockets != 0 &&
1782 internal_conf->socket_mem[socket_id] == 0;
1783 /* ...or if we didn't specifically request memory on *any*
1784 * socket, and this is not main lcore
1785 */
1786 main_lcore_socket = rte_lcore_to_socket_id(cfg->main_lcore);
1787 skip |= active_sockets == 0 && socket_id != main_lcore_socket;
1788
1789 if (skip) {
1790 EAL_LOG(DEBUG, "Will not preallocate memory on socket %u",
1791 socket_id);
1792 continue;
1793 }
1794
1795 /* max amount of memory on this socket */
1796 max_socket_mem = (active_sockets != 0 ?
1797 internal_conf->socket_mem[socket_id] :
1798 internal_conf->memory) +
1799 extra_mem_per_socket;
1800 cur_socket_mem = 0;
1801
1802 for (hpi_idx = 0; hpi_idx < hp_sizes; hpi_idx++) {
1803 uint64_t max_pagesz_mem, cur_pagesz_mem = 0;
1804 uint64_t hugepage_sz;
1805 struct hugepage_info *hpi;
1806 int type_msl_idx, max_segs, total_segs = 0;
1807
1808 hpi = &internal_conf->hugepage_info[hpi_idx];
1809 hugepage_sz = hpi->hugepage_sz;
1810
1811 /* check if pages are actually available */
1812 if (hpi->num_pages[socket_id] == 0)
1813 continue;
1814
1815 max_segs = RTE_MAX_MEMSEG_PER_TYPE;
1816 max_pagesz_mem = max_socket_mem - cur_socket_mem;
1817
1818 /* make it multiple of page size */
1819 max_pagesz_mem = RTE_ALIGN_FLOOR(max_pagesz_mem,
1820 hugepage_sz);
1821
1822 EAL_LOG(DEBUG, "Attempting to preallocate "
1823 "%" PRIu64 "M on socket %i",
1824 max_pagesz_mem >> 20, socket_id);
1825
1826 type_msl_idx = 0;
1827 while (cur_pagesz_mem < max_pagesz_mem &&
1828 total_segs < max_segs) {
1829 uint64_t cur_mem;
1830 unsigned int n_segs;
1831
1832 if (msl_idx >= RTE_MAX_MEMSEG_LISTS) {
1833 EAL_LOG(ERR,
1834 "No more space in memseg lists, please increase RTE_MAX_MEMSEG_LISTS");
1835 return -1;
1836 }
1837
1838 msl = &mcfg->memsegs[msl_idx];
1839
1840 cur_mem = get_mem_amount(hugepage_sz,
1841 max_pagesz_mem);
1842 n_segs = cur_mem / hugepage_sz;
1843
1844 if (eal_memseg_list_init(msl, hugepage_sz,
1845 n_segs, socket_id, type_msl_idx,
1846 true)) {
1847 /* failing to allocate a memseg list is
1848 * a serious error.
1849 */
1850 EAL_LOG(ERR, "Cannot allocate memseg list");
1851 return -1;
1852 }
1853
1854 if (eal_memseg_list_alloc(msl, 0)) {
1855 /* if we couldn't allocate VA space, we
1856 * can try with smaller page sizes.
1857 */
1858 EAL_LOG(ERR, "Cannot allocate VA space for memseg list, retrying with different page size");
1859 /* deallocate memseg list */
1860 if (memseg_list_free(msl))
1861 return -1;
1862 break;
1863 }
1864
1865 total_segs += msl->memseg_arr.len;
1866 cur_pagesz_mem = total_segs * hugepage_sz;
1867 type_msl_idx++;
1868 msl_idx++;
1869 }
1870 cur_socket_mem += cur_pagesz_mem;
1871 }
1872 if (cur_socket_mem == 0) {
1873 EAL_LOG(ERR, "Cannot allocate VA space on socket %u",
1874 socket_id);
1875 return -1;
1876 }
1877 }
1878
1879 return 0;
1880 }
1881
1882 static int __rte_unused
1883 memseg_primary_init(void)
1884 {
1885 return eal_dynmem_memseg_lists_init();
1886 }
1887
1888 static int
1889 memseg_secondary_init(void)
1890 {
1891 struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
1892 int msl_idx = 0;
1893 struct rte_memseg_list *msl;
1894
1895 for (msl_idx = 0; msl_idx < RTE_MAX_MEMSEG_LISTS; msl_idx++) {
1896
1897 msl = &mcfg->memsegs[msl_idx];
1898
1899 /* skip empty and external memseg lists */
1900 if (msl->memseg_arr.len == 0 || msl->external)
1901 continue;
1902
1903 if (rte_fbarray_attach(&msl->memseg_arr)) {
1904 EAL_LOG(ERR, "Cannot attach to primary process memseg lists");
1905 return -1;
1906 }
1907
1908 /* preallocate VA space */
1909 if (eal_memseg_list_alloc(msl, 0)) {
1910 EAL_LOG(ERR, "Cannot preallocate VA space for hugepage memory");
1911 return -1;
1912 }
1913 }
1914
1915 return 0;
1916 }
1917
1918 int
1919 rte_eal_memseg_init(void)
1920 {
1921 /* increase rlimit to maximum */
1922 struct rlimit lim;
1923
1924 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1925 const struct internal_config *internal_conf =
1926 eal_get_internal_configuration();
1927 #endif
1928 if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
1929 /* set limit to maximum */
1930 lim.rlim_cur = lim.rlim_max;
1931
1932 if (setrlimit(RLIMIT_NOFILE, &lim) < 0) {
1933 EAL_LOG(DEBUG, "Setting maximum number of open files failed: %s",
1934 strerror(errno));
1935 } else {
1936 EAL_LOG(DEBUG, "Setting maximum number of open files to %"
1937 PRIu64,
1938 (uint64_t)lim.rlim_cur);
1939 }
1940 } else {
1941 EAL_LOG(ERR, "Cannot get current resource limits");
1942 }
1943 #ifndef RTE_EAL_NUMA_AWARE_HUGEPAGES
1944 if (!internal_conf->legacy_mem && rte_socket_count() > 1) {
1945 EAL_LOG(WARNING, "DPDK is running on a NUMA system, but is compiled without NUMA support.");
1946 EAL_LOG(WARNING, "This will have adverse consequences for performance and usability.");
1947 EAL_LOG(WARNING, "Please use --"OPT_LEGACY_MEM" option, or recompile with NUMA support.");
1948 }
1949 #endif
1950
1951 return rte_eal_process_type() == RTE_PROC_PRIMARY ?
1952 #ifndef RTE_ARCH_64
1953 memseg_primary_init_32() :
1954 #else
1955 memseg_primary_init() :
1956 #endif
1957 memseg_secondary_init();
1958 }
1959