/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Dmitry Kozlyuk
 */

#include <rte_errno.h>

#include "eal_internal_cfg.h"
#include "eal_memalloc.h"
#include "eal_memcfg.h"
#include "eal_private.h"
#include "eal_windows.h"

int
eal_memalloc_get_seg_fd(int list_idx, int seg_idx)
{
	/* Hugepages have no associated files in Windows. */
	RTE_SET_USED(list_idx);
	RTE_SET_USED(seg_idx);
	EAL_LOG_NOT_IMPLEMENTED();
	return -ENOTSUP;
}

int
eal_memalloc_get_seg_fd_offset(int list_idx, int seg_idx, size_t *offset)
{
	/* Hugepages have no associated files in Windows. */
	RTE_SET_USED(list_idx);
	RTE_SET_USED(seg_idx);
	RTE_SET_USED(offset);
	EAL_LOG_NOT_IMPLEMENTED();
	return -ENOTSUP;
}

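/*
 * Allocate and commit one hugepage-backed segment.
 *
 * If requested_addr is NULL, new address space is reserved; otherwise the
 * already reserved region at requested_addr is committed. The page is then
 * touched to force physical allocation, and QueryWorkingSetEx() is used to
 * verify that a hugepage on the requested NUMA node was actually obtained.
 * Returns 0 on success, -1 on failure (the segment descriptor is left as-is).
 */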
static int
alloc_seg(struct rte_memseg *ms, void *requested_addr, int socket_id,
	struct hugepage_info *hi)
{
	HANDLE current_process;
	unsigned int numa_node;
	size_t alloc_sz;
	void *addr;
	rte_iova_t iova = RTE_BAD_IOVA;
	PSAPI_WORKING_SET_EX_INFORMATION info;
	PSAPI_WORKING_SET_EX_BLOCK *page;

	if (ms->len > 0) {
		/* If a segment is already allocated as needed, return it. */
		if ((ms->addr == requested_addr) &&
			(ms->socket_id == socket_id) &&
			(ms->hugepage_sz == hi->hugepage_sz)) {
			return 0;
		}

		/* Bugcheck, should not happen. */
		EAL_LOG(DEBUG, "Attempted to reallocate segment %p "
			"(size %zu) on socket %d", ms->addr,
			ms->len, ms->socket_id);
		return -1;
	}

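	/* Each segment covers exactly one hugepage; the process handle is
	 * needed later for QueryWorkingSetEx().
	 */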
	current_process = GetCurrentProcess();
	numa_node = eal_socket_numa_node(socket_id);
	alloc_sz = hi->hugepage_sz;

	if (requested_addr == NULL) {
		/* Request a new chunk of memory from OS. */
		addr = eal_mem_alloc_socket(alloc_sz, socket_id);
		if (addr == NULL) {
			EAL_LOG(DEBUG, "Cannot allocate %zu bytes "
				"on socket %d", alloc_sz, socket_id);
			return -1;
		}
	} else {
		/* Requested address is already reserved, commit memory. */
		addr = eal_mem_commit(requested_addr, alloc_sz, socket_id);

		/* During commitment, memory is temporarily freed and might
		 * be allocated by a different non-EAL thread. This is a fatal
		 * error, because it breaks MSL assumptions.
		 */
		if ((addr != NULL) && (addr != requested_addr)) {
			EAL_LOG(CRIT, "Address %p occupied by an alien "
				"allocation - MSL is not VA-contiguous!",
				requested_addr);
			return -1;
		}

		if (addr == NULL) {
			EAL_LOG(DEBUG, "Cannot commit reserved memory %p "
				"(size %zu) on socket %d",
				requested_addr, alloc_sz, socket_id);
			return -1;
		}
	}

	/* Force OS to allocate a physical page and select a NUMA node.
	 * Hugepages are not pageable in Windows, so there's no race
	 * for physical address.
	 */
	*(volatile int *)addr = *(volatile int *)addr;

	iova = rte_mem_virt2iova(addr);
	if (iova == RTE_BAD_IOVA) {
		EAL_LOG(DEBUG,
			"Cannot get IOVA of allocated segment");
		goto error;
	}

	/* Only "Ex" function can handle hugepages. */
	info.VirtualAddress = addr;
	if (!QueryWorkingSetEx(current_process, &info, sizeof(info))) {
		RTE_LOG_WIN32_ERR("QueryWorkingSetEx(%p)", addr);
		goto error;
	}

	page = &info.VirtualAttributes;
	if (!page->Valid || !page->LargePage) {
		EAL_LOG(DEBUG, "Got regular page instead of a hugepage");
		goto error;
	}
	if (page->Node != numa_node) {
		EAL_LOG(DEBUG,
			"NUMA node hint %u (socket %d) not respected, got %u",
			numa_node, socket_id, page->Node);
		goto error;
	}

	ms->addr = addr;
	ms->hugepage_sz = hi->hugepage_sz;
	ms->len = alloc_sz;
	ms->nchannel = rte_memory_get_nchannel();
	ms->nrank = rte_memory_get_nrank();
	ms->iova = iova;
	ms->socket_id = socket_id;

	return 0;

error:
	/* Only jump here when `addr` and `alloc_sz` are valid. */
	if (eal_mem_decommit(addr, alloc_sz) && (rte_errno == EADDRNOTAVAIL)) {
		/* During decommitment, memory is temporarily returned
		 * to the system and the address may become unavailable.
		 */
		EAL_LOG(CRIT, "Address %p occupied by an alien "
			"allocation - MSL is not VA-contiguous!", addr);
	}
	return -1;
}

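/*
 * Decommit the memory backing a segment and clear its descriptor.
 * Returns 0 on success, -1 if decommitting failed.
 */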
static int
free_seg(struct rte_memseg *ms)
{
	if (eal_mem_decommit(ms->addr, ms->len)) {
		if (rte_errno == EADDRNOTAVAIL) {
			/* See alloc_seg() for explanation. */
			EAL_LOG(CRIT, "Address %p occupied by an alien "
				"allocation - MSL is not VA-contiguous!",
				ms->addr);
		}
		return -1;
	}

	/* Must clear the segment, because alloc_seg() inspects it. */
	memset(ms, 0, sizeof(*ms));
	return 0;
}

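/* Parameters for alloc_seg_walk():
 * hi             - hugepage info for the requested page size,
 * ms             - optional output array for allocated segments,
 * page_sz        - requested page size,
 * segs_allocated - output: number of segments actually allocated,
 * n_segs         - number of segments requested,
 * socket         - requested NUMA socket,
 * exact          - if true, allocate all n_segs segments or none.
 */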
struct alloc_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg **ms;
	size_t page_sz;
	unsigned int segs_allocated;
	unsigned int n_segs;
	int socket;
	bool exact;
};

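/*
 * Memseg list walk callback that tries to allocate the requested pages
 * from a single matching list. Returns 0 to continue the walk (list does
 * not match, has no free space, or nothing could be allocated), a positive
 * value to stop it once pages have been allocated, or -1 on failure when
 * an exact count was required.
 */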
static int
alloc_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct alloc_walk_param *wa = arg;
	struct rte_memseg_list *cur_msl;
	size_t page_sz;
	int cur_idx, start_idx, j;
	unsigned int msl_idx, need, i;

	if (msl->page_sz != wa->page_sz)
		return 0;
	if (msl->socket_id != wa->socket)
		return 0;

	page_sz = (size_t)msl->page_sz;

	msl_idx = msl - mcfg->memsegs;
	cur_msl = &mcfg->memsegs[msl_idx];

	need = wa->n_segs;

	/* try finding space in memseg list */
	if (wa->exact) {
		/* if we require an exact number of pages in a list, find them */
		cur_idx = rte_fbarray_find_next_n_free(
			&cur_msl->memseg_arr, 0, need);
		if (cur_idx < 0)
			return 0;
		start_idx = cur_idx;
	} else {
		int cur_len;

		/* we don't require an exact number of pages, so we're going
		 * to go for a best-effort allocation: find the biggest
		 * unused block and go with that.
		 */
		cur_idx = rte_fbarray_find_biggest_free(
			&cur_msl->memseg_arr, 0);
		if (cur_idx < 0)
			return 0;
		start_idx = cur_idx;
		/* adjust the size to possibly be smaller than original
		 * request, but do not allow it to be bigger.
		 */
		cur_len = rte_fbarray_find_contig_free(
			&cur_msl->memseg_arr, cur_idx);
		need = RTE_MIN(need, (unsigned int)cur_len);
	}

	for (i = 0; i < need; i++, cur_idx++) {
		struct rte_memseg *cur;
		void *map_addr;

		cur = rte_fbarray_get(&cur_msl->memseg_arr, cur_idx);
		map_addr = RTE_PTR_ADD(cur_msl->base_va, cur_idx * page_sz);

		if (alloc_seg(cur, map_addr, wa->socket, wa->hi)) {
			EAL_LOG(DEBUG, "attempted to allocate %i segments, "
				"but only %i were allocated", need, i);

			/* if exact number wasn't requested, stop */
			if (!wa->exact)
				goto out;

			/* clean up */
			for (j = start_idx; j < cur_idx; j++) {
				struct rte_memseg *tmp;
				struct rte_fbarray *arr = &cur_msl->memseg_arr;

				tmp = rte_fbarray_get(arr, j);
				rte_fbarray_set_free(arr, j);

				if (free_seg(tmp))
					EAL_LOG(DEBUG, "Cannot free page");
			}
			/* clear the list */
			if (wa->ms)
				memset(wa->ms, 0, sizeof(*wa->ms) * wa->n_segs);

			return -1;
		}
		if (wa->ms)
			wa->ms[i] = cur;

		rte_fbarray_set_used(&cur_msl->memseg_arr, cur_idx);
	}

out:
	wa->segs_allocated = i;
	if (i > 0)
		cur_msl->version++;

	/* if we didn't allocate any segments, move on to the next list */
	return i > 0;
}

struct free_walk_param {
	struct hugepage_info *hi;
	struct rte_memseg *ms;
};
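
/*
 * Memseg list walk callback that frees the segment in free_walk_param.
 * Returns 0 if the segment does not belong to this list (continue the
 * walk), 1 when the segment was freed, or -1 if freeing failed.
 */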
static int
free_seg_walk(const struct rte_memseg_list *msl, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *found_msl;
	struct free_walk_param *wa = arg;
	uintptr_t start_addr, end_addr;
	int msl_idx, seg_idx, ret;

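	/* Only handle the list whose VA range contains the segment. */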
	start_addr = (uintptr_t) msl->base_va;
	end_addr = start_addr + msl->len;

	if ((uintptr_t)wa->ms->addr < start_addr ||
		(uintptr_t)wa->ms->addr >= end_addr)
		return 0;

	msl_idx = msl - mcfg->memsegs;
	seg_idx = RTE_PTR_DIFF(wa->ms->addr, start_addr) / msl->page_sz;

	/* msl is const */
	found_msl = &mcfg->memsegs[msl_idx];
	found_msl->version++;

	rte_fbarray_set_free(&found_msl->memseg_arr, seg_idx);

	ret = free_seg(wa->ms);

	return (ret < 0) ? (-1) : 1;
}

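/*
 * Allocate n_segs pages of page_sz bytes on the given socket. With
 * exact == true the call either allocates all pages or fails; otherwise
 * it allocates as many as fit in a single matching memseg list. Returns
 * the number of segments allocated, or a negative value on error.
 */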
int
eal_memalloc_alloc_seg_bulk(struct rte_memseg **ms, int n_segs,
	size_t page_sz, int socket, bool exact)
{
	unsigned int i;
	int ret = -1;
	struct alloc_walk_param wa;
	struct hugepage_info *hi = NULL;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (internal_conf->legacy_mem) {
		EAL_LOG(ERR, "dynamic allocation not supported in legacy mode");
		return -ENOTSUP;
	}

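	/* Find the hugepage_info entry matching the requested page size. */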
	for (i = 0; i < internal_conf->num_hugepage_sizes; i++) {
		struct hugepage_info *hpi = &internal_conf->hugepage_info[i];
		if (page_sz == hpi->hugepage_sz) {
			hi = hpi;
			break;
		}
	}
	if (!hi) {
		EAL_LOG(ERR, "cannot find relevant hugepage_info entry");
		return -1;
	}

	memset(&wa, 0, sizeof(wa));
	wa.exact = exact;
	wa.hi = hi;
	wa.ms = ms;
	wa.n_segs = n_segs;
	wa.page_sz = page_sz;
	wa.socket = socket;
	wa.segs_allocated = 0;

	/* memalloc is locked, so it's safe to use thread-unsafe version */
	ret = rte_memseg_list_walk_thread_unsafe(alloc_seg_walk, &wa);
	if (ret == 0) {
		EAL_LOG(ERR, "cannot find suitable memseg_list");
		ret = -1;
	} else if (ret > 0) {
		ret = (int)wa.segs_allocated;
	}

	return ret;
}

struct rte_memseg *
eal_memalloc_alloc_seg(size_t page_sz, int socket)
{
	struct rte_memseg *ms = NULL;
	eal_memalloc_alloc_seg_bulk(&ms, 1, page_sz, socket, true);
	return ms;
}

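/*
 * Free n_segs previously allocated segments. Returns 0 if all segments
 * were freed, -1 if any of them could not be freed or if running in
 * legacy memory mode.
 */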
int
eal_memalloc_free_seg_bulk(struct rte_memseg **ms, int n_segs)
{
	int seg, ret = 0;
	struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* dynamic free not supported in legacy mode */
	if (internal_conf->legacy_mem)
		return -1;

	for (seg = 0; seg < n_segs; seg++) {
		struct rte_memseg *cur = ms[seg];
		struct hugepage_info *hi = NULL;
		struct free_walk_param wa;
		size_t i;
		int walk_res;

		/* if this page is marked as unfreeable, fail */
		if (cur->flags & RTE_MEMSEG_FLAG_DO_NOT_FREE) {
			EAL_LOG(DEBUG, "Page is not allowed to be freed");
			ret = -1;
			continue;
		}

		memset(&wa, 0, sizeof(wa));

		for (i = 0; i < RTE_DIM(internal_conf->hugepage_info); i++) {
			hi = &internal_conf->hugepage_info[i];
			if (cur->hugepage_sz == hi->hugepage_sz)
				break;
		}
		if (i == RTE_DIM(internal_conf->hugepage_info)) {
			EAL_LOG(ERR, "Can't find relevant hugepage_info entry");
			ret = -1;
			continue;
		}

		wa.ms = cur;
		wa.hi = hi;

		/* memalloc is locked, so it's safe to use thread-unsafe version */
		walk_res = rte_memseg_list_walk_thread_unsafe(free_seg_walk,
			&wa);
		if (walk_res == 1)
			continue;
		if (walk_res == 0)
			EAL_LOG(ERR, "Couldn't find memseg list");
		ret = -1;
	}
	return ret;
}

int
eal_memalloc_free_seg(struct rte_memseg *ms)
{
	return eal_memalloc_free_seg_bulk(&ms, 1);
}

int
eal_memalloc_sync_with_primary(void)
{
	/* No multi-process support. */
	EAL_LOG_NOT_IMPLEMENTED();
	return -ENOTSUP;
}

int
eal_memalloc_cleanup(void)
{
	/* not implemented */
	return 0;
}

int
eal_memalloc_init(void)
{
	/* No action required. */
	return 0;
}