/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>

#include <eal_trace_internal.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_common.h>

#include "malloc_heap.h"
#include "malloc_elem.h"
#include "eal_private.h"
#include "eal_memcfg.h"

/* Default count used until rte_memzone_max_set() is called */
#define DEFAULT_MAX_MEMZONE_COUNT 2560

int
rte_memzone_max_set(size_t max)
{
	struct rte_mem_config *mcfg;

	if (eal_get_internal_configuration()->init_complete > 0) {
		EAL_LOG(ERR, "Max memzone cannot be set after EAL init");
		return -1;
	}

	mcfg = rte_eal_get_configuration()->mem_config;
	if (mcfg == NULL) {
		EAL_LOG(ERR, "Failed to set max memzone count");
		return -1;
	}

	mcfg->max_memzone = max;

	return 0;
}

size_t
rte_memzone_max_get(void)
{
	struct rte_mem_config *mcfg;

	mcfg = rte_eal_get_configuration()->mem_config;
	if (mcfg == NULL || mcfg->max_memzone == 0)
		return DEFAULT_MAX_MEMZONE_COUNT;

	return mcfg->max_memzone;
}
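/*
 * Example usage (illustrative sketch, not part of EAL): raising the memzone
 * limit must happen before rte_eal_init(), since rte_memzone_max_set()
 * fails once EAL initialization is complete. The guard macro, function
 * name and count below are hypothetical.
 */
#ifdef MEMZONE_USAGE_EXAMPLE
static int
example_init_with_larger_memzone_limit(int argc, char **argv)
{
	/* must run before rte_eal_init(); returns -1 afterwards */
	if (rte_memzone_max_set(8192) != 0)
		return -1;

	return rte_eal_init(argc, argv);
}
#endif /* MEMZONE_USAGE_EXAMPLE */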
static inline const struct rte_memzone *
memzone_lookup_thread_unsafe(const char *name)
{
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	const struct rte_memzone *mz;
	int i = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	/*
	 * the algorithm is not optimal (linear), but there are few
	 * zones and this function should be called at init only
	 */
	i = rte_fbarray_find_next_used(arr, 0);
	while (i >= 0) {
		mz = rte_fbarray_get(arr, i);
		if (mz->addr != NULL &&
				!strncmp(name, mz->name, RTE_MEMZONE_NAMESIZE))
			return mz;
		i = rte_fbarray_find_next_used(arr, i + 1);
	}
	return NULL;
}

#define MEMZONE_KNOWN_FLAGS (RTE_MEMZONE_2MB \
			| RTE_MEMZONE_1GB \
			| RTE_MEMZONE_16MB \
			| RTE_MEMZONE_16GB \
			| RTE_MEMZONE_256KB \
			| RTE_MEMZONE_256MB \
			| RTE_MEMZONE_512MB \
			| RTE_MEMZONE_4GB \
			| RTE_MEMZONE_SIZE_HINT_ONLY \
			| RTE_MEMZONE_IOVA_CONTIG \
			)

static const struct rte_memzone *
memzone_reserve_aligned_thread_unsafe(const char *name, size_t len,
		int socket_id, unsigned int flags, unsigned int align,
		unsigned int bound)
{
	struct rte_memzone *mz;
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	void *mz_addr;
	size_t requested_len;
	int mz_idx;
	bool contig;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	/* no more room in config */
	if (arr->count >= arr->len) {
		EAL_LOG(ERR,
			"%s(): Number of requested memzone segments exceeds maximum %u",
			__func__, arr->len);
		rte_errno = ENOSPC;
		return NULL;
	}

	if (strlen(name) > sizeof(mz->name) - 1) {
		EAL_LOG(DEBUG, "%s(): memzone <%s>: name too long",
			__func__, name);
		rte_errno = ENAMETOOLONG;
		return NULL;
	}

	/* zone already exists */
	if ((memzone_lookup_thread_unsafe(name)) != NULL) {
		EAL_LOG(DEBUG, "%s(): memzone <%s> already exists",
			__func__, name);
		rte_errno = EEXIST;
		return NULL;
	}

	/* if alignment is not a power of two */
	if (align && !rte_is_power_of_2(align)) {
		EAL_LOG(ERR, "%s(): Invalid alignment: %u", __func__,
			align);
		rte_errno = EINVAL;
		return NULL;
	}

	/* alignment less than cache size is not allowed */
	if (align < RTE_CACHE_LINE_SIZE)
		align = RTE_CACHE_LINE_SIZE;

	/* align length on cache boundary. Check for overflow before doing so */
	if (len > SIZE_MAX - RTE_CACHE_LINE_MASK) {
		rte_errno = EINVAL; /* requested size too big */
		return NULL;
	}

	len = RTE_ALIGN_CEIL(len, RTE_CACHE_LINE_SIZE);

	/* save minimal requested length */
	requested_len = RTE_MAX((size_t)RTE_CACHE_LINE_SIZE, len);

	/* check that boundary condition is valid */
	if (bound != 0 && (requested_len > bound || !rte_is_power_of_2(bound))) {
		rte_errno = EINVAL;
		return NULL;
	}

	if ((socket_id != SOCKET_ID_ANY) && socket_id < 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	if ((flags & ~MEMZONE_KNOWN_FLAGS) != 0) {
		rte_errno = EINVAL;
		return NULL;
	}

	/* only set socket to SOCKET_ID_ANY if we aren't allocating for an
	 * external heap.
	 */
	if (!rte_eal_has_hugepages() && socket_id < RTE_MAX_NUMA_NODES)
		socket_id = SOCKET_ID_ANY;

	contig = (flags & RTE_MEMZONE_IOVA_CONTIG) != 0;
	/* malloc only cares about size flags, remove contig flag from flags */
	flags &= ~RTE_MEMZONE_IOVA_CONTIG;

	if (len == 0 && bound == 0) {
		/* no size constraints were placed, so use malloc elem len */
		requested_len = 0;
		mz_addr = malloc_heap_alloc_biggest(socket_id, flags, align, contig);
	} else {
		if (len == 0)
			requested_len = bound;

		/* allocate memory on heap */
		mz_addr = malloc_heap_alloc(requested_len, socket_id, flags, align, bound, contig);
	}
	if (mz_addr == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	struct malloc_elem *elem = malloc_elem_from_data(mz_addr);

	/* fill the zone in config */
	mz_idx = rte_fbarray_find_next_free(arr, 0);

	if (mz_idx < 0) {
		mz = NULL;
	} else {
		rte_fbarray_set_used(arr, mz_idx);
		mz = rte_fbarray_get(arr, mz_idx);
	}

	if (mz == NULL) {
		EAL_LOG(ERR, "%s(): Cannot find free memzone", __func__);
		malloc_heap_free(elem);
		rte_errno = ENOSPC;
		return NULL;
	}

	strlcpy(mz->name, name, sizeof(mz->name));
	mz->iova = rte_malloc_virt2iova(mz_addr);
	mz->addr = mz_addr;
	mz->len = requested_len == 0 ?
			elem->size - elem->pad - MALLOC_ELEM_OVERHEAD :
			requested_len;
	mz->hugepage_sz = elem->msl->page_sz;
	mz->socket_id = elem->msl->socket_id;
	mz->flags = 0;

	return mz;
}

static const struct rte_memzone *
rte_memzone_reserve_thread_safe(const char *name, size_t len, int socket_id,
		unsigned int flags, unsigned int align, unsigned int bound)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *mz = NULL;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	mz = memzone_reserve_aligned_thread_unsafe(
		name, len, socket_id, flags, align, bound);

	rte_eal_trace_memzone_reserve(name, len, socket_id, flags, align,
		bound, mz);

	rte_rwlock_write_unlock(&mcfg->mlock);

	return mz;
}
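/*
 * Example usage (illustrative sketch, not part of EAL): reserving a zone
 * through the flag validation above. RTE_MEMZONE_2MB combined with
 * RTE_MEMZONE_SIZE_HINT_ONLY treats the page size as a preference rather
 * than a hard requirement, and RTE_MEMZONE_IOVA_CONTIG requests IOVA-
 * contiguous memory. The guard macro, function name, zone name and size
 * are hypothetical.
 */
#ifdef MEMZONE_USAGE_EXAMPLE
static const struct rte_memzone *
example_reserve_dma_zone(void)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned("example_dma_zone", 4 << 20,
			SOCKET_ID_ANY,
			RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY |
			RTE_MEMZONE_IOVA_CONTIG,
			RTE_CACHE_LINE_SIZE);
	if (mz == NULL)
		fprintf(stderr, "reserve failed: %s\n",
			rte_strerror(rte_errno));

	return mz;
}
#endif /* MEMZONE_USAGE_EXAMPLE */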
/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment and boundary). If the allocation cannot be done,
 * return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_bounded(const char *name, size_t len, int socket_id,
		unsigned flags, unsigned align, unsigned bound)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
			align, bound);
}

/*
 * Return a pointer to a correctly filled memzone descriptor (with a
 * specified alignment). If the allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
		unsigned flags, unsigned align)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id, flags,
			align, 0);
}

/*
 * Return a pointer to a correctly filled memzone descriptor. If the
 * allocation cannot be done, return NULL.
 */
const struct rte_memzone *
rte_memzone_reserve(const char *name, size_t len, int socket_id,
		unsigned flags)
{
	return rte_memzone_reserve_thread_safe(name, len, socket_id,
			flags, RTE_CACHE_LINE_SIZE, 0);
}

int
rte_memzone_free(const struct rte_memzone *mz)
{
	char name[RTE_MEMZONE_NAMESIZE];
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	struct rte_memzone *found_mz;
	int ret = 0;
	void *addr = NULL;
	unsigned idx;

	if (mz == NULL)
		return -EINVAL;

	rte_strlcpy(name, mz->name, RTE_MEMZONE_NAMESIZE);
	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	rte_rwlock_write_lock(&mcfg->mlock);

	idx = rte_fbarray_find_idx(arr, mz);
	found_mz = rte_fbarray_get(arr, idx);

	if (found_mz == NULL) {
		ret = -EINVAL;
	} else if (found_mz->addr == NULL) {
		EAL_LOG(ERR, "Memzone is not allocated");
		ret = -EINVAL;
	} else {
		addr = found_mz->addr;
		memset(found_mz, 0, sizeof(*found_mz));
		rte_fbarray_set_free(arr, idx);
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	rte_eal_trace_memzone_free(name, addr, ret);

	rte_free(addr);

	return ret;
}

/*
 * Look up the memzone identified by the given name
 */
const struct rte_memzone *
rte_memzone_lookup(const char *name)
{
	struct rte_mem_config *mcfg;
	const struct rte_memzone *memzone = NULL;

	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_read_lock(&mcfg->mlock);

	memzone = memzone_lookup_thread_unsafe(name);

	rte_rwlock_read_unlock(&mcfg->mlock);

	rte_eal_trace_memzone_lookup(name, memzone);
	return memzone;
}
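/*
 * Example usage (illustrative sketch, not part of EAL): the common
 * "reserve or look up" idiom built from the two calls above. When the
 * zone already exists (e.g. it was created by the primary process),
 * rte_memzone_reserve() fails with rte_errno set to EEXIST and the
 * existing zone can be found by name. Guard macro and names are
 * hypothetical.
 */
#ifdef MEMZONE_USAGE_EXAMPLE
static const struct rte_memzone *
example_get_or_reserve(const char *zone_name, size_t zone_len)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve(zone_name, zone_len, SOCKET_ID_ANY, 0);
	if (mz == NULL && rte_errno == EEXIST)
		mz = rte_memzone_lookup(zone_name);

	return mz;
}
#endif /* MEMZONE_USAGE_EXAMPLE */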
struct memzone_info {
	FILE *f;
	uint64_t total_size;
};

static void
dump_memzone(const struct rte_memzone *mz, void *arg)
{
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg_list *msl = NULL;
	struct memzone_info *info = arg;
	void *cur_addr, *mz_end;
	struct rte_memseg *ms;
	int mz_idx, ms_idx;
	FILE *f = info->f;
	size_t page_sz;

	mz_idx = rte_fbarray_find_idx(&mcfg->memzones, mz);
	info->total_size += mz->len;

	fprintf(f, "Zone %u: name:<%s>, len:0x%zx, virt:%p, "
		"socket_id:%"PRId32", flags:%"PRIx32"\n",
		mz_idx,
		mz->name,
		mz->len,
		mz->addr,
		mz->socket_id,
		mz->flags);

	/* go through each page occupied by this memzone */
	msl = rte_mem_virt2memseg_list(mz->addr);
	if (!msl) {
		EAL_LOG(DEBUG, "Skipping bad memzone");
		return;
	}
	page_sz = (size_t)mz->hugepage_sz;
	cur_addr = RTE_PTR_ALIGN_FLOOR(mz->addr, page_sz);
	mz_end = RTE_PTR_ADD(cur_addr, mz->len);

	fprintf(f, "physical segments used:\n");
	ms_idx = RTE_PTR_DIFF(mz->addr, msl->base_va) / page_sz;
	ms = rte_fbarray_get(&msl->memseg_arr, ms_idx);

	do {
		fprintf(f, "  addr: %p iova: 0x%" PRIx64 " "
			"len: 0x%zx "
			"pagesz: 0x%zx\n",
			cur_addr, ms->iova, ms->len, page_sz);

		/* advance VA to next page */
		cur_addr = RTE_PTR_ADD(cur_addr, page_sz);

		/* memzones occupy contiguous segments */
		++ms;
	} while (cur_addr < mz_end);
}

/* Dump all reserved memory zones to the given file */
void
rte_memzone_dump(FILE *f)
{
	struct memzone_info info = { .f = f };

	rte_memzone_walk(dump_memzone, &info);
	fprintf(f, "Total Memory Zones size = %"PRIu64"M\n",
		info.total_size / (1024 * 1024));
}

/*
 * Init the memzone subsystem
 */
int
rte_eal_memzone_init(void)
{
	struct rte_mem_config *mcfg;
	int ret = 0;

	/* get pointer to global configuration */
	mcfg = rte_eal_get_configuration()->mem_config;

	rte_rwlock_write_lock(&mcfg->mlock);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			rte_fbarray_init(&mcfg->memzones, "memzone",
			rte_memzone_max_get(), sizeof(struct rte_memzone))) {
		EAL_LOG(ERR, "Cannot allocate memzone list");
		ret = -1;
	} else if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
			rte_fbarray_attach(&mcfg->memzones)) {
		EAL_LOG(ERR, "Cannot attach to memzone list");
		ret = -1;
	}

	rte_rwlock_write_unlock(&mcfg->mlock);

	return ret;
}

/* Walk all reserved memory zones */
void rte_memzone_walk(void (*func)(const struct rte_memzone *, void *),
		void *arg)
{
	struct rte_mem_config *mcfg;
	struct rte_fbarray *arr;
	int i;

	mcfg = rte_eal_get_configuration()->mem_config;
	arr = &mcfg->memzones;

	rte_rwlock_read_lock(&mcfg->mlock);
	i = rte_fbarray_find_next_used(arr, 0);
	while (i >= 0) {
		struct rte_memzone *mz = rte_fbarray_get(arr, i);
		(*func)(mz, arg);
		i = rte_fbarray_find_next_used(arr, i + 1);
	}
	rte_rwlock_read_unlock(&mcfg->mlock);
}
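/*
 * Example usage (illustrative sketch, not part of EAL): totalling reserved
 * memory with rte_memzone_walk(), the same pattern rte_memzone_dump() uses
 * above. Guard macro and names are hypothetical.
 */
#ifdef MEMZONE_USAGE_EXAMPLE
static void
example_sum_cb(const struct rte_memzone *mz, void *arg)
{
	size_t *total = arg;

	*total += mz->len;
}

static size_t
example_total_memzone_bytes(void)
{
	size_t total = 0;

	rte_memzone_walk(example_sum_cb, &total);
	return total;
}
#endif /* MEMZONE_USAGE_EXAMPLE */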