/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/util.h"
#include "spdk/env_dpdk.h"
#include "spdk/log.h"

#include "env_internal.h"

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_version.h>

/*
 * Translate a virtual address to an I/O (physical) address.  Addresses that
 * came from the rte_malloc heaps can be resolved by DPDK directly; anything
 * else falls back to spdk_vtophys().
 */
static uint64_t
virt_to_phys(void *vaddr)
{
	uint64_t ret;

	ret = rte_malloc_virt2iova(vaddr);
	if (ret != RTE_BAD_IOVA) {
		return ret;
	}

	return spdk_vtophys(vaddr, NULL);
}

void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf;

	if (flags == 0) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	buf = rte_malloc_socket(NULL, size, align, socket_id);
	if (buf && phys_addr) {
#ifdef DEBUG
		SPDK_ERRLOG("phys_addr param in spdk_malloc() is deprecated\n");
#endif
		*phys_addr = virt_to_phys(buf);
	}
	return buf;
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf;

	if (flags == 0) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	buf = rte_zmalloc_socket(NULL, size, align, socket_id);
	if (buf && phys_addr) {
#ifdef DEBUG
		SPDK_ERRLOG("phys_addr param in spdk_zmalloc() is deprecated\n");
#endif
		*phys_addr = virt_to_phys(buf);
	}
	return buf;
}

void *
spdk_realloc(void *buf, size_t size, size_t align)
{
	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	return rte_realloc(buf, size, align);
}

void
spdk_free(void *buf)
{
	rte_free(buf);
}

void *
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_malloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}
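
/*
 * Usage sketch (illustrative only, kept as a comment and not compiled): a
 * typical caller allocates a zeroed, DMA-capable buffer through the wrappers
 * above and releases it with spdk_free().  The 4 KiB size and 0x1000 alignment
 * are arbitrary example values; passing NULL for phys_addr avoids the
 * deprecated physical-address output parameter.
 *
 *	void *buf = spdk_zmalloc(4096, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
 *				 SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
 *
 *	if (buf != NULL) {
 *		... use buf as an I/O buffer ...
 *		spdk_free(buf);
 *	}
 */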

void *
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_zmalloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}

void *
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_malloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_zmalloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *new_buf;

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	new_buf = rte_realloc(buf, size, align);
	if (new_buf && phys_addr) {
		*phys_addr = virt_to_phys(new_buf);
	}
	return new_buf;
}

void
spdk_dma_free(void *buf)
{
	spdk_free(buf);
}

void *
spdk_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			     unsigned flags, unsigned align)
{
	const struct rte_memzone *mz;
	unsigned dpdk_flags = 0;

	if ((flags & SPDK_MEMZONE_NO_IOVA_CONTIG) == 0) {
		dpdk_flags |= RTE_MEMZONE_IOVA_CONTIG;
	}

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	mz = rte_memzone_reserve_aligned(name, len, socket_id, dpdk_flags, align);

	if (mz != NULL) {
		memset(mz->addr, 0, len);
		return mz->addr;
	} else {
		return NULL;
	}
}

void *
spdk_memzone_reserve(const char *name, size_t len, int socket_id, unsigned flags)
{
	return spdk_memzone_reserve_aligned(name, len, socket_id, flags,
					    RTE_CACHE_LINE_SIZE);
}

void *
spdk_memzone_lookup(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return mz->addr;
	} else {
		return NULL;
	}
}

int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}

void
spdk_memzone_dump(FILE *f)
{
	rte_memzone_dump(f);
}

struct spdk_mempool *
spdk_mempool_create_ctor(const char *name, size_t count,
			 size_t ele_size, size_t cache_size, int socket_id,
			 spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg)
{
	struct rte_mempool *mp;
	size_t tmp;

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	/* No more than half of all elements can be in cache */
	tmp = (count / 2) / rte_lcore_count();
	if (cache_size > tmp) {
		cache_size = tmp;
	}

	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
	}

	mp = rte_mempool_create(name, count, ele_size, cache_size,
				0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
				socket_id, MEMPOOL_F_NO_IOVA_CONTIG);

	return (struct spdk_mempool *)mp;
}

struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count,
		    size_t ele_size, size_t cache_size, int socket_id)
{
	return spdk_mempool_create_ctor(name, count, ele_size, cache_size, socket_id,
					NULL, NULL);
}

char *
spdk_mempool_get_name(struct spdk_mempool *mp)
{
	return ((struct rte_mempool *)mp)->name;
}
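
/*
 * Usage sketch (illustrative only, kept as a comment and not compiled):
 * creating a pool of fixed-size elements and cycling one element through it.
 * The pool name, element count, element size and cache size are arbitrary
 * example values; spdk_mempool_get(), spdk_mempool_put() and
 * spdk_mempool_free() are defined later in this file.
 *
 *	struct spdk_mempool *pool;
 *	void *ele;
 *
 *	pool = spdk_mempool_create("example_pool", 1024, 4096, 64,
 *				   SPDK_ENV_SOCKET_ID_ANY);
 *	if (pool != NULL) {
 *		ele = spdk_mempool_get(pool);
 *		if (ele != NULL) {
 *			spdk_mempool_put(pool, ele);
 *		}
 *		spdk_mempool_free(pool);
 *	}
 */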

void
spdk_mempool_free(struct spdk_mempool *mp)
{
	rte_mempool_free((struct rte_mempool *)mp);
}

void *
spdk_mempool_get(struct spdk_mempool *mp)
{
	void *ele = NULL;
	int rc;

	rc = rte_mempool_get((struct rte_mempool *)mp, &ele);
	if (rc != 0) {
		return NULL;
	}
	return ele;
}

int
spdk_mempool_get_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	return rte_mempool_get_bulk((struct rte_mempool *)mp, ele_arr, count);
}

void
spdk_mempool_put(struct spdk_mempool *mp, void *ele)
{
	rte_mempool_put((struct rte_mempool *)mp, ele);
}

void
spdk_mempool_put_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count);
}

size_t
spdk_mempool_count(const struct spdk_mempool *pool)
{
	return rte_mempool_avail_count((struct rte_mempool *)pool);
}

uint32_t
spdk_mempool_obj_iter(struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb,
		      void *obj_cb_arg)
{
	return rte_mempool_obj_iter((struct rte_mempool *)mp, (rte_mempool_obj_cb_t *)obj_cb,
				    obj_cb_arg);
}

struct spdk_mempool *
spdk_mempool_lookup(const char *name)
{
	return (struct spdk_mempool *)rte_mempool_lookup(name);
}

bool
spdk_process_is_primary(void)
{
	return (rte_eal_process_type() == RTE_PROC_PRIMARY);
}

uint64_t spdk_get_ticks(void)
{
	return rte_get_timer_cycles();
}

uint64_t spdk_get_ticks_hz(void)
{
	return rte_get_timer_hz();
}

void spdk_delay_us(unsigned int us)
{
	rte_delay_us(us);
}

void spdk_pause(void)
{
	rte_pause();
}

void
spdk_unaffinitize_thread(void)
{
	rte_cpuset_t new_cpuset, orig_cpuset;
	long num_cores, i, orig_num_cores;

	CPU_ZERO(&new_cpuset);

	num_cores = sysconf(_SC_NPROCESSORS_CONF);

	/* Create a mask containing all CPUs */
	for (i = 0; i < num_cores; i++) {
		CPU_SET(i, &new_cpuset);
	}

	rte_thread_get_affinity(&orig_cpuset);
	orig_num_cores = CPU_COUNT(&orig_cpuset);
	if (orig_num_cores < num_cores) {
		for (i = 0; i < orig_num_cores; i++) {
			if (CPU_ISSET(i, &orig_cpuset)) {
				CPU_CLR(i, &new_cpuset);
			}
		}
	}

	rte_thread_set_affinity(&new_cpuset);
}

void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	rte_cpuset_t orig_cpuset;
	void *ret;

	if (cb == NULL) {
		return NULL;
	}

	rte_thread_get_affinity(&orig_cpuset);

	spdk_unaffinitize_thread();

	ret = cb(arg);

	rte_thread_set_affinity(&orig_cpuset);

	return ret;
}

struct spdk_ring *
spdk_ring_create(enum spdk_ring_type type, size_t count, int socket_id)
{
	char ring_name[64];
	static uint32_t ring_num = 0;
	/* RING_F_EXACT_SZ lets the ring hold exactly `count` entries even when
	 * count is not a power of two. */
	unsigned flags = RING_F_EXACT_SZ;

	switch (type) {
	case SPDK_RING_TYPE_SP_SC:
		flags |= RING_F_SP_ENQ | RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_SC:
		flags |= RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_MC:
		flags |= 0;
		break;
	default:
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "ring_%u_%d",
		 __atomic_fetch_add(&ring_num, 1, __ATOMIC_RELAXED), getpid());

	return (struct spdk_ring *)rte_ring_create(ring_name, count, socket_id, flags);
}

void
spdk_ring_free(struct spdk_ring *ring)
{
	rte_ring_free((struct rte_ring *)ring);
}
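
/*
 * Usage sketch (illustrative only, kept as a comment and not compiled): a
 * single-producer/single-consumer ring passing object pointers between two
 * threads.  The ring size and burst size are arbitrary example values;
 * spdk_ring_enqueue() and spdk_ring_dequeue() are defined just below.
 *
 *	struct spdk_ring *ring;
 *	void *msg = ...;
 *	void *objs[8];
 *	size_t n;
 *
 *	ring = spdk_ring_create(SPDK_RING_TYPE_SP_SC, 4096, SPDK_ENV_SOCKET_ID_ANY);
 *	if (ring != NULL) {
 *		n = spdk_ring_enqueue(ring, &msg, 1, NULL);
 *		n = spdk_ring_dequeue(ring, objs, 8);
 *		spdk_ring_free(ring);
 *	}
 */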

size_t
spdk_ring_count(struct spdk_ring *ring)
{
	return rte_ring_count((struct rte_ring *)ring);
}

size_t
spdk_ring_enqueue(struct spdk_ring *ring, void **objs, size_t count,
		  size_t *free_space)
{
	/* Bulk enqueue: either all `count` objects are enqueued or none are. */
	return rte_ring_enqueue_bulk((struct rte_ring *)ring, objs, count,
				     (unsigned int *)free_space);
}

size_t
spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count)
{
	/* Burst dequeue: returns anywhere from 0 up to `count` objects. */
	return rte_ring_dequeue_burst((struct rte_ring *)ring, objs, count, NULL);
}

void
spdk_env_dpdk_dump_mem_stats(FILE *file)
{
	fprintf(file, "DPDK memory size %" PRIu64 "\n", rte_eal_get_physmem_size());
	fprintf(file, "DPDK memory layout\n");
	rte_dump_physmem_layout(file);
	fprintf(file, "DPDK memzones.\n");
	rte_memzone_dump(file);
	fprintf(file, "DPDK mempools.\n");
	rte_mempool_list_dump(file);
	fprintf(file, "DPDK malloc stats.\n");
	rte_malloc_dump_stats(file, NULL);
	fprintf(file, "DPDK malloc heaps.\n");
	rte_malloc_dump_heaps(file);
}
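
/*
 * Usage sketch (illustrative only, kept as a comment and not compiled): the
 * dump helper above writes a human-readable summary of DPDK memory usage to
 * any stdio stream, e.g. stderr or a log file.  The path below is an arbitrary
 * example value.
 *
 *	FILE *fp = fopen("/tmp/dpdk_mem_stats.txt", "w");
 *
 *	if (fp != NULL) {
 *		spdk_env_dpdk_dump_mem_stats(fp);
 *		fclose(fp);
 *	}
 */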