/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/util.h"
#include "spdk/env_dpdk.h"
#include "spdk/log.h"

#include "env_internal.h"

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_version.h>

static uint64_t
virt_to_phys(void *vaddr)
{
	uint64_t ret;

	ret = rte_malloc_virt2iova(vaddr);
	if (ret != RTE_BAD_IOVA) {
		return ret;
	}

	return spdk_vtophys(vaddr, NULL);
}

void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf;

	if (flags == 0) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	buf = rte_malloc_socket(NULL, size, align, socket_id);
	if (buf && phys_addr) {
#ifdef DEBUG
		SPDK_ERRLOG("phys_addr param in spdk_*malloc() is deprecated\n");
#endif
		*phys_addr = virt_to_phys(buf);
	}
	return buf;
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf = spdk_malloc(size, align, phys_addr, socket_id, flags);
	if (buf) {
		memset(buf, 0, size);
	}
	return buf;
}

void *
spdk_realloc(void *buf, size_t size, size_t align)
{
	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	return rte_realloc(buf, size, align);
}

void
spdk_free(void *buf)
{
	rte_free(buf);
}

void *
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_malloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}

void *
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_zmalloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}
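
/*
 * The spdk_dma_malloc()/spdk_dma_zmalloc() wrappers below allocate pinned,
 * DMA-capable, shareable memory from any NUMA node by forwarding to the
 * *_socket() variants above with SPDK_ENV_SOCKET_ID_ANY.
 */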
void *
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_malloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_zmalloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *new_buf;

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	new_buf = rte_realloc(buf, size, align);
	if (new_buf && phys_addr) {
		*phys_addr = virt_to_phys(new_buf);
	}
	return new_buf;
}

void
spdk_dma_free(void *buf)
{
	spdk_free(buf);
}

void *
spdk_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			     unsigned flags, unsigned align)
{
	const struct rte_memzone *mz;
	unsigned dpdk_flags = 0;

	if ((flags & SPDK_MEMZONE_NO_IOVA_CONTIG) == 0) {
		dpdk_flags |= RTE_MEMZONE_IOVA_CONTIG;
	}

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	mz = rte_memzone_reserve_aligned(name, len, socket_id, dpdk_flags, align);

	if (mz != NULL) {
		memset(mz->addr, 0, len);
		return mz->addr;
	} else {
		return NULL;
	}
}

void *
spdk_memzone_reserve(const char *name, size_t len, int socket_id, unsigned flags)
{
	return spdk_memzone_reserve_aligned(name, len, socket_id, flags,
					    RTE_CACHE_LINE_SIZE);
}

void *
spdk_memzone_lookup(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return mz->addr;
	} else {
		return NULL;
	}
}

int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}

void
spdk_memzone_dump(FILE *f)
{
	rte_memzone_dump(f);
}

struct spdk_mempool *
spdk_mempool_create_ctor(const char *name, size_t count,
			 size_t ele_size, size_t cache_size, int socket_id,
			 spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg)
{
	struct rte_mempool *mp;
	size_t tmp;

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	/* No more than half of all elements can be in cache */
	tmp = (count / 2) / rte_lcore_count();
	if (cache_size > tmp) {
		cache_size = tmp;
	}

	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
	}

	mp = rte_mempool_create(name, count, ele_size, cache_size,
				0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
				socket_id, MEMPOOL_F_NO_IOVA_CONTIG);

	return (struct spdk_mempool *)mp;
}

struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count,
		    size_t ele_size, size_t cache_size, int socket_id)
{
	return spdk_mempool_create_ctor(name, count, ele_size, cache_size, socket_id,
					NULL, NULL);
}

char *
spdk_mempool_get_name(struct spdk_mempool *mp)
{
	return ((struct rte_mempool *)mp)->name;
}

void
spdk_mempool_free(struct spdk_mempool *mp)
{
	rte_mempool_free((struct rte_mempool *)mp);
}
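
/*
 * Pop a single element from the pool.  rte_mempool_get() returns non-zero
 * when the pool (including the per-lcore cache) is exhausted; in that case
 * NULL is returned to the caller.
 */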
void *
spdk_mempool_get(struct spdk_mempool *mp)
{
	void *ele = NULL;
	int rc;

	rc = rte_mempool_get((struct rte_mempool *)mp, &ele);
	if (rc != 0) {
		return NULL;
	}
	return ele;
}

int
spdk_mempool_get_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	return rte_mempool_get_bulk((struct rte_mempool *)mp, ele_arr, count);
}

void
spdk_mempool_put(struct spdk_mempool *mp, void *ele)
{
	rte_mempool_put((struct rte_mempool *)mp, ele);
}

void
spdk_mempool_put_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count);
}

size_t
spdk_mempool_count(const struct spdk_mempool *pool)
{
	return rte_mempool_avail_count((struct rte_mempool *)pool);
}

uint32_t
spdk_mempool_obj_iter(struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb,
		      void *obj_cb_arg)
{
	return rte_mempool_obj_iter((struct rte_mempool *)mp, (rte_mempool_obj_cb_t *)obj_cb,
				    obj_cb_arg);
}

struct spdk_mempool *
spdk_mempool_lookup(const char *name)
{
	return (struct spdk_mempool *)rte_mempool_lookup(name);
}

bool
spdk_process_is_primary(void)
{
	return (rte_eal_process_type() == RTE_PROC_PRIMARY);
}

uint64_t spdk_get_ticks(void)
{
	return rte_get_timer_cycles();
}

uint64_t spdk_get_ticks_hz(void)
{
	return rte_get_timer_hz();
}

void spdk_delay_us(unsigned int us)
{
	rte_delay_us(us);
}

void spdk_pause(void)
{
	rte_pause();
}

void
spdk_unaffinitize_thread(void)
{
	rte_cpuset_t new_cpuset, orig_cpuset;
	long num_cores, i, orig_num_cores;

	CPU_ZERO(&new_cpuset);

	num_cores = sysconf(_SC_NPROCESSORS_CONF);

	/* Create a mask containing all CPUs */
	for (i = 0; i < num_cores; i++) {
		CPU_SET(i, &new_cpuset);
	}

	rte_thread_get_affinity(&orig_cpuset);
	orig_num_cores = CPU_COUNT(&orig_cpuset);
	if (orig_num_cores < num_cores) {
		for (i = 0; i < orig_num_cores; i++) {
			if (CPU_ISSET(i, &orig_cpuset)) {
				CPU_CLR(i, &new_cpuset);
			}
		}
	}

	rte_thread_set_affinity(&new_cpuset);
}

void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	rte_cpuset_t orig_cpuset;
	void *ret;

	if (cb == NULL) {
		return NULL;
	}

	rte_thread_get_affinity(&orig_cpuset);

	spdk_unaffinitize_thread();

	ret = cb(arg);

	rte_thread_set_affinity(&orig_cpuset);

	return ret;
}

struct spdk_ring *
spdk_ring_create(enum spdk_ring_type type, size_t count, int socket_id)
{
	char ring_name[64];
	static uint32_t ring_num = 0;
	unsigned flags = RING_F_EXACT_SZ;

	switch (type) {
	case SPDK_RING_TYPE_SP_SC:
		flags |= RING_F_SP_ENQ | RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_SC:
		flags |= RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_MC:
		flags |= 0;
		break;
	default:
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "ring_%u_%d",
		 __atomic_fetch_add(&ring_num, 1, __ATOMIC_RELAXED), getpid());

	return (struct spdk_ring *)rte_ring_create(ring_name, count, socket_id, flags);
}

void
spdk_ring_free(struct spdk_ring *ring)
{
	rte_ring_free((struct rte_ring *)ring);
}

size_t
spdk_ring_count(struct spdk_ring *ring)
{
	return rte_ring_count((struct rte_ring *)ring);
}
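
/*
 * Note on semantics: enqueue uses the DPDK bulk API, so either all `count'
 * objects are enqueued or none are; dequeue uses the burst API and may
 * return fewer objects than requested.
 */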
size_t
spdk_ring_enqueue(struct spdk_ring *ring, void **objs, size_t count,
		  size_t *free_space)
{
	return rte_ring_enqueue_bulk((struct rte_ring *)ring, objs, count,
				     (unsigned int *)free_space);
}

size_t
spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count)
{
	return rte_ring_dequeue_burst((struct rte_ring *)ring, objs, count, NULL);
}

void
spdk_env_dpdk_dump_mem_stats(FILE *file)
{
	fprintf(file, "DPDK memory size %" PRIu64 "\n", rte_eal_get_physmem_size());
	fprintf(file, "DPDK memory layout\n");
	rte_dump_physmem_layout(file);
	fprintf(file, "DPDK memzones.\n");
	rte_memzone_dump(file);
	fprintf(file, "DPDK mempools.\n");
	rte_mempool_list_dump(file);
	fprintf(file, "DPDK malloc stats.\n");
	rte_malloc_dump_stats(file, NULL);
	fprintf(file, "DPDK malloc heaps.\n");
	rte_malloc_dump_heaps(file);
}
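
/*
 * Usage sketch (illustrative only, not part of the upstream file): a typical
 * caller allocates a DMA-safe buffer and releases it with the matching free:
 *
 *	void *buf = spdk_dma_zmalloc(4096, 0x1000, NULL);
 *	if (buf != NULL) {
 *		... submit I/O that references buf ...
 *		spdk_dma_free(buf);
 *	}
 */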