/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"
#include "spdk/util.h"

#include "env_internal.h"

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_version.h>

static uint64_t
virt_to_phys(void *vaddr)
{
	uint64_t ret;

	ret = rte_malloc_virt2iova(vaddr);
	if (ret != RTE_BAD_IOVA) {
		return ret;
	}

	return spdk_vtophys(vaddr, NULL);
}

void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf;

	if (flags == 0) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	buf = rte_malloc_socket(NULL, size, align, socket_id);
	if (buf && phys_addr) {
#ifdef DEBUG
		fprintf(stderr, "phys_addr param in spdk_*malloc() is deprecated\n");
#endif
		*phys_addr = virt_to_phys(buf);
	}
	return buf;
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf = spdk_malloc(size, align, phys_addr, socket_id, flags);

	if (buf) {
		memset(buf, 0, size);
	}
	return buf;
}

void *
spdk_realloc(void *buf, size_t size, size_t align)
{
	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	return rte_realloc(buf, size, align);
}

void
spdk_free(void *buf)
{
	rte_free(buf);
}

void *
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_malloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}

void *
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_zmalloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}
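/*
 * Illustrative usage (sketch only, not part of this file): a caller that
 * needs a DMA-safe, zeroed buffer would typically pair one of the wrappers
 * above with spdk_dma_free(). The 4 KiB size and alignment below are
 * arbitrary example values.
 *
 *	void *buf = spdk_dma_zmalloc(4096, 4096, NULL);
 *	if (buf == NULL) {
 *		return -ENOMEM;
 *	}
 *	... issue I/O using buf ...
 *	spdk_dma_free(buf);
 */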
void *
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_malloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_zmalloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *new_buf;

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	new_buf = rte_realloc(buf, size, align);
	if (new_buf && phys_addr) {
		*phys_addr = virt_to_phys(new_buf);
	}
	return new_buf;
}

void
spdk_dma_free(void *buf)
{
	spdk_free(buf);
}

void *
spdk_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			     unsigned flags, unsigned align)
{
	const struct rte_memzone *mz;
	unsigned dpdk_flags = 0;

#if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0)
	/* Older DPDK versions do not offer such a flag since their
	 * memzones are iova-contiguous by default.
	 */
	if ((flags & SPDK_MEMZONE_NO_IOVA_CONTIG) == 0) {
		dpdk_flags |= RTE_MEMZONE_IOVA_CONTIG;
	}
#endif

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	mz = rte_memzone_reserve_aligned(name, len, socket_id, dpdk_flags, align);

	if (mz != NULL) {
		memset(mz->addr, 0, len);
		return mz->addr;
	} else {
		return NULL;
	}
}

void *
spdk_memzone_reserve(const char *name, size_t len, int socket_id, unsigned flags)
{
	return spdk_memzone_reserve_aligned(name, len, socket_id, flags,
					    RTE_CACHE_LINE_SIZE);
}

void *
spdk_memzone_lookup(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return mz->addr;
	} else {
		return NULL;
	}
}

int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}

void
spdk_memzone_dump(FILE *f)
{
	rte_memzone_dump(f);
}

struct spdk_mempool *
spdk_mempool_create_ctor(const char *name, size_t count,
			 size_t ele_size, size_t cache_size, int socket_id,
			 spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg)
{
	struct rte_mempool *mp;
	size_t tmp;

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	/* No more than half of all elements can be in cache */
	tmp = (count / 2) / rte_lcore_count();
	if (cache_size > tmp) {
		cache_size = tmp;
	}

	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
	}

	mp = rte_mempool_create(name, count, ele_size, cache_size,
				0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
				socket_id, MEMPOOL_F_NO_PHYS_CONTIG);

	return (struct spdk_mempool *)mp;
}

struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count,
		    size_t ele_size, size_t cache_size, int socket_id)
{
	return spdk_mempool_create_ctor(name, count, ele_size, cache_size, socket_id,
					NULL, NULL);
}

char *
spdk_mempool_get_name(struct spdk_mempool *mp)
{
	return ((struct rte_mempool *)mp)->name;
}

void
spdk_mempool_free(struct spdk_mempool *mp)
{
	rte_mempool_free((struct rte_mempool *)mp);
}
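/*
 * Illustrative usage (sketch only): a typical mempool consumer creates a
 * pool once, then gets and puts elements on the hot path. The name, element
 * count, element size, and cache size below are example values, not taken
 * from this file.
 *
 *	struct spdk_mempool *pool;
 *	void *obj;
 *
 *	pool = spdk_mempool_create("example_pool", 1024, 256, 64,
 *				   SPDK_ENV_SOCKET_ID_ANY);
 *	obj = spdk_mempool_get(pool);
 *	if (obj != NULL) {
 *		... use obj ...
 *		spdk_mempool_put(pool, obj);
 *	}
 *	spdk_mempool_free(pool);
 */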
*mp) 264 { 265 void *ele = NULL; 266 int rc; 267 268 rc = rte_mempool_get((struct rte_mempool *)mp, &ele); 269 if (rc != 0) { 270 return NULL; 271 } 272 return ele; 273 } 274 275 int 276 spdk_mempool_get_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count) 277 { 278 return rte_mempool_get_bulk((struct rte_mempool *)mp, ele_arr, count); 279 } 280 281 void 282 spdk_mempool_put(struct spdk_mempool *mp, void *ele) 283 { 284 rte_mempool_put((struct rte_mempool *)mp, ele); 285 } 286 287 void 288 spdk_mempool_put_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count) 289 { 290 rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count); 291 } 292 293 size_t 294 spdk_mempool_count(const struct spdk_mempool *pool) 295 { 296 return rte_mempool_avail_count((struct rte_mempool *)pool); 297 } 298 299 uint32_t 300 spdk_mempool_obj_iter(struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb, 301 void *obj_cb_arg) 302 { 303 return rte_mempool_obj_iter((struct rte_mempool *)mp, (rte_mempool_obj_cb_t *)obj_cb, 304 obj_cb_arg); 305 } 306 307 struct spdk_mempool * 308 spdk_mempool_lookup(const char *name) 309 { 310 return (struct spdk_mempool *)rte_mempool_lookup(name); 311 } 312 313 bool 314 spdk_process_is_primary(void) 315 { 316 return (rte_eal_process_type() == RTE_PROC_PRIMARY); 317 } 318 319 uint64_t spdk_get_ticks(void) 320 { 321 return rte_get_timer_cycles(); 322 } 323 324 uint64_t spdk_get_ticks_hz(void) 325 { 326 return rte_get_timer_hz(); 327 } 328 329 void spdk_delay_us(unsigned int us) 330 { 331 rte_delay_us(us); 332 } 333 334 void spdk_pause(void) 335 { 336 rte_pause(); 337 } 338 339 void 340 spdk_unaffinitize_thread(void) 341 { 342 rte_cpuset_t new_cpuset, orig_cpuset; 343 long num_cores, i, orig_num_cores; 344 345 CPU_ZERO(&new_cpuset); 346 347 num_cores = sysconf(_SC_NPROCESSORS_CONF); 348 349 /* Create a mask containing all CPUs */ 350 for (i = 0; i < num_cores; i++) { 351 CPU_SET(i, &new_cpuset); 352 } 353 354 rte_thread_get_affinity(&orig_cpuset); 355 orig_num_cores = CPU_COUNT(&orig_cpuset); 356 if (orig_num_cores < num_cores) { 357 for (i = 0; i < orig_num_cores; i++) { 358 if (CPU_ISSET(i, &orig_cpuset)) { 359 CPU_CLR(i, &new_cpuset); 360 } 361 } 362 } 363 364 rte_thread_set_affinity(&new_cpuset); 365 } 366 367 void * 368 spdk_call_unaffinitized(void *cb(void *arg), void *arg) 369 { 370 rte_cpuset_t orig_cpuset; 371 void *ret; 372 373 if (cb == NULL) { 374 return NULL; 375 } 376 377 rte_thread_get_affinity(&orig_cpuset); 378 379 spdk_unaffinitize_thread(); 380 381 ret = cb(arg); 382 383 rte_thread_set_affinity(&orig_cpuset); 384 385 return ret; 386 } 387 388 struct spdk_ring * 389 spdk_ring_create(enum spdk_ring_type type, size_t count, int socket_id) 390 { 391 char ring_name[64]; 392 static uint32_t ring_num = 0; 393 unsigned flags = RING_F_EXACT_SZ; 394 395 switch (type) { 396 case SPDK_RING_TYPE_SP_SC: 397 flags |= RING_F_SP_ENQ | RING_F_SC_DEQ; 398 break; 399 case SPDK_RING_TYPE_MP_SC: 400 flags |= RING_F_SC_DEQ; 401 break; 402 case SPDK_RING_TYPE_MP_MC: 403 flags |= 0; 404 break; 405 default: 406 return NULL; 407 } 408 409 snprintf(ring_name, sizeof(ring_name), "ring_%u_%d", 410 __atomic_fetch_add(&ring_num, 1, __ATOMIC_RELAXED), getpid()); 411 412 return (struct spdk_ring *)rte_ring_create(ring_name, count, socket_id, flags); 413 } 414 415 void 416 spdk_ring_free(struct spdk_ring *ring) 417 { 418 rte_ring_free((struct rte_ring *)ring); 419 } 420 421 size_t 422 spdk_ring_count(struct spdk_ring *ring) 423 { 424 return rte_ring_count((struct rte_ring *)ring); 425 } 426 
size_t
spdk_ring_enqueue(struct spdk_ring *ring, void **objs, size_t count,
		  size_t *free_space)
{
	/* rte_ring reports free space as an unsigned int; stage it in a local
	 * so the caller's size_t is fully written on 64-bit targets instead of
	 * being aliased through a narrower pointer type.
	 */
	unsigned int space = 0;
	size_t rc;

	rc = rte_ring_enqueue_bulk((struct rte_ring *)ring, objs, count, &space);
	if (free_space != NULL) {
		*free_space = space;
	}

	return rc;
}

size_t
spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count)
{
	/* Burst semantics: returns up to 'count' objects, possibly fewer if the
	 * ring holds less than requested.
	 */
	return rte_ring_dequeue_burst((struct rte_ring *)ring, objs, count, NULL);
}