/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk/util.h"
#include "spdk/env_dpdk.h"
#include "spdk/log.h"
#include "spdk/assert.h"

#include "env_internal.h"

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_eal.h>

/* Tracks, per thread, whether spdk_unaffinitize_thread() has already widened
 * this thread's CPU affinity, so repeated calls become no-ops. */
static __thread bool g_is_thread_unaffinitized;

SPDK_STATIC_ASSERT(SOCKET_ID_ANY == SPDK_ENV_SOCKET_ID_ANY, "SOCKET_ID_ANY mismatch");

void *
spdk_malloc(size_t size, size_t align, uint64_t *unused, int socket_id, uint32_t flags)
{
	/* At least one SPDK_MALLOC_* flag must be set, and the 'unused' pointer
	 * (formerly a physical-address output parameter) must be NULL. */
	if (flags == 0 || unused != NULL) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	return rte_malloc_socket(NULL, size, align, socket_id);
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *unused, int socket_id, uint32_t flags)
{
	if (flags == 0 || unused != NULL) {
		return NULL;
	}

	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	return rte_zmalloc_socket(NULL, size, align, socket_id);
}

void *
spdk_realloc(void *buf, size_t size, size_t align)
{
	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	return rte_realloc(buf, size, align);
}

void
spdk_free(void *buf)
{
	rte_free(buf);
}
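
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * allocating a cache-line-aligned, DMA-able and share-able 4 KiB buffer on
 * any socket, then releasing it. Error handling beyond the NULL check is
 * omitted.
 *
 *	void *buf = spdk_zmalloc(4096, 0, NULL, SPDK_ENV_SOCKET_ID_ANY,
 *				 SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE);
 *	if (buf == NULL) {
 *		return -ENOMEM;
 *	}
 *	...
 *	spdk_free(buf);
 */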

void *
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *unused, int socket_id)
{
	return spdk_malloc(size, align, unused, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}

void *
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *unused, int socket_id)
{
	return spdk_zmalloc(size, align, unused, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}

void *
spdk_dma_malloc(size_t size, size_t align, uint64_t *unused)
{
	return spdk_dma_malloc_socket(size, align, unused, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *unused)
{
	return spdk_dma_zmalloc_socket(size, align, unused, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *unused)
{
	if (unused != NULL) {
		return NULL;
	}
	align = spdk_max(align, RTE_CACHE_LINE_SIZE);
	return rte_realloc(buf, size, align);
}

void
spdk_dma_free(void *buf)
{
	spdk_free(buf);
}
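
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the legacy
 * spdk_dma_* wrappers behave like spdk_malloc()/spdk_zmalloc() with
 * SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE implied.
 *
 *	void *io_buf = spdk_dma_zmalloc(512, 512, NULL);
 *	if (io_buf != NULL) {
 *		... submit I/O using io_buf ...
 *		spdk_dma_free(io_buf);
 *	}
 */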

void *
spdk_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			     unsigned flags, unsigned align)
{
	const struct rte_memzone *mz;
	unsigned dpdk_flags = 0;

	if ((flags & SPDK_MEMZONE_NO_IOVA_CONTIG) == 0) {
		dpdk_flags |= RTE_MEMZONE_IOVA_CONTIG;
	}

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	mz = rte_memzone_reserve_aligned(name, len, socket_id, dpdk_flags, align);

	if (mz != NULL) {
		memset(mz->addr, 0, len);
		return mz->addr;
	} else {
		return NULL;
	}
}

void *
spdk_memzone_reserve(const char *name, size_t len, int socket_id, unsigned flags)
{
	return spdk_memzone_reserve_aligned(name, len, socket_id, flags,
					    RTE_CACHE_LINE_SIZE);
}

void *
spdk_memzone_lookup(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return mz->addr;
	} else {
		return NULL;
	}
}

int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}

void
spdk_memzone_dump(FILE *f)
{
	rte_memzone_dump(f);
}
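
/*
 * Illustrative sketch (hypothetical names, not part of this file): reserving a
 * named, IOVA-contiguous region in the primary process and finding it again by
 * name, e.g. from a secondary process.
 *
 *	void *base = spdk_memzone_reserve("my_shared_region", 1 << 20,
 *					  SPDK_ENV_SOCKET_ID_ANY, 0);
 *	...
 *	void *same = spdk_memzone_lookup("my_shared_region");
 *	...
 *	spdk_memzone_free("my_shared_region");
 */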

struct spdk_mempool *
spdk_mempool_create_ctor(const char *name, size_t count,
			 size_t ele_size, size_t cache_size, int socket_id,
			 spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg)
{
	struct rte_mempool *mp;
	size_t tmp;

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	/* No more than half of all elements can be in cache */
	tmp = (count / 2) / rte_lcore_count();
	if (cache_size > tmp) {
		cache_size = tmp;
	}

	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
	}

	mp = rte_mempool_create(name, count, ele_size, cache_size,
				0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
				socket_id, 0);

	return (struct spdk_mempool *)mp;
}

struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count,
		    size_t ele_size, size_t cache_size, int socket_id)
{
	return spdk_mempool_create_ctor(name, count, ele_size, cache_size, socket_id,
					NULL, NULL);
}

char *
spdk_mempool_get_name(struct spdk_mempool *mp)
{
	return ((struct rte_mempool *)mp)->name;
}

void
spdk_mempool_free(struct spdk_mempool *mp)
{
	rte_mempool_free((struct rte_mempool *)mp);
}

void *
spdk_mempool_get(struct spdk_mempool *mp)
{
	void *ele = NULL;
	int rc;

	rc = rte_mempool_get((struct rte_mempool *)mp, &ele);
	if (rc != 0) {
		return NULL;
	}
	return ele;
}

int
spdk_mempool_get_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	return rte_mempool_get_bulk((struct rte_mempool *)mp, ele_arr, count);
}

void
spdk_mempool_put(struct spdk_mempool *mp, void *ele)
{
	rte_mempool_put((struct rte_mempool *)mp, ele);
}

void
spdk_mempool_put_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count);
}

size_t
spdk_mempool_count(const struct spdk_mempool *pool)
{
	return rte_mempool_avail_count((struct rte_mempool *)pool);
}
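
/*
 * Illustrative sketch (hypothetical name and sizes, not part of this file):
 * creating a pool of fixed-size buffers, borrowing one element and returning
 * it, then destroying the pool.
 *
 *	struct spdk_mempool *pool = spdk_mempool_create("my_bufs", 8192, 4096,
 *							SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
 *							SPDK_ENV_SOCKET_ID_ANY);
 *	void *buf = spdk_mempool_get(pool);
 *	if (buf != NULL) {
 *		...
 *		spdk_mempool_put(pool, buf);
 *	}
 *	spdk_mempool_free(pool);
 */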

uint32_t
spdk_mempool_obj_iter(struct spdk_mempool *mp, spdk_mempool_obj_cb_t obj_cb,
		      void *obj_cb_arg)
{
	return rte_mempool_obj_iter((struct rte_mempool *)mp, (rte_mempool_obj_cb_t *)obj_cb,
				    obj_cb_arg);
}

struct env_mempool_mem_iter_ctx {
	spdk_mempool_mem_cb_t *user_cb;
	void *user_arg;
};

static void
mempool_mem_iter_remap(struct rte_mempool *mp, void *opaque, struct rte_mempool_memhdr *memhdr,
		       unsigned mem_idx)
{
	struct env_mempool_mem_iter_ctx *ctx = opaque;

	ctx->user_cb((struct spdk_mempool *)mp, ctx->user_arg, memhdr->addr, memhdr->iova, memhdr->len,
		     mem_idx);
}

uint32_t
spdk_mempool_mem_iter(struct spdk_mempool *mp, spdk_mempool_mem_cb_t mem_cb,
		      void *mem_cb_arg)
{
	struct env_mempool_mem_iter_ctx ctx = {
		.user_cb = mem_cb,
		.user_arg = mem_cb_arg
	};

	return rte_mempool_mem_iter((struct rte_mempool *)mp, mempool_mem_iter_remap, &ctx);
}
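
/*
 * Illustrative sketch (hypothetical callback, not part of this file):
 * zero-initializing every element of a pool with spdk_mempool_obj_iter(). The
 * callback follows the spdk_mempool_obj_cb_t signature; the element size is
 * passed through the opaque argument.
 *
 *	static void
 *	zero_obj(struct spdk_mempool *mp, void *opaque, void *obj, unsigned obj_idx)
 *	{
 *		memset(obj, 0, (size_t)(uintptr_t)opaque);
 *	}
 *
 *	spdk_mempool_obj_iter(pool, zero_obj, (void *)(uintptr_t)ele_size);
 */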

struct spdk_mempool *
spdk_mempool_lookup(const char *name)
{
	return (struct spdk_mempool *)rte_mempool_lookup(name);
}

bool
spdk_process_is_primary(void)
{
	return (rte_eal_process_type() == RTE_PROC_PRIMARY);
}

uint64_t
spdk_get_ticks(void)
{
	return rte_get_timer_cycles();
}

uint64_t
spdk_get_ticks_hz(void)
{
	return rte_get_timer_hz();
}

void
spdk_delay_us(unsigned int us)
{
	rte_delay_us(us);
}
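
/*
 * Illustrative sketch (not part of this file): converting the tick counter
 * into elapsed wall-clock time.
 *
 *	uint64_t start = spdk_get_ticks();
 *	spdk_delay_us(100);
 *	uint64_t elapsed_us = (spdk_get_ticks() - start) * 1000000ULL /
 *			      spdk_get_ticks_hz();
 */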

void
spdk_pause(void)
{
	rte_pause();
}

void
spdk_unaffinitize_thread(void)
{
	rte_cpuset_t new_cpuset;
	long num_cores, i;

	if (g_is_thread_unaffinitized) {
		return;
	}

	CPU_ZERO(&new_cpuset);

	num_cores = sysconf(_SC_NPROCESSORS_CONF);

	/* Create a mask containing all CPUs */
	for (i = 0; i < num_cores; i++) {
		CPU_SET(i, &new_cpuset);
	}

	rte_thread_set_affinity(&new_cpuset);
	g_is_thread_unaffinitized = true;
}

void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	rte_cpuset_t orig_cpuset;
	void *ret;

	if (cb == NULL) {
		return NULL;
	}

	if (g_is_thread_unaffinitized) {
		ret = cb(arg);
	} else {
		rte_thread_get_affinity(&orig_cpuset);
		spdk_unaffinitize_thread();

		ret = cb(arg);

		rte_thread_set_affinity(&orig_cpuset);
		g_is_thread_unaffinitized = false;
	}

	return ret;
}
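
/*
 * Illustrative sketch (hypothetical helper and thread function, not part of
 * this file): spawning a pthread via spdk_call_unaffinitized() so the child
 * thread does not inherit the caller's narrow core affinity.
 *
 *	static void *
 *	start_worker(void *arg)
 *	{
 *		pthread_t *tid = arg;
 *
 *		return (void *)(uintptr_t)pthread_create(tid, NULL, worker_fn, NULL);
 *	}
 *
 *	...
 *	pthread_t tid;
 *	spdk_call_unaffinitized(start_worker, &tid);
 */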

struct spdk_ring *
spdk_ring_create(enum spdk_ring_type type, size_t count, int socket_id)
{
	char ring_name[64];
	static uint32_t ring_num = 0;
	unsigned flags = RING_F_EXACT_SZ;

	switch (type) {
	case SPDK_RING_TYPE_SP_SC:
		flags |= RING_F_SP_ENQ | RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_SC:
		flags |= RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_MC:
		flags |= 0;
		break;
	default:
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "ring_%u_%d",
		 __atomic_fetch_add(&ring_num, 1, __ATOMIC_RELAXED), getpid());

	return (struct spdk_ring *)rte_ring_create(ring_name, count, socket_id, flags);
}

void
spdk_ring_free(struct spdk_ring *ring)
{
	rte_ring_free((struct rte_ring *)ring);
}

size_t
spdk_ring_count(struct spdk_ring *ring)
{
	return rte_ring_count((struct rte_ring *)ring);
}

size_t
spdk_ring_enqueue(struct spdk_ring *ring, void **objs, size_t count,
		  size_t *free_space)
{
	return rte_ring_enqueue_bulk((struct rte_ring *)ring, objs, count,
				     (unsigned int *)free_space);
}

size_t
spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count)
{
	return rte_ring_dequeue_burst((struct rte_ring *)ring, objs, count, NULL);
}
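
/*
 * Illustrative sketch (not part of this file): a multi-producer/single-consumer
 * ring carrying pointers between threads. spdk_ring_enqueue() is all-or-nothing
 * (bulk), while spdk_ring_dequeue() returns up to 'count' objects (burst).
 *
 *	struct spdk_ring *ring = spdk_ring_create(SPDK_RING_TYPE_MP_SC, 4096,
 *						  SPDK_ENV_SOCKET_ID_ANY);
 *	void *msg = ...;
 *	void *objs[32];
 *
 *	if (spdk_ring_enqueue(ring, &msg, 1, NULL) != 1) {
 *		... ring full ...
 *	}
 *	size_t n = spdk_ring_dequeue(ring, objs, 32);
 *	...
 *	spdk_ring_free(ring);
 */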

void
spdk_env_dpdk_dump_mem_stats(FILE *file)
{
	fprintf(file, "DPDK memory size %" PRIu64 "\n", rte_eal_get_physmem_size());
	fprintf(file, "DPDK memory layout\n");
	rte_dump_physmem_layout(file);
	fprintf(file, "DPDK memzones.\n");
	rte_memzone_dump(file);
	fprintf(file, "DPDK mempools.\n");
	rte_mempool_list_dump(file);
	fprintf(file, "DPDK malloc stats.\n");
	rte_malloc_dump_stats(file, NULL);
	fprintf(file, "DPDK malloc heaps.\n");
	rte_malloc_dump_heaps(file);
}

int
spdk_get_tid(void)
{
	return rte_sys_gettid();
}