xref: /spdk/lib/env_dpdk/env.c (revision bb488d2829a9b7863daab45917dd2174905cc0ae)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "env_internal.h"

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_version.h>

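/*
 * Translate a virtual address to an I/O address. Buffers that came from
 * rte_malloc resolve through rte_malloc_virt2iova(); anything else falls
 * back to spdk_vtophys().
 */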
static uint64_t
virt_to_phys(void *vaddr)
{
	uint64_t ret;

	ret = rte_malloc_virt2iova(vaddr);
	if (ret != RTE_BAD_IOVA) {
		return ret;
	}

	return spdk_vtophys(vaddr, NULL);
}

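/*
 * Allocate a buffer with rte_malloc_socket(). At least one SPDK_MALLOC_*
 * flag must be given or the call returns NULL. The phys_addr out parameter
 * is deprecated and is only filled in when it is non-NULL.
 */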
void *
spdk_malloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	if (flags == 0) {
		return NULL;
	}

	void *buf = rte_malloc_socket(NULL, size, align, socket_id);
	if (buf && phys_addr) {
#ifdef DEBUG
		fprintf(stderr, "phys_addr param in spdk_*malloc() is deprecated\n");
#endif
		*phys_addr = virt_to_phys(buf);
	}
	return buf;
}

void *
spdk_zmalloc(size_t size, size_t align, uint64_t *phys_addr, int socket_id, uint32_t flags)
{
	void *buf = spdk_malloc(size, align, phys_addr, socket_id, flags);
	if (buf) {
		memset(buf, 0, size);
	}
	return buf;
}

void *
spdk_realloc(void *buf, size_t size, size_t align)
{
	return rte_realloc(buf, size, align);
}

void
spdk_free(void *buf)
{
	rte_free(buf);
}

void *
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_malloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}

void *
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	return spdk_zmalloc(size, align, phys_addr, socket_id, (SPDK_MALLOC_DMA | SPDK_MALLOC_SHARE));
}

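/*
 * The spdk_dma_* wrappers below allocate DMA-safe, shareable memory on any
 * NUMA socket by delegating to the socket-aware variants above.
 *
 * Illustrative usage sketch (hypothetical caller; buffer size and alignment
 * chosen only for the example):
 *
 *	void *buf = spdk_dma_zmalloc(4096, 0x1000, NULL);
 *	if (buf != NULL) {
 *		... use buf for DMA ...
 *		spdk_dma_free(buf);
 *	}
 */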
void *
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_malloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_zmalloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *new_buf = rte_realloc(buf, size, align);
	if (new_buf && phys_addr) {
		*phys_addr = virt_to_phys(new_buf);
	}
	return new_buf;
}

void
spdk_dma_free(void *buf)
{
	spdk_free(buf);
}

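/*
 * Reserve a named, zeroed memzone. Unless SPDK_MEMZONE_NO_IOVA_CONTIG is
 * set, the reservation is requested as IOVA-contiguous on DPDK >= 18.05.
 */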
void *
spdk_memzone_reserve_aligned(const char *name, size_t len, int socket_id,
			     unsigned flags, unsigned align)
{
	const struct rte_memzone *mz;
	unsigned dpdk_flags = 0;

#if RTE_VERSION >= RTE_VERSION_NUM(18, 05, 0, 0)
	/* Older DPDK versions do not offer such a flag since their
	 * memzones are IOVA-contiguous by default.
	 */
	if ((flags & SPDK_MEMZONE_NO_IOVA_CONTIG) == 0) {
		dpdk_flags |= RTE_MEMZONE_IOVA_CONTIG;
	}
#endif

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	mz = rte_memzone_reserve_aligned(name, len, socket_id, dpdk_flags, align);

	if (mz != NULL) {
		memset(mz->addr, 0, len);
		return mz->addr;
	} else {
		return NULL;
	}
}

void *
spdk_memzone_reserve(const char *name, size_t len, int socket_id, unsigned flags)
{
	return spdk_memzone_reserve_aligned(name, len, socket_id, flags,
					    RTE_CACHE_LINE_SIZE);
}

void *
spdk_memzone_lookup(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return mz->addr;
	} else {
		return NULL;
	}
}

int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}

void
spdk_memzone_dump(FILE *f)
{
	rte_memzone_dump(f);
}

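/*
 * Create a mempool with an optional per-object constructor. The requested
 * cache size is clamped so that, summed over all lcores, at most half of
 * the elements can sit in per-lcore caches, and it never exceeds
 * RTE_MEMPOOL_CACHE_MAX_SIZE.
 */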
struct spdk_mempool *
spdk_mempool_create_ctor(const char *name, size_t count,
			 size_t ele_size, size_t cache_size, int socket_id,
			 spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg)
{
	struct rte_mempool *mp;
	size_t tmp;

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	/* No more than half of all elements can be in cache */
	tmp = (count / 2) / rte_lcore_count();
	if (cache_size > tmp) {
		cache_size = tmp;
	}

	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
	}

	mp = rte_mempool_create(name, count, ele_size, cache_size,
				0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
				socket_id, MEMPOOL_F_NO_PHYS_CONTIG);

	return (struct spdk_mempool *)mp;
}

struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count,
		    size_t ele_size, size_t cache_size, int socket_id)
{
	return spdk_mempool_create_ctor(name, count, ele_size, cache_size, socket_id,
					NULL, NULL);
}

char *
spdk_mempool_get_name(struct spdk_mempool *mp)
{
	return ((struct rte_mempool *)mp)->name;
}

void
spdk_mempool_free(struct spdk_mempool *mp)
{
	rte_mempool_free((struct rte_mempool *)mp);
}

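/* Get one element from the pool, or NULL if the pool is exhausted. */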
void *
spdk_mempool_get(struct spdk_mempool *mp)
{
	void *ele = NULL;
	int rc;

	rc = rte_mempool_get((struct rte_mempool *)mp, &ele);
	if (rc != 0) {
		return NULL;
	}
	return ele;
}

int
spdk_mempool_get_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	return rte_mempool_get_bulk((struct rte_mempool *)mp, ele_arr, count);
}

void
spdk_mempool_put(struct spdk_mempool *mp, void *ele)
{
	rte_mempool_put((struct rte_mempool *)mp, ele);
}

void
spdk_mempool_put_bulk(struct spdk_mempool *mp, void **ele_arr, size_t count)
{
	rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count);
}

size_t
spdk_mempool_count(const struct spdk_mempool *pool)
{
	return rte_mempool_avail_count((struct rte_mempool *)pool);
}

bool
spdk_process_is_primary(void)
{
	return (rte_eal_process_type() == RTE_PROC_PRIMARY);
}

uint64_t
spdk_get_ticks(void)
{
	return rte_get_timer_cycles();
}

uint64_t
spdk_get_ticks_hz(void)
{
	return rte_get_timer_hz();
}

void
spdk_delay_us(unsigned int us)
{
	rte_delay_us(us);
}

void
spdk_pause(void)
{
	rte_pause();
}

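/*
 * Widen the calling thread's CPU affinity to every CPU reported by
 * sysconf(_SC_NPROCESSORS_CONF).
 */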
void
spdk_unaffinitize_thread(void)
{
	rte_cpuset_t new_cpuset;
	long num_cores, i;

	CPU_ZERO(&new_cpuset);

	num_cores = sysconf(_SC_NPROCESSORS_CONF);

	/* Create a mask containing all CPUs */
	for (i = 0; i < num_cores; i++) {
		CPU_SET(i, &new_cpuset);
	}

	rte_thread_set_affinity(&new_cpuset);
}

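/*
 * Invoke cb(arg) with the calling thread temporarily unaffinitized, then
 * restore the original CPU affinity before returning cb's result.
 */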
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	rte_cpuset_t orig_cpuset;
	void *ret;

	if (cb == NULL) {
		return NULL;
	}

	rte_thread_get_affinity(&orig_cpuset);

	spdk_unaffinitize_thread();

	ret = cb(arg);

	rte_thread_set_affinity(&orig_cpuset);

	return ret;
}

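/*
 * Create an rte_ring sized exactly to `count` (RING_F_EXACT_SZ), with
 * producer/consumer flags derived from the requested spdk_ring_type. The
 * ring name is made unique with a process-wide counter plus the PID.
 */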
struct spdk_ring *
spdk_ring_create(enum spdk_ring_type type, size_t count, int socket_id)
{
	char ring_name[64];
	static uint32_t ring_num = 0;
	unsigned flags = RING_F_EXACT_SZ;

	switch (type) {
	case SPDK_RING_TYPE_SP_SC:
		flags |= RING_F_SP_ENQ | RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_SC:
		flags |= RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_MC:
		flags |= 0;
		break;
	default:
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "ring_%u_%d",
		 __sync_fetch_and_add(&ring_num, 1), getpid());

	return (struct spdk_ring *)rte_ring_create(ring_name, count, socket_id, flags);
}

void
spdk_ring_free(struct spdk_ring *ring)
{
	rte_ring_free((struct rte_ring *)ring);
}

size_t
spdk_ring_count(struct spdk_ring *ring)
{
	return rte_ring_count((struct rte_ring *)ring);
}

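/*
 * Enqueue uses DPDK bulk semantics (all `count` objects or none), while
 * dequeue uses burst semantics (up to `count` objects). Both return the
 * number of objects actually transferred.
 */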
size_t
spdk_ring_enqueue(struct spdk_ring *ring, void **objs, size_t count)
{
	return rte_ring_enqueue_bulk((struct rte_ring *)ring, objs, count, NULL);
}

size_t
spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count)
{
	return rte_ring_dequeue_burst((struct rte_ring *)ring, objs, count, NULL);
}