xref: /spdk/lib/env_dpdk/env.c (revision b066126b0b9afdcae328cbb989244620398cc64d)
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"

#include <rte_config.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_memzone.h>
#include <rte_version.h>

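/*
 * Translate a virtual address inside an rte_malloc allocation to a
 * physical/IO address. DPDK >= 17.11 exposes this as an IOVA lookup;
 * older releases name it a physical-address lookup. If rte_malloc cannot
 * resolve the address, fall back to SPDK's own spdk_vtophys() translation.
 */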
static uint64_t
virt_to_phys(void *vaddr)
{
	uint64_t ret;

#if RTE_VERSION >= RTE_VERSION_NUM(17, 11, 0, 3)
	ret = rte_malloc_virt2iova(vaddr);
	if (ret != RTE_BAD_IOVA) {
		return ret;
	}
#else
	ret = rte_malloc_virt2phy(vaddr);
	if (ret != RTE_BAD_PHYS_ADDR) {
		return ret;
	}
#endif

	return spdk_vtophys(vaddr);
}

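/*
 * Allocate a DMA-safe buffer from rte_malloc on the requested NUMA socket
 * and, if the caller passed phys_addr, report the buffer's translated
 * address. A minimal usage sketch (the size and alignment values below are
 * arbitrary, for illustration only):
 *
 *	uint64_t phys;
 *	void *buf = spdk_dma_malloc_socket(4096, 64, &phys, SPDK_ENV_SOCKET_ID_ANY);
 *	if (buf == NULL) {
 *		... handle allocation failure ...
 *	}
 *	...
 *	spdk_dma_free(buf);
 */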
void *
spdk_dma_malloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	void *buf = rte_malloc_socket(NULL, size, align, socket_id);
	if (buf && phys_addr) {
		*phys_addr = virt_to_phys(buf);
	}
	return buf;
}

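/*
 * Zeroing variant of spdk_dma_malloc_socket(): same allocation path,
 * followed by a memset of the entire buffer.
 */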
void *
spdk_dma_zmalloc_socket(size_t size, size_t align, uint64_t *phys_addr, int socket_id)
{
	void *buf = spdk_dma_malloc_socket(size, align, phys_addr, socket_id);
	if (buf) {
		memset(buf, 0, size);
	}
	return buf;
}

void *
spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_malloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
{
	return spdk_dma_zmalloc_socket(size, align, phys_addr, SPDK_ENV_SOCKET_ID_ANY);
}

void *
spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
{
	void *new_buf = rte_realloc(buf, size, align);
	if (new_buf && phys_addr) {
		*phys_addr = virt_to_phys(new_buf);
	}
	return new_buf;
}

void
spdk_dma_free(void *buf)
{
	rte_free(buf);
}

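/*
 * Memzone wrappers. spdk_memzone_reserve() zeroes the reserved region
 * before handing it back, which rte_memzone_reserve() itself does not
 * promise to do.
 */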
void *
spdk_memzone_reserve(const char *name, size_t len, int socket_id, unsigned flags)
{
	const struct rte_memzone *mz;

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	mz = rte_memzone_reserve(name, len, socket_id, flags);

	if (mz != NULL) {
		memset(mz->addr, 0, len);
		return mz->addr;
	} else {
		return NULL;
	}
}

void *
spdk_memzone_lookup(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return mz->addr;
	} else {
		return NULL;
	}
}

int
spdk_memzone_free(const char *name)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz != NULL) {
		return rte_memzone_free(mz);
	}

	return -1;
}

void
spdk_memzone_dump(FILE *f)
{
	rte_memzone_dump(f);
}

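/*
 * Create a mempool with an optional per-object constructor. The requested
 * per-core cache size is clamped so that at most half of all elements can
 * sit in per-lcore caches, and never exceeds RTE_MEMPOOL_CACHE_MAX_SIZE.
 * MEMPOOL_F_NO_PHYS_CONTIG allows the pool's backing memory to be
 * physically non-contiguous.
 */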
struct spdk_mempool *
spdk_mempool_create_ctor(const char *name, size_t count,
			 size_t ele_size, size_t cache_size, int socket_id,
			 spdk_mempool_obj_cb_t *obj_init, void *obj_init_arg)
{
	struct rte_mempool *mp;
	size_t tmp;

	if (socket_id == SPDK_ENV_SOCKET_ID_ANY) {
		socket_id = SOCKET_ID_ANY;
	}

	/* No more than half of all elements can be in cache */
	tmp = (count / 2) / rte_lcore_count();
	if (cache_size > tmp) {
		cache_size = tmp;
	}

	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
		cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
	}

	mp = rte_mempool_create(name, count, ele_size, cache_size,
				0, NULL, NULL, (rte_mempool_obj_cb_t *)obj_init, obj_init_arg,
				socket_id, MEMPOOL_F_NO_PHYS_CONTIG);

	return (struct spdk_mempool *)mp;
}

struct spdk_mempool *
spdk_mempool_create(const char *name, size_t count,
		    size_t ele_size, size_t cache_size, int socket_id)
{
	return spdk_mempool_create_ctor(name, count, ele_size, cache_size, socket_id,
					NULL, NULL);
}

char *
spdk_mempool_get_name(struct spdk_mempool *mp)
{
	return ((struct rte_mempool *)mp)->name;
}

void
spdk_mempool_free(struct spdk_mempool *mp)
{
#if RTE_VERSION >= RTE_VERSION_NUM(16, 7, 0, 1)
	rte_mempool_free((struct rte_mempool *)mp);
#endif
}

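/*
 * Element get/put wrappers. spdk_mempool_get() returns NULL when the pool
 * is exhausted rather than propagating rte_mempool_get()'s error code.
 * A minimal sketch (pool name and sizing below are arbitrary examples):
 *
 *	struct spdk_mempool *pool = spdk_mempool_create("bufs", 1024, 256, 64,
 *							 SPDK_ENV_SOCKET_ID_ANY);
 *	void *obj = spdk_mempool_get(pool);
 *	if (obj != NULL) {
 *		...
 *		spdk_mempool_put(pool, obj);
 *	}
 */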
void *
spdk_mempool_get(struct spdk_mempool *mp)
{
	void *ele = NULL;
	int rc;

	rc = rte_mempool_get((struct rte_mempool *)mp, &ele);
	if (rc != 0) {
		return NULL;
	}
	return ele;
}

void
spdk_mempool_put(struct spdk_mempool *mp, void *ele)
{
	rte_mempool_put((struct rte_mempool *)mp, ele);
}

void
spdk_mempool_put_bulk(struct spdk_mempool *mp, void *const *ele_arr, size_t count)
{
	rte_mempool_put_bulk((struct rte_mempool *)mp, ele_arr, count);
}

size_t
spdk_mempool_count(const struct spdk_mempool *pool)
{
#if RTE_VERSION < RTE_VERSION_NUM(16, 7, 0, 1)
	return rte_mempool_count((struct rte_mempool *)pool);
#else
	return rte_mempool_avail_count((struct rte_mempool *)pool);
#endif
}

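/* Report whether this process initialized the DPDK EAL as the primary process. */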
bool
spdk_process_is_primary(void)
{
	return (rte_eal_process_type() == RTE_PROC_PRIMARY);
}

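/*
 * Timestamp helpers backed by DPDK's timer cycle counter. A sketch of
 * converting a tick delta to microseconds (illustrative only):
 *
 *	uint64_t start = spdk_get_ticks();
 *	... do work ...
 *	uint64_t elapsed_us = (spdk_get_ticks() - start) * 1000000ULL /
 *			      spdk_get_ticks_hz();
 */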
uint64_t spdk_get_ticks(void)
{
	return rte_get_timer_cycles();
}

uint64_t spdk_get_ticks_hz(void)
{
	return rte_get_timer_hz();
}

void spdk_delay_us(unsigned int us)
{
	rte_delay_us(us);
}

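/*
 * Widen the calling thread's CPU affinity to every processor the system
 * reports via sysconf(), clearing any core pinning inherited from the EAL.
 */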
void
spdk_unaffinitize_thread(void)
{
	rte_cpuset_t new_cpuset;
	long num_cores, i;

	CPU_ZERO(&new_cpuset);

	num_cores = sysconf(_SC_NPROCESSORS_CONF);

	/* Create a mask containing all CPUs */
	for (i = 0; i < num_cores; i++) {
		CPU_SET(i, &new_cpuset);
	}

	rte_thread_set_affinity(&new_cpuset);
}

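/*
 * Invoke a callback while the thread is temporarily unaffinitized, then
 * restore the original CPU mask. Sketch (my_start_fn and ctx are
 * hypothetical names, not part of this file):
 *
 *	void *rc = spdk_call_unaffinitized(my_start_fn, ctx);
 */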
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	rte_cpuset_t orig_cpuset;
	void *ret;

	if (cb == NULL) {
		return NULL;
	}

	rte_thread_get_affinity(&orig_cpuset);

	spdk_unaffinitize_thread();

	ret = cb(arg);

	rte_thread_set_affinity(&orig_cpuset);

	return ret;
}

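/*
 * Create an rte_ring-backed ring. SPDK ring types map onto DPDK flags:
 * SP_SC is single-producer/single-consumer, MP_SC is multi-producer/
 * single-consumer. Each ring gets a name built from a process-wide counter
 * and the PID, because DPDK ring names must be unique.
 */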
struct spdk_ring *
spdk_ring_create(enum spdk_ring_type type, size_t count, int socket_id)
{
	char ring_name[64];
	static uint32_t ring_num = 0;
	unsigned flags = 0;

	switch (type) {
	case SPDK_RING_TYPE_SP_SC:
		flags = RING_F_SP_ENQ | RING_F_SC_DEQ;
		break;
	case SPDK_RING_TYPE_MP_SC:
		flags = RING_F_SC_DEQ;
		break;
	default:
		return NULL;
	}

	snprintf(ring_name, sizeof(ring_name), "ring_%u_%d",
		 __sync_fetch_and_add(&ring_num, 1), getpid());

	return (struct spdk_ring *)rte_ring_create(ring_name, count, socket_id, flags);
}

void
spdk_ring_free(struct spdk_ring *ring)
{
	rte_ring_free((struct rte_ring *)ring);
}

size_t
spdk_ring_count(struct spdk_ring *ring)
{
	return rte_ring_count((struct rte_ring *)ring);
}

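/*
 * Bulk enqueue. Before DPDK 17.05, rte_ring_mp_enqueue_bulk() returned 0 on
 * success, so success is mapped to "count objects enqueued" and failure to
 * 0. From 17.05 on, the call itself returns the number of objects enqueued
 * (all or nothing for the bulk variant).
 */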
size_t
spdk_ring_enqueue(struct spdk_ring *ring, void **objs, size_t count)
{
	int rc;
#if RTE_VERSION < RTE_VERSION_NUM(17, 5, 0, 0)
	rc = rte_ring_mp_enqueue_bulk((struct rte_ring *)ring, objs, count);
	if (rc == 0) {
		return count;
	}

	return 0;
#else
	rc = rte_ring_mp_enqueue_bulk((struct rte_ring *)ring, objs, count, NULL);
	return rc;
#endif
}

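/*
 * Burst dequeue: returns however many objects, up to count, were taken from
 * the ring. The extra NULL argument on DPDK >= 17.05 is the optional count
 * of entries remaining in the ring, which is not needed here.
 */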
size_t
spdk_ring_dequeue(struct spdk_ring *ring, void **objs, size_t count)
{
#if RTE_VERSION < RTE_VERSION_NUM(17, 5, 0, 0)
	return rte_ring_sc_dequeue_burst((struct rte_ring *)ring, objs, count);
#else
	return rte_ring_sc_dequeue_burst((struct rte_ring *)ring, objs, count, NULL);
#endif
}
371