/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <inttypes.h>
#include <rte_malloc.h>
#include <malloc.h>
#include <stdbool.h>
#include <string.h>

#include "mlx5_common_log.h"
#include "mlx5_common_os.h"
#include "mlx5_malloc.h"

struct mlx5_sys_mem {
        uint32_t init:1; /* Memory allocator initialized. */
        uint32_t enable:1; /* System memory selected. */
        uint32_t reserve:30; /* Reserved. */
        RTE_ATOMIC(struct rte_memseg_list *) last_msl;
        /* Last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
        RTE_ATOMIC(uint64_t) malloc_sys;
        /* Count of allocations from system memory. */
        RTE_ATOMIC(uint64_t) malloc_rte;
        /* Count of allocations from rte (hugepage) memory. */
        RTE_ATOMIC(uint64_t) realloc_sys;
        /* Count of reallocations of system memory. */
        RTE_ATOMIC(uint64_t) realloc_rte;
        /* Count of reallocations of rte (hugepage) memory. */
        RTE_ATOMIC(uint64_t) free_sys;
        /* Count of frees to system memory. */
        RTE_ATOMIC(uint64_t) free_rte;
        /* Count of frees to rte (hugepage) memory. */
        RTE_ATOMIC(uint64_t) msl_miss;
        /* MSL cache miss count. */
        RTE_ATOMIC(uint64_t) msl_update;
        /* MSL cache update count. */
#endif
};

/* Default: allocator not initialized, rte memory selected. */
static struct mlx5_sys_mem mlx5_sys_mem = {
        .init = 0,
        .enable = 0,
#ifdef RTE_LIBRTE_MLX5_DEBUG
        .malloc_sys = 0,
        .malloc_rte = 0,
        .realloc_sys = 0,
        .realloc_rte = 0,
        .free_sys = 0,
        .free_rte = 0,
        .msl_miss = 0,
        .msl_update = 0,
#endif
};

/**
 * Check if an address belongs to a memory seg list.
 *
 * @param addr
 *   Memory address to be checked.
 * @param msl
 *   Memory seg list.
 *
 * @return
 *   True if it belongs, false otherwise.
 */
static bool
mlx5_mem_check_msl(void *addr, struct rte_memseg_list *msl)
{
        void *start, *end;

        if (!msl)
                return false;
        start = msl->base_va;
        end = RTE_PTR_ADD(start, msl->len);
        if (addr >= start && addr < end)
                return true;
        return false;
}
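
/*
 * Illustrative sketch (not part of the driver): the check above treats the
 * memseg list as a half-open range [base_va, base_va + len), so hypothetical
 * addresses behave as follows:
 *
 *   struct rte_memseg_list msl = {
 *           .base_va = (void *)0x100000, .len = 0x1000 };
 *
 *   mlx5_mem_check_msl((void *)0x100000, &msl); // true: start is inclusive
 *   mlx5_mem_check_msl((void *)0x100800, &msl); // true: inside the range
 *   mlx5_mem_check_msl((void *)0x101000, &msl); // false: end is exclusive
 *   mlx5_mem_check_msl((void *)0x100800, NULL); // false: no list given
 */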

/**
 * Update the cached msl if the address belongs to a new msl.
 *
 * @param addr
 *   Memory address.
 */
static void
mlx5_mem_update_msl(void *addr)
{
        /*
         * Update the cached msl if the new address comes from a msl
         * different from the cached one.
         */
        if (addr && !mlx5_mem_check_msl(addr,
            (struct rte_memseg_list *)rte_atomic_load_explicit
            (&mlx5_sys_mem.last_msl, rte_memory_order_relaxed))) {
                rte_atomic_store_explicit(&mlx5_sys_mem.last_msl,
                        rte_mem_virt2memseg_list(addr),
                        rte_memory_order_relaxed);
#ifdef RTE_LIBRTE_MLX5_DEBUG
                rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_update, 1,
                        rte_memory_order_relaxed);
#endif
        }
}
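
/*
 * Design note: last_msl is a single-entry cache. Most allocations made by a
 * device come from the same memseg list, so a relaxed atomic load of the
 * last seen list usually answers the membership question without the slower
 * rte_mem_virt2memseg_list() lookup. The msl_update and msl_miss counters
 * (debug builds only) measure how often the cache is refreshed or bypassed.
 */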

/**
 * Check if an address belongs to rte memory.
 *
 * @param addr
 *   Memory address to be checked.
 *
 * @return
 *   True if it belongs, false otherwise.
 */
static bool
mlx5_mem_is_rte(void *addr)
{
        /*
         * Check the cached msl first. On a miss, fall back to the slow
         * path and ask EAL whether the memory belongs to rte memory.
         */
        if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)
            rte_atomic_load_explicit(&mlx5_sys_mem.last_msl,
                                     rte_memory_order_relaxed))) {
                if (!rte_mem_virt2memseg_list(addr))
                        return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
                rte_atomic_fetch_add_explicit(&mlx5_sys_mem.msl_miss, 1,
                        rte_memory_order_relaxed);
#endif
        }
        return true;
}
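
/*
 * Note: the cached-msl fast path can only prove membership; a cache miss is
 * not proof of system memory, so the authoritative
 * rte_mem_virt2memseg_list() walk decides the negative case. This is what
 * keeps mlx5_free() from handing an rte pointer to the system allocator.
 */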

/**
 * Allocate memory with alignment.
 *
 * @param size
 *   Memory size to be allocated.
 * @param align
 *   Memory alignment.
 * @param zero
 *   Clear the allocated memory or not.
 *
 * @return
 *   Pointer to the allocated memory, NULL on failure.
 */
static void *
mlx5_alloc_align(size_t size, unsigned int align, unsigned int zero)
{
        void *buf;

        buf = mlx5_os_malloc(align, size);
        if (!buf) {
                DRV_LOG(ERR, "Couldn't allocate buf size=%zu align=%u.",
                        size, align);
                return NULL;
        }
        if (zero)
                memset(buf, 0, size);
        return buf;
}
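
/*
 * Note: on Linux mlx5_os_malloc() wraps posix_memalign(), which, unlike
 * calloc(), returns uninitialized memory; that is why the zero flag is
 * honored with an explicit memset() here.
 */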

void *
mlx5_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
{
        void *addr;
        bool rte_mem;

        /*
         * Pick the memory type: an explicit MLX5_MEM_RTE or MLX5_MEM_SYS
         * flag wins; if neither is set, follow the global selection in
         * mlx5_sys_mem.enable.
         */
        if (flags & MLX5_MEM_RTE)
                rte_mem = true;
        else if (flags & MLX5_MEM_SYS)
                rte_mem = false;
        else
                rte_mem = !mlx5_sys_mem.enable;
        if (rte_mem) {
                if (flags & MLX5_MEM_ZERO)
                        addr = rte_zmalloc_socket(NULL, size, align, socket);
                else
                        addr = rte_malloc_socket(NULL, size, align, socket);
                mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
                if (addr)
                        rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_rte, 1,
                                rte_memory_order_relaxed);
#endif
                return addr;
        }
        /* The memory will be allocated from system. */
        if (align > MLX5_MALLOC_ALIGNMENT)
                addr = mlx5_alloc_align(size, align, !!(flags & MLX5_MEM_ZERO));
        else if (flags & MLX5_MEM_ZERO)
                addr = calloc(1, size);
        else
                addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
        if (addr)
                rte_atomic_fetch_add_explicit(&mlx5_sys_mem.malloc_sys, 1,
                        rte_memory_order_relaxed);
#endif
        return addr;
}
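
/*
 * Usage sketch (illustrative only, hypothetical caller): request zeroed rte
 * memory explicitly, or pass no type flag to follow the allocator selected
 * by mlx5_malloc_mem_select():
 *
 *   uint32_t *ring = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
 *                                256 * sizeof(*ring),
 *                                MLX5_MALLOC_ALIGNMENT, SOCKET_ID_ANY);
 *   if (ring == NULL)
 *           return -ENOMEM;
 *   ...
 *   mlx5_free(ring);
 */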

void *
mlx5_realloc(void *addr, uint32_t flags, size_t size, unsigned int align,
             int socket)
{
        void *new_addr;
        bool rte_mem;

        /* Allocate directly if the old memory address is NULL. */
        if (!addr)
                return mlx5_malloc(flags, size, align, socket);
        /* Get the memory type. */
        if (flags & MLX5_MEM_RTE)
                rte_mem = true;
        else if (flags & MLX5_MEM_SYS)
                rte_mem = false;
        else
                rte_mem = !mlx5_sys_mem.enable;
        /* Check that the old and the requested memory types match. */
        if (rte_mem != mlx5_mem_is_rte(addr)) {
                DRV_LOG(ERR, "Couldn't reallocate to a different memory type.");
                return NULL;
        }
        /* Allocate memory from rte memory. */
        if (rte_mem) {
                new_addr = rte_realloc_socket(addr, size, align, socket);
                mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
                if (new_addr)
                        rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_rte, 1,
                                rte_memory_order_relaxed);
#endif
                return new_addr;
        }
        /* Alignment is not supported when reallocating system memory. */
        if (align) {
                DRV_LOG(ERR, "Couldn't reallocate with alignment.");
                return NULL;
        }
        new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
        if (new_addr)
                rte_atomic_fetch_add_explicit(&mlx5_sys_mem.realloc_sys, 1,
                        rte_memory_order_relaxed);
#endif
        return new_addr;
}
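
/*
 * Usage sketch (illustrative only): a reallocation must stay within the
 * memory type of the original buffer, and system memory cannot be
 * reallocated with an alignment, so a hypothetical caller growing a system
 * buffer passes align == 0:
 *
 *   buf = mlx5_realloc(buf, MLX5_MEM_SYS, new_size, 0, SOCKET_ID_ANY);
 */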

void
mlx5_free(void *addr)
{
        if (addr == NULL)
                return;
        if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
                rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_sys, 1,
                        rte_memory_order_relaxed);
#endif
                mlx5_os_free(addr);
        } else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
                rte_atomic_fetch_add_explicit(&mlx5_sys_mem.free_rte, 1,
                        rte_memory_order_relaxed);
#endif
                rte_free(addr);
        }
}

void
mlx5_memory_stat_dump(void)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
        DRV_LOG(INFO, "System memory malloc:%"PRIu64", realloc:%"PRIu64","
                " free:%"PRIu64"\nRTE memory malloc:%"PRIu64","
                " realloc:%"PRIu64", free:%"PRIu64"\nMSL miss:%"PRIu64","
                " update:%"PRIu64"",
                rte_atomic_load_explicit(&mlx5_sys_mem.malloc_sys, rte_memory_order_relaxed),
                rte_atomic_load_explicit(&mlx5_sys_mem.realloc_sys, rte_memory_order_relaxed),
                rte_atomic_load_explicit(&mlx5_sys_mem.free_sys, rte_memory_order_relaxed),
                rte_atomic_load_explicit(&mlx5_sys_mem.malloc_rte, rte_memory_order_relaxed),
                rte_atomic_load_explicit(&mlx5_sys_mem.realloc_rte, rte_memory_order_relaxed),
                rte_atomic_load_explicit(&mlx5_sys_mem.free_rte, rte_memory_order_relaxed),
                rte_atomic_load_explicit(&mlx5_sys_mem.msl_miss, rte_memory_order_relaxed),
                rte_atomic_load_explicit(&mlx5_sys_mem.msl_update, rte_memory_order_relaxed));
#endif
}

void
mlx5_malloc_mem_select(uint32_t sys_mem_en)
{
        /*
         * The initialization should be done only once and all devices
         * should use the same memory type. Otherwise, when a new device is
         * attached with a different memory allocation configuration,
         * memory management will misbehave or a failure will be raised.
         */
        if (!mlx5_sys_mem.init) {
                if (sys_mem_en)
                        mlx5_sys_mem.enable = 1;
                mlx5_sys_mem.init = 1;
                DRV_LOG(INFO, "%s is selected.", sys_mem_en ? "SYS_MEM" : "RTE_MEM");
        } else if (mlx5_sys_mem.enable != !!sys_mem_en) {
                DRV_LOG(WARNING, "%s is already selected.",
                        mlx5_sys_mem.enable ? "SYS_MEM" : "RTE_MEM");
        }
}
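
/*
 * Usage sketch (illustrative only, hypothetical config layout): a probe
 * path would select the allocator once, before the first mlx5_malloc()
 * call, typically from the "sys_mem_en" device argument:
 *
 *   mlx5_malloc_mem_select(config->sys_mem_en);
 */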