/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <rte_malloc.h>
#include <malloc.h>
#include <stdbool.h>
#include <string.h>

#include <rte_atomic.h>

#include "mlx5_common_utils.h"
#include "mlx5_malloc.h"

struct mlx5_sys_mem {
	uint32_t init:1; /* Memory allocator initialized. */
	uint32_t enable:1; /* System memory select. */
	uint32_t reserve:30; /* Reserve. */
	union {
		struct rte_memseg_list *last_msl;
		rte_atomic64_t a64_last_msl;
	};
	/* Last allocated rte memory memseg list. */
#ifdef RTE_LIBRTE_MLX5_DEBUG
	rte_atomic64_t malloc_sys;
	/* Memory allocated from system count. */
	rte_atomic64_t malloc_rte;
	/* Memory allocated from hugepage count. */
	rte_atomic64_t realloc_sys;
	/* Memory reallocated from system count. */
	rte_atomic64_t realloc_rte;
	/* Memory reallocated from hugepage count. */
	rte_atomic64_t free_sys;
	/* Memory freed to system count. */
	rte_atomic64_t free_rte;
	/* Memory freed to hugepage count. */
	rte_atomic64_t msl_miss;
	/* MSL miss count. */
	rte_atomic64_t msl_update;
	/* MSL update count. */
#endif
};

/* Initialize default as not initialized and not enabled. */
static struct mlx5_sys_mem mlx5_sys_mem = {
	.init = 0,
	.enable = 0,
#ifdef RTE_LIBRTE_MLX5_DEBUG
	.malloc_sys = RTE_ATOMIC64_INIT(0),
	.malloc_rte = RTE_ATOMIC64_INIT(0),
	.realloc_sys = RTE_ATOMIC64_INIT(0),
	.realloc_rte = RTE_ATOMIC64_INIT(0),
	.free_sys = RTE_ATOMIC64_INIT(0),
	.free_rte = RTE_ATOMIC64_INIT(0),
	.msl_miss = RTE_ATOMIC64_INIT(0),
	.msl_update = RTE_ATOMIC64_INIT(0),
#endif
};

/**
 * Check if the address belongs to the memory segment list.
 *
 * @param addr
 *   Memory address to be checked.
 * @param msl
 *   Memory segment list.
 *
 * @return
 *   True if it belongs, false otherwise.
 */
static bool
mlx5_mem_check_msl(void *addr, struct rte_memseg_list *msl)
{
	void *start, *end;

	if (!msl)
		return false;
	start = msl->base_va;
	end = RTE_PTR_ADD(start, msl->len);
	if (addr >= start && addr < end)
		return true;
	return false;
}

/**
 * Update the cached msl if the memory belongs to a new msl.
 *
 * @param addr
 *   Memory address.
 */
static void
mlx5_mem_update_msl(void *addr)
{
	/*
	 * Update the cached msl if the new addr comes from a new msl
	 * different from the cached one.
	 */
	if (addr && !mlx5_mem_check_msl(addr,
	    (struct rte_memseg_list *)(uintptr_t)rte_atomic64_read
	    (&mlx5_sys_mem.a64_last_msl))) {
		rte_atomic64_set(&mlx5_sys_mem.a64_last_msl,
			(int64_t)(uintptr_t)rte_mem_virt2memseg_list(addr));
#ifdef RTE_LIBRTE_MLX5_DEBUG
		rte_atomic64_inc(&mlx5_sys_mem.msl_update);
#endif
	}
}

/**
 * Check if the address belongs to rte memory.
 *
 * @param addr
 *   Memory address to be checked.
 *
 * @return
 *   True if it belongs, false otherwise.
 */
static bool
mlx5_mem_is_rte(void *addr)
{
	/*
	 * Check if the cached msl matches first. Drop to the slow path
	 * to check if the memory belongs to rte memory.
	 */
	if (!mlx5_mem_check_msl(addr, (struct rte_memseg_list *)(uintptr_t)
	    rte_atomic64_read(&mlx5_sys_mem.a64_last_msl))) {
		if (!rte_mem_virt2memseg_list(addr))
			return false;
#ifdef RTE_LIBRTE_MLX5_DEBUG
		rte_atomic64_inc(&mlx5_sys_mem.msl_miss);
#endif
	}
	return true;
}
/**
 * Allocate memory with alignment.
 *
 * @param size
 *   Memory size to be allocated.
 * @param align
 *   Memory alignment.
 * @param zero
 *   Clear the allocated memory or not.
 *
 * @return
 *   Pointer to the allocated memory, NULL otherwise.
 */
static void *
mlx5_alloc_align(size_t size, unsigned int align, unsigned int zero)
{
	void *buf;
	int ret;

	ret = posix_memalign(&buf, align, size);
	if (ret) {
		DRV_LOG(ERR,
			"Couldn't allocate buf size=%zu align=%u. Err=%d\n",
			size, align, ret);
		return NULL;
	}
	if (zero)
		memset(buf, 0, size);
	return buf;
}

void *
mlx5_malloc(uint32_t flags, size_t size, unsigned int align, int socket)
{
	void *addr;
	bool rte_mem;

	/*
	 * If neither system memory nor rte memory is required, allocate
	 * memory according to mlx5_sys_mem.enable.
	 */
	if (flags & MLX5_MEM_RTE)
		rte_mem = true;
	else if (flags & MLX5_MEM_SYS)
		rte_mem = false;
	else
		rte_mem = mlx5_sys_mem.enable ? false : true;
	if (rte_mem) {
		if (flags & MLX5_MEM_ZERO)
			addr = rte_zmalloc_socket(NULL, size, align, socket);
		else
			addr = rte_malloc_socket(NULL, size, align, socket);
		mlx5_mem_update_msl(addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		if (addr)
			rte_atomic64_inc(&mlx5_sys_mem.malloc_rte);
#endif
		return addr;
	}
	/* The memory will be allocated from system. */
	if (align > MLX5_MALLOC_ALIGNMENT)
		addr = mlx5_alloc_align(size, align, !!(flags & MLX5_MEM_ZERO));
	else if (flags & MLX5_MEM_ZERO)
		addr = calloc(1, size);
	else
		addr = malloc(size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	if (addr)
		rte_atomic64_inc(&mlx5_sys_mem.malloc_sys);
#endif
	return addr;
}

void *
mlx5_realloc(void *addr, uint32_t flags, size_t size, unsigned int align,
	     int socket)
{
	void *new_addr;
	bool rte_mem;

	/* Allocate directly if old memory address is NULL. */
	if (!addr)
		return mlx5_malloc(flags, size, align, socket);
	/* Get the memory type. */
	if (flags & MLX5_MEM_RTE)
		rte_mem = true;
	else if (flags & MLX5_MEM_SYS)
		rte_mem = false;
	else
		rte_mem = mlx5_sys_mem.enable ? false : true;
	/* Check if old memory and to-be-allocated memory are the same type. */
	if (rte_mem != mlx5_mem_is_rte(addr)) {
		DRV_LOG(ERR, "Couldn't reallocate to different memory type.");
		return NULL;
	}
	/* Allocate memory from rte memory. */
	if (rte_mem) {
		new_addr = rte_realloc_socket(addr, size, align, socket);
		mlx5_mem_update_msl(new_addr);
#ifdef RTE_LIBRTE_MLX5_DEBUG
		if (new_addr)
			rte_atomic64_inc(&mlx5_sys_mem.realloc_rte);
#endif
		return new_addr;
	}
	/* Alignment is not supported for system memory. */
	if (align) {
		DRV_LOG(ERR, "Couldn't reallocate with alignment");
		return NULL;
	}
	new_addr = realloc(addr, size);
#ifdef RTE_LIBRTE_MLX5_DEBUG
	if (new_addr)
		rte_atomic64_inc(&mlx5_sys_mem.realloc_sys);
#endif
	return new_addr;
}

void
mlx5_free(void *addr)
{
	if (addr == NULL)
		return;
	if (!mlx5_mem_is_rte(addr)) {
#ifdef RTE_LIBRTE_MLX5_DEBUG
		rte_atomic64_inc(&mlx5_sys_mem.free_sys);
#endif
		free(addr);
	} else {
#ifdef RTE_LIBRTE_MLX5_DEBUG
		rte_atomic64_inc(&mlx5_sys_mem.free_rte);
#endif
		rte_free(addr);
	}
}

void
mlx5_memory_stat_dump(void)
{
#ifdef RTE_LIBRTE_MLX5_DEBUG
	DRV_LOG(INFO, "System memory malloc:%"PRIi64", realloc:%"PRIi64","
		" free:%"PRIi64"\nRTE memory malloc:%"PRIi64","
		" realloc:%"PRIi64", free:%"PRIi64"\nMSL miss:%"PRIi64","
		" update:%"PRIi64"",
		rte_atomic64_read(&mlx5_sys_mem.malloc_sys),
		rte_atomic64_read(&mlx5_sys_mem.realloc_sys),
		rte_atomic64_read(&mlx5_sys_mem.free_sys),
		rte_atomic64_read(&mlx5_sys_mem.malloc_rte),
		rte_atomic64_read(&mlx5_sys_mem.realloc_rte),
		rte_atomic64_read(&mlx5_sys_mem.free_rte),
		rte_atomic64_read(&mlx5_sys_mem.msl_miss),
		rte_atomic64_read(&mlx5_sys_mem.msl_update));
#endif
}

void
mlx5_malloc_mem_select(uint32_t sys_mem_en)
{
	/*
	 * The initialization should be done only once and all devices
	 * should use the same memory type. Otherwise, when a new device is
	 * attached with a different memory allocation configuration,
	 * the memory will misbehave or an allocation failure will be raised.
	 */
	if (!mlx5_sys_mem.init) {
		if (sys_mem_en)
			mlx5_sys_mem.enable = 1;
		mlx5_sys_mem.init = 1;
		DRV_LOG(INFO, "%s is selected.",
			sys_mem_en ? "SYS_MEM" : "RTE_MEM");
	} else if (mlx5_sys_mem.enable != sys_mem_en) {
		DRV_LOG(WARNING, "%s is already selected.",
			mlx5_sys_mem.enable ? "SYS_MEM" : "RTE_MEM");
	}
}
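
/*
 * Illustrative usage sketch, not part of the driver: it shows how a caller
 * might select the allocator once at startup and then go through a
 * malloc/realloc/free cycle with the wrappers above. The guard macro
 * MLX5_MALLOC_USAGE_EXAMPLE and the function name below are hypothetical
 * and exist only for this example.
 */
#ifdef MLX5_MALLOC_USAGE_EXAMPLE
static void
mlx5_malloc_usage_example(uint32_t sys_mem_en)
{
	void *buf;
	void *tmp;

	/*
	 * Choose system or rte memory for allocations done without an
	 * explicit MLX5_MEM_SYS/MLX5_MEM_RTE flag. Only the first call
	 * takes effect.
	 */
	mlx5_malloc_mem_select(sys_mem_en);
	/* Zero-initialized allocation with the default alignment. */
	buf = mlx5_malloc(MLX5_MEM_ZERO, 256, MLX5_MALLOC_ALIGNMENT,
			  SOCKET_ID_ANY);
	if (buf == NULL)
		return;
	/*
	 * Grow the buffer. The memory type chosen at allocation time is
	 * kept; alignment must be 0 when the system allocator is in use.
	 */
	tmp = mlx5_realloc(buf, 0, 512, 0, SOCKET_ID_ANY);
	if (tmp != NULL)
		buf = tmp;
	/* Release through the matching wrapper. */
	mlx5_free(buf);
	/* Counters are only collected in RTE_LIBRTE_MLX5_DEBUG builds. */
	mlx5_memory_stat_dump();
}
#endif /* MLX5_MALLOC_USAGE_EXAMPLE */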