/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"

struct mem_event_callback_entry {
	TAILQ_ENTRY(mem_event_callback_entry) next;
	char name[RTE_MEM_EVENT_CALLBACK_NAME_LEN];
	rte_mem_event_callback_t clb;
	void *arg;
};

struct mem_alloc_validator_entry {
	TAILQ_ENTRY(mem_alloc_validator_entry) next;
	char name[RTE_MEM_ALLOC_VALIDATOR_NAME_LEN];
	rte_mem_alloc_validator_t clb;
	int socket_id;
	size_t limit;
};

/** Doubly-linked lists of callbacks. */
TAILQ_HEAD(mem_event_callback_entry_list, mem_event_callback_entry);
TAILQ_HEAD(mem_alloc_validator_entry_list, mem_alloc_validator_entry);

static struct mem_event_callback_entry_list mem_event_callback_list =
	TAILQ_HEAD_INITIALIZER(mem_event_callback_list);
static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;

static struct mem_alloc_validator_entry_list mem_alloc_validator_list =
	TAILQ_HEAD_INITIALIZER(mem_alloc_validator_list);
static rte_rwlock_t mem_alloc_validator_rwlock = RTE_RWLOCK_INITIALIZER;

static struct mem_event_callback_entry *
find_mem_event_callback(const char *name, void *arg)
{
	struct mem_event_callback_entry *r;

	TAILQ_FOREACH(r, &mem_event_callback_list, next) {
		if (!strcmp(r->name, name) && r->arg == arg)
			break;
	}
	return r;
}

static struct mem_alloc_validator_entry *
find_mem_alloc_validator(const char *name, int socket_id)
{
	struct mem_alloc_validator_entry *r;

	TAILQ_FOREACH(r, &mem_alloc_validator_list, next) {
		if (!strcmp(r->name, name) && r->socket_id == socket_id)
			break;
	}
	return r;
}

bool
eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
		size_t len)
{
	void *end, *aligned_start, *aligned_end;
	size_t pgsz = (size_t)msl->page_sz;
	const struct rte_memseg *ms;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* in IOVA-as-VA mode, internal memory is always IOVA-contiguous */
	if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
		return true;

	/* legacy memory is always contiguous */
	if (internal_conf->legacy_mem)
		return true;

	end = RTE_PTR_ADD(start, len);

	/* without hugepages, check the pagemap; otherwise, check memsegs */
	if (!rte_eal_has_hugepages()) {
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* skip first iteration */
		cur = rte_mem_virt2iova(aligned_start);
		expected = cur + pgsz;
		aligned_start = RTE_PTR_ADD(aligned_start, pgsz);

		while (aligned_start < aligned_end) {
			cur = rte_mem_virt2iova(aligned_start);
			if (cur != expected)
				return false;
			aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
			expected += pgsz;
		}
	} else {
		int start_seg, end_seg, cur_seg;
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

		start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) / pgsz;
		end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) / pgsz;

		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* skip first iteration */
		ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
		cur = ms->iova;
		expected = cur + pgsz;

		/* if we can't access IOVA addresses, assume non-contiguous */
		if (cur == RTE_BAD_IOVA)
			return false;

		for (cur_seg = start_seg + 1; cur_seg < end_seg;
				cur_seg++, expected += pgsz) {
			ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);

			if (ms->iova != expected)
				return false;
		}
	}
	return true;
}
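/*
 * Illustrative sketch (not part of this file): an EAL-internal caller that
 * wants to hand a virtual region to a device as a single DMA area can pair
 * the check above with rte_mem_virt2memseg_list(). `buf' and `buf_len' are
 * hypothetical, and error handling is elided.
 *
 *	const struct rte_memseg_list *msl = rte_mem_virt2memseg_list(buf);
 *
 *	if (msl != NULL && eal_memalloc_is_contig(msl, buf, buf_len))
 *		... one IOVA-contiguous range: a single descriptor suffices ...
 *	else
 *		... fall back to per-page scatter-gather ...
 */
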
int
eal_memalloc_mem_event_callback_register(const char *name,
		rte_mem_event_callback_t clb, void *arg)
{
	struct mem_event_callback_entry *entry;
	int ret, len;

	if (name == NULL || clb == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_event_rwlock);

	entry = find_mem_event_callback(name, arg);
	if (entry != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		rte_errno = ENOMEM;
		ret = -1;
		goto unlock;
	}

	/* callback successfully created and is valid, add it to the list */
	entry->clb = clb;
	entry->arg = arg;
	strlcpy(entry->name, name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	TAILQ_INSERT_TAIL(&mem_event_callback_list, entry, next);

	ret = 0;

	EAL_LOG(DEBUG, "Mem event callback '%s:%p' registered",
		name, arg);

unlock:
	rte_rwlock_write_unlock(&mem_event_rwlock);
	return ret;
}

int
eal_memalloc_mem_event_callback_unregister(const char *name, void *arg)
{
	struct mem_event_callback_entry *entry;
	int ret, len;

	if (name == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_event_rwlock);

	entry = find_mem_event_callback(name, arg);
	if (entry == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	TAILQ_REMOVE(&mem_event_callback_list, entry, next);
	free(entry);

	ret = 0;

	EAL_LOG(DEBUG, "Mem event callback '%s:%p' unregistered",
		name, arg);

unlock:
	rte_rwlock_write_unlock(&mem_event_rwlock);
	return ret;
}

void
eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
		size_t len)
{
	struct mem_event_callback_entry *entry;

	rte_rwlock_read_lock(&mem_event_rwlock);

	TAILQ_FOREACH(entry, &mem_event_callback_list, next) {
		EAL_LOG(DEBUG, "Calling mem event callback '%s:%p'",
			entry->name, entry->arg);
		entry->clb(event, start, len, entry->arg);
	}

	rte_rwlock_read_unlock(&mem_event_rwlock);
}
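/*
 * Illustrative sketch (not part of this file): the notify loop above is what
 * ultimately drives users of the public rte_mem_event_callback_register()
 * API, e.g. a driver keeping DMA mappings in sync with memory hotplug. The
 * callback below is hypothetical. Note that dispatch holds mem_event_rwlock
 * for reading, so a callback must not call the register/unregister functions
 * above, which take the same lock for writing.
 *
 *	static void
 *	dma_map_sync(enum rte_mem_event event, const void *addr,
 *			size_t len, void *arg)
 *	{
 *		if (event == RTE_MEM_EVENT_ALLOC)
 *			... map [addr, addr + len) for device DMA ...
 *		else
 *			... RTE_MEM_EVENT_FREE: tear the mapping down ...
 *	}
 *
 *	rte_mem_event_callback_register("dma_map_sync", dma_map_sync, NULL);
 */
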
int
eal_memalloc_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	struct mem_alloc_validator_entry *entry;
	int ret, len;

	if (name == NULL || clb == NULL || socket_id < 0) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_alloc_validator_rwlock);

	entry = find_mem_alloc_validator(name, socket_id);
	if (entry != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		rte_errno = ENOMEM;
		ret = -1;
		goto unlock;
	}

	/* callback successfully created and is valid, add it to the list */
	entry->clb = clb;
	entry->socket_id = socket_id;
	entry->limit = limit;
	strlcpy(entry->name, name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
	TAILQ_INSERT_TAIL(&mem_alloc_validator_list, entry, next);

	ret = 0;

	EAL_LOG(DEBUG, "Mem alloc validator '%s' on socket %i with limit %zu registered",
		name, socket_id, limit);

unlock:
	rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
	return ret;
}

int
eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	struct mem_alloc_validator_entry *entry;
	int ret, len;

	if (name == NULL || socket_id < 0) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_alloc_validator_rwlock);

	entry = find_mem_alloc_validator(name, socket_id);
	if (entry == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	TAILQ_REMOVE(&mem_alloc_validator_list, entry, next);
	free(entry);

	ret = 0;

	EAL_LOG(DEBUG, "Mem alloc validator '%s' on socket %i unregistered",
		name, socket_id);

unlock:
	rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
	return ret;
}

int
eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len)
{
	struct mem_alloc_validator_entry *entry;
	int ret = 0;

	rte_rwlock_read_lock(&mem_alloc_validator_rwlock);

	TAILQ_FOREACH(entry, &mem_alloc_validator_list, next) {
		if (entry->socket_id != socket_id || entry->limit > new_len)
			continue;
		EAL_LOG(DEBUG, "Calling mem alloc validator '%s' on socket %i",
			entry->name, entry->socket_id);
		if (entry->clb(socket_id, entry->limit, new_len) < 0)
			ret = -1;
	}

	rte_rwlock_read_unlock(&mem_alloc_validator_rwlock);

	return ret;
}
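/*
 * Illustrative sketch (not part of this file): a validator registered via
 * the public rte_mem_alloc_validator_register() API that caps dynamic
 * hugepage allocation on socket 0 at 1 GiB. As the validate loop above
 * shows, a validator is only consulted once new_len reaches its registered
 * limit, and any validator returning a negative value vetoes the
 * allocation. The name and limit below are hypothetical.
 *
 *	static int
 *	cap_socket_mem(int socket_id, size_t cur_limit, size_t new_len)
 *	{
 *		... only called once new_len has reached cur_limit ...
 *		return -1;
 *	}
 *
 *	rte_mem_alloc_validator_register("cap", cap_socket_mem, 0,
 *			(size_t)1 << 30);
 */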