/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_fbarray.h>
#include <rte_memory.h>
#include <rte_rwlock.h>
#include <rte_string_fns.h>

#include "eal_private.h"
#include "eal_internal_cfg.h"
#include "eal_memalloc.h"

struct mem_event_callback_entry {
	TAILQ_ENTRY(mem_event_callback_entry) next;
	char name[RTE_MEM_EVENT_CALLBACK_NAME_LEN];
	rte_mem_event_callback_t clb;
	void *arg;
};

struct mem_alloc_validator_entry {
	TAILQ_ENTRY(mem_alloc_validator_entry) next;
	char name[RTE_MEM_ALLOC_VALIDATOR_NAME_LEN];
	rte_mem_alloc_validator_t clb;
	int socket_id;
	size_t limit;
};

/** Doubly linked lists of registered callback entries. */
TAILQ_HEAD(mem_event_callback_entry_list, mem_event_callback_entry);
TAILQ_HEAD(mem_alloc_validator_entry_list, mem_alloc_validator_entry);

static struct mem_event_callback_entry_list mem_event_callback_list =
	TAILQ_HEAD_INITIALIZER(mem_event_callback_list);
static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;

static struct mem_alloc_validator_entry_list mem_alloc_validator_list =
	TAILQ_HEAD_INITIALIZER(mem_alloc_validator_list);
static rte_rwlock_t mem_alloc_validator_rwlock = RTE_RWLOCK_INITIALIZER;

static struct mem_event_callback_entry *
find_mem_event_callback(const char *name, void *arg)
{
	struct mem_event_callback_entry *r;

	TAILQ_FOREACH(r, &mem_event_callback_list, next) {
		if (!strcmp(r->name, name) && r->arg == arg)
			break;
	}
	return r;
}

static struct mem_alloc_validator_entry *
find_mem_alloc_validator(const char *name, int socket_id)
{
	struct mem_alloc_validator_entry *r;

	TAILQ_FOREACH(r, &mem_alloc_validator_list, next) {
		if (!strcmp(r->name, name) && r->socket_id == socket_id)
			break;
	}
	return r;
}

bool
eal_memalloc_is_contig(const struct rte_memseg_list *msl, void *start,
		size_t len)
{
	void *end, *aligned_start, *aligned_end;
	size_t pgsz = (size_t)msl->page_sz;
	const struct rte_memseg *ms;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* for IOVA_VA, it's always contiguous */
	if (rte_eal_iova_mode() == RTE_IOVA_VA && !msl->external)
		return true;

	/* for legacy memory, it's always contiguous */
	if (internal_conf->legacy_mem)
		return true;

	end = RTE_PTR_ADD(start, len);

	/* for nohuge, we check pagemap, otherwise check memseg */
	if (!rte_eal_has_hugepages()) {
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* skip first iteration */
		cur = rte_mem_virt2iova(aligned_start);
		expected = cur + pgsz;
		aligned_start = RTE_PTR_ADD(aligned_start, pgsz);

		while (aligned_start < aligned_end) {
			cur = rte_mem_virt2iova(aligned_start);
			if (cur != expected)
				return false;
			aligned_start = RTE_PTR_ADD(aligned_start, pgsz);
			expected += pgsz;
		}
	} else {
		int start_seg, end_seg, cur_seg;
		rte_iova_t cur, expected;

		aligned_start = RTE_PTR_ALIGN_FLOOR(start, pgsz);
		aligned_end = RTE_PTR_ALIGN_CEIL(end, pgsz);

		start_seg = RTE_PTR_DIFF(aligned_start, msl->base_va) /
				pgsz;
		end_seg = RTE_PTR_DIFF(aligned_end, msl->base_va) /
				pgsz;
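		/*
		 * Explanatory note (added): each segment in a non-legacy
		 * memseg list is exactly one page long, so the offset from
		 * base_va divided by the page size gives the fbarray index
		 * of the segment backing a given address.
		 */
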
		/* if start and end are on the same page, bail out early */
		if (RTE_PTR_DIFF(aligned_end, aligned_start) == pgsz)
			return true;

		/* skip first iteration */
		ms = rte_fbarray_get(&msl->memseg_arr, start_seg);
		cur = ms->iova;
		expected = cur + pgsz;

		/* if we can't access IOVA addresses, assume non-contiguous */
		if (cur == RTE_BAD_IOVA)
			return false;

		for (cur_seg = start_seg + 1; cur_seg < end_seg;
				cur_seg++, expected += pgsz) {
			ms = rte_fbarray_get(&msl->memseg_arr, cur_seg);

			if (ms->iova != expected)
				return false;
		}
	}
	return true;
}

int
eal_memalloc_mem_event_callback_register(const char *name,
		rte_mem_event_callback_t clb, void *arg)
{
	struct mem_event_callback_entry *entry;
	int ret, len;

	if (name == NULL || clb == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_event_rwlock);

	entry = find_mem_event_callback(name, arg);
	if (entry != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		rte_errno = ENOMEM;
		ret = -1;
		goto unlock;
	}

	/* callback successfully created and is valid, add it to the list */
	entry->clb = clb;
	entry->arg = arg;
	strlcpy(entry->name, name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	TAILQ_INSERT_TAIL(&mem_event_callback_list, entry, next);

	ret = 0;

	RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' registered\n",
			name, arg);

unlock:
	rte_rwlock_write_unlock(&mem_event_rwlock);
	return ret;
}

int
eal_memalloc_mem_event_callback_unregister(const char *name, void *arg)
{
	struct mem_event_callback_entry *entry;
	int ret, len;

	if (name == NULL) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_EVENT_CALLBACK_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_EVENT_CALLBACK_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_event_rwlock);

	entry = find_mem_event_callback(name, arg);
	if (entry == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	TAILQ_REMOVE(&mem_event_callback_list, entry, next);
	free(entry);

	ret = 0;

	RTE_LOG(DEBUG, EAL, "Mem event callback '%s:%p' unregistered\n",
			name, arg);

unlock:
	rte_rwlock_write_unlock(&mem_event_rwlock);
	return ret;
}

void
eal_memalloc_mem_event_notify(enum rte_mem_event event, const void *start,
		size_t len)
{
	struct mem_event_callback_entry *entry;

	rte_rwlock_read_lock(&mem_event_rwlock);

	TAILQ_FOREACH(entry, &mem_event_callback_list, next) {
		RTE_LOG(DEBUG, EAL, "Calling mem event callback '%s:%p'\n",
			entry->name, entry->arg);
		entry->clb(event, start, len, entry->arg);
	}

	rte_rwlock_read_unlock(&mem_event_rwlock);
}

int
eal_memalloc_mem_alloc_validator_register(const char *name,
		rte_mem_alloc_validator_t clb, int socket_id, size_t limit)
{
	struct mem_alloc_validator_entry *entry;
	int ret, len;

	if (name == NULL || clb == NULL || socket_id < 0) {
		rte_errno = EINVAL;
		return -1;
	}
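	/*
	 * Explanatory note (added): strnlen() never scans past the buffer
	 * size, so a result equal to RTE_MEM_ALLOC_VALIDATOR_NAME_LEN means
	 * the name (with its NUL terminator) cannot fit into entry->name.
	 */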
	len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_alloc_validator_rwlock);

	entry = find_mem_alloc_validator(name, socket_id);
	if (entry != NULL) {
		rte_errno = EEXIST;
		ret = -1;
		goto unlock;
	}

	entry = malloc(sizeof(*entry));
	if (entry == NULL) {
		rte_errno = ENOMEM;
		ret = -1;
		goto unlock;
	}

	/* callback successfully created and is valid, add it to the list */
	entry->clb = clb;
	entry->socket_id = socket_id;
	entry->limit = limit;
	strlcpy(entry->name, name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
	TAILQ_INSERT_TAIL(&mem_alloc_validator_list, entry, next);

	ret = 0;

	RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i with limit %zu registered\n",
		name, socket_id, limit);

unlock:
	rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
	return ret;
}

int
eal_memalloc_mem_alloc_validator_unregister(const char *name, int socket_id)
{
	struct mem_alloc_validator_entry *entry;
	int ret, len;

	if (name == NULL || socket_id < 0) {
		rte_errno = EINVAL;
		return -1;
	}
	len = strnlen(name, RTE_MEM_ALLOC_VALIDATOR_NAME_LEN);
	if (len == 0) {
		rte_errno = EINVAL;
		return -1;
	} else if (len == RTE_MEM_ALLOC_VALIDATOR_NAME_LEN) {
		rte_errno = ENAMETOOLONG;
		return -1;
	}
	rte_rwlock_write_lock(&mem_alloc_validator_rwlock);

	entry = find_mem_alloc_validator(name, socket_id);
	if (entry == NULL) {
		rte_errno = ENOENT;
		ret = -1;
		goto unlock;
	}
	TAILQ_REMOVE(&mem_alloc_validator_list, entry, next);
	free(entry);

	ret = 0;

	RTE_LOG(DEBUG, EAL, "Mem alloc validator '%s' on socket %i unregistered\n",
		name, socket_id);

unlock:
	rte_rwlock_write_unlock(&mem_alloc_validator_rwlock);
	return ret;
}

int
eal_memalloc_mem_alloc_validate(int socket_id, size_t new_len)
{
	struct mem_alloc_validator_entry *entry;
	int ret = 0;

	rte_rwlock_read_lock(&mem_alloc_validator_rwlock);

	TAILQ_FOREACH(entry, &mem_alloc_validator_list, next) {
		if (entry->socket_id != socket_id || entry->limit > new_len)
			continue;
		RTE_LOG(DEBUG, EAL, "Calling mem alloc validator '%s' on socket %i\n",
			entry->name, entry->socket_id);
		if (entry->clb(socket_id, entry->limit, new_len) < 0)
			ret = -1;
	}

	rte_rwlock_read_unlock(&mem_alloc_validator_rwlock);

	return ret;
}
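
/*
 * Illustrative sketch (added, not part of the original file): how an
 * internal EAL consumer might use the two registration APIs above. The
 * "demo_*" names are hypothetical; only the eal_memalloc_* calls and the
 * callback signatures come from this file and rte_memory.h.
 */
static void
demo_mem_event_cb(enum rte_mem_event event, const void *addr, size_t len,
		void *arg __rte_unused)
{
	/* a real callback would e.g. update DMA maps for [addr, addr + len) */
	RTE_LOG(DEBUG, EAL, "demo: %s of %zu bytes at %p\n",
		event == RTE_MEM_EVENT_ALLOC ? "alloc" : "free", len, addr);
}

static int
demo_socket0_validator(int socket_id, size_t cur_limit, size_t new_len)
{
	/* returning -1 here would deny the allocation */
	RTE_LOG(DEBUG, EAL, "demo: socket %i at %zu of %zu byte limit\n",
		socket_id, new_len, cur_limit);
	return 0;
}

static int __rte_unused
demo_register(void)
{
	/* (name, arg) pairs must be unique, otherwise rte_errno is EEXIST */
	if (eal_memalloc_mem_event_callback_register("demo_cb",
			demo_mem_event_cb, NULL) < 0)
		return -1;
	/* validator fires once socket 0 usage would reach 1 GiB */
	return eal_memalloc_mem_alloc_validator_register("demo_vd",
			demo_socket0_validator, 0, 1ULL << 30);
}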