/* Copyright (C) 2021 Free Software Foundation, Inc.
   Contributed by Oracle.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, 51 Franklin Street - Fifth Floor, Boston,
   MA 02110-1301, USA.  */

#include "config.h"
#include <sys/mman.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "collector.h"
#include "libcol_util.h"
#include "gp-experiment.h"
#include "memmgr.h"

/* TprintfT(<level>,...) definitions.  Adjust per module as needed */
#define DBG_LT0 0 // for high-level configuration, unexpected errors/warnings
#define DBG_LT1 1 // for configuration details, warnings
#define DBG_LT2 2
#define DBG_LT3 3
#define DBG_LT4 4

/*
 * Memory allocation.
 *
 * Heap:
 *   chain[0] - linked list of chunks;
 *   chain[4] - linked list of free 16-byte objects;
 *   chain[5] - linked list of free 32-byte objects;
 *   ...
 *
 * Chunk:
 *
 *   base               lo        hi
 *   V                  V         V
 *   +------------------+---------+-------------------+--+--+-----+
 *   | Var size object  |  ->   <-| Const size objects|  |  |Chunk|
 *   +------------------+---------+-------------------+--+--+-----+
 *
 * Limitations:
 *   - one var size object per chunk
 *   - can't allocate const size objects larger than 2^MAXCHAIN
 */

#define MAXCHAIN 32
#define ALIGNMENT 4	/* 2^ALIGNMENT == minimal size and alignment */
#define ALIGN(x) ((((x) - 1)/(1 << ALIGNMENT) + 1) * (1 << ALIGNMENT))

struct Heap
{
  collector_mutex_t lock;	/* master lock */
  void *chain[MAXCHAIN];	/* chain[0] - chunks */
				/* chain[i] - structs of size 2^i */
};

typedef struct Chunk
{
  size_t size;
  char *base;
  char *lo;
  char *hi;
  struct Chunk *next;
} Chunk;
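
/*
 * Illustrative usage sketch: roughly how a client drives the allocator
 * defined below.  The sizes and variable names are made up for the example
 * and error handling is elided.
 *
 *   Heap *heap = __collector_newHeap ();
 *   if (heap != NULL)
 *     {
 *       // Const-size object: the request is rounded up to 2^6 == 64 bytes.
 *       void *rec = __collector_allocCSize (heap, 64, 1);
 *       // Var-size object: at most one per chunk, grown via realloc.
 *       char *buf = __collector_allocVSize (heap, 1024);
 *       buf = __collector_reallocVSize (heap, buf, 2048);
 *       // Freed const-size objects go onto chain[6] for reuse.
 *       __collector_freeCSize (heap, rec, 64);
 *       // Unmaps every chunk, including the one holding the Heap itself.
 *       __collector_deleteHeap (heap);
 *     }
 */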
static void
not_implemented ()
{
  __collector_log_write ("<event kind=\"%s\" id=\"%d\">error memmgr not_implemented()</event>\n",
			 SP_JCMD_CERROR, COL_ERROR_NOZMEM);
  return;
}

/*
 * void __collector_mmgr_init_mutex_locks( Heap *heap )
 *      Initialize mmgr mutex locks.
 */
void
__collector_mmgr_init_mutex_locks (Heap *heap)
{
  if (heap == NULL)
    return;
  if (__collector_mutex_trylock (&heap->lock))
    {
      /*
       * We are in a child process immediately after the fork().
       * The parent process was in the middle of a critical section when the
       * fork() happened.  This is a placeholder for the cleanup.
       * See CR 6997020 for details.
       */
      __collector_mutex_init (&heap->lock);
    }
  __collector_mutex_init (&heap->lock);
}

/*
 * alloc_chunk( unsigned sz ) allocates a chunk of at least sz bytes.
 * If sz == 0, allocates a chunk of the default size.
 */
static Chunk *
alloc_chunk (unsigned sz, int log)
{
  static long pgsz = 0;
  char *ptr;
  Chunk *chnk;
  size_t chunksz;
  if (pgsz == 0)
    {
      pgsz = CALL_UTIL (sysconf)(_SC_PAGESIZE);
      Tprintf (DBG_LT2, "memmgr: pgsz = %ld (0x%lx)\n", pgsz, pgsz);
    }
  /* Round the request plus the chunk descriptor up to a power-of-two
     multiple of the page size.  */
  unsigned nsz = ALIGN (sizeof (Chunk)) + sz;
  for (chunksz = pgsz; chunksz < nsz; chunksz *= 2);
  if (log == 1)
    Tprintf (DBG_LT2, "alloc_chunk mapping %u, rounded up from %u\n", (unsigned int) chunksz, sz);
  /* mmap64 is only in 32-bits; this call goes to mmap in 64-bits */
  ptr = (char*) CALL_UTIL (mmap64)(0, chunksz, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANON, (int) -1, (off64_t) 0);
  if (ptr == MAP_FAILED)
    {
      Tprintf (0, "alloc_chunk mapping failed COL_ERROR_NOZMEMMAP: %s\n", CALL_UTIL (strerror)(errno));
      __collector_log_write ("<event kind=\"%s\" id=\"%d\" ec=\"%d\">%s</event>\n",
			     SP_JCMD_CERROR, COL_ERROR_NOZMEMMAP, errno, "0");
      return NULL;
    }
  /* Put the chunk descriptor at the end of the chunk */
  chnk = (Chunk*) (ptr + chunksz - ALIGN (sizeof (Chunk)));
  chnk->size = chunksz;
  chnk->base = ptr;
  chnk->lo = chnk->base;
  chnk->hi = (char*) chnk;
  chnk->next = (Chunk*) NULL;
  if (log == 1)
    Tprintf (DBG_LT2, "memmgr: returning new chunk @%p, chunksz=%ld sz=%ld\n",
	     ptr, (long) chunksz, (long) sz);
  return chnk;
}
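
/*
 * Worked example of the rounding above (illustrative; assumes a 4096-byte
 * page and a 64-bit target where ALIGN (sizeof (Chunk)) is 48 bytes):
 *
 *   alloc_chunk (100000, 1)
 *     nsz     = 48 + 100000 = 100048
 *     chunksz = 4096 -> 8192 -> 16384 -> 32768 -> 65536 -> 131072
 *
 * so the mapping is 131072 bytes, the Chunk descriptor occupies its last
 * 48 bytes, and base..hi (131024 bytes) is available for objects.
 */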
Heap *
__collector_newHeap ()
{
  Heap *heap;
  Chunk *chnk;
  Tprintf (DBG_LT2, "__collector_newHeap calling alloc_chunk(0)\n");
  chnk = alloc_chunk (0, 1);
  if (chnk == NULL)
    return NULL;

  /* A bit of hackery: allocate heap from its own chunk */
  chnk->hi -= ALIGN (sizeof (Heap));
  heap = (Heap*) chnk->hi;
  heap->chain[0] = (void*) chnk;
  __collector_mutex_init (&heap->lock);
  return heap;
}

void
__collector_deleteHeap (Heap *heap)
{
  if (heap == NULL)
    return;
  /* Note: heap itself is in the last chunk */
  for (Chunk *chnk = heap->chain[0]; chnk;)
    {
      Chunk *next = chnk->next;
      CALL_UTIL (munmap)((void*) chnk->base, chnk->size);
      chnk = next;
    }
}

void *
__collector_allocCSize (Heap *heap, unsigned sz, int log)
{
  void *res;
  Chunk *chnk;
  if (heap == NULL)
    return NULL;

  /* block all signals and acquire lock */
  sigset_t old_mask, new_mask;
  CALL_UTIL (sigfillset)(&new_mask);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
  __collector_mutex_lock (&heap->lock);

  /* Allocate nsz = 2^idx >= sz bytes */
  unsigned idx = ALIGNMENT;
  unsigned nsz = 1 << idx;
  while (nsz < sz)
    nsz = 1 << ++idx;

  /* Look in the corresponding chain first */
  if (idx < MAXCHAIN)
    {
      if (heap->chain[idx] != NULL)
	{
	  res = heap->chain[idx];
	  heap->chain[idx] = *(void**) res;
	  __collector_mutex_unlock (&heap->lock);
	  CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
	  if (log == 1)
	    Tprintf (DBG_LT2, "memmgr: allocCSize %p sz %d (0x%x) req = 0x%x, from chain idx = %d\n", res, nsz, nsz, sz, idx);
	  return res;
	}
    }
  else
    {
      not_implemented ();
      __collector_mutex_unlock (&heap->lock);
      CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
      return NULL;
    }

  /* Chain is empty, allocate from chunks */
  for (chnk = (Chunk*) heap->chain[0]; chnk; chnk = chnk->next)
    if (chnk->lo + nsz < chnk->hi)
      break;
  if (chnk == NULL)
    {
      /* Get a new chunk */
      if (log == 1)
	Tprintf (DBG_LT2, "__collector_allocCSize (%u) calling alloc_chunk(%u)\n", sz, nsz);
      chnk = alloc_chunk (nsz, 1);
      if (chnk == NULL)
	{
	  __collector_mutex_unlock (&heap->lock);
	  CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
	  return NULL;
	}
      chnk->next = (Chunk*) heap->chain[0];
      heap->chain[0] = chnk;
    }

  /* Allocate from the chunk */
  chnk->hi -= nsz;
  res = (void*) chnk->hi;
  __collector_mutex_unlock (&heap->lock);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
  if (log == 1)
    Tprintf (DBG_LT2, "memmgr: allocCSize %p sz %d (0x%x) req = 0x%x, new chunk\n", res, nsz, nsz, sz);
  return res;
}

void
__collector_freeCSize (Heap *heap, void *ptr, unsigned sz)
{
  if (heap == NULL || ptr == NULL)
    return;

  /* block all signals and acquire lock */
  sigset_t old_mask, new_mask;
  CALL_UTIL (sigfillset)(&new_mask);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
  __collector_mutex_lock (&heap->lock);

  /* Free 2^idx >= sz bytes */
  unsigned idx = ALIGNMENT;
  unsigned nsz = 1 << idx;
  while (nsz < sz)
    nsz = 1 << ++idx;
  if (idx < MAXCHAIN)
    {
      *(void**) ptr = heap->chain[idx];
      heap->chain[idx] = ptr;
    }
  else
    not_implemented ();
  __collector_mutex_unlock (&heap->lock);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
  Tprintf (DBG_LT4, "memmgr: freeC %p sz %ld\n", ptr, (long) sz);
}
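
/*
 * Free-list mechanics (illustrative; assumes chain[5] starts out empty):
 * freed const-size blocks form an intrusive singly linked list per size
 * class, reusing the first word of the block itself as the link.  With two
 * 32-byte objects a and b:
 *
 *   __collector_freeCSize (heap, a, 32);             // *(void **) a = NULL; chain[5] = a
 *   __collector_freeCSize (heap, b, 32);             // *(void **) b = a;    chain[5] = b
 *   void *p = __collector_allocCSize (heap, 32, 0);  // pops b; chain[5] = a again
 *
 * Blocks are never returned to their chunk; memory is only unmapped by
 * __collector_deleteHeap ().
 */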
static void *
allocVSize_nolock (Heap *heap, unsigned sz)
{
  void *res;
  Chunk *chnk;
  if (sz == 0)
    return NULL;

  /* Find a good chunk */
  for (chnk = (Chunk*) heap->chain[0]; chnk; chnk = chnk->next)
    if (chnk->lo == chnk->base && chnk->lo + sz < chnk->hi)
      break;
  if (chnk == NULL)
    {
      /* Get a new chunk */
      Tprintf (DBG_LT2, "allocVSize_nolock calling alloc_chunk(%u)\n", sz);
      chnk = alloc_chunk (sz, 0);
      if (chnk == NULL)
	return NULL;
      chnk->next = (Chunk*) heap->chain[0];
      heap->chain[0] = chnk;
    }
  chnk->lo = chnk->base + sz;
  res = (void*) (chnk->base);
  Tprintf (DBG_LT4, "memmgr: allocV %p for %ld\n", res, (long) sz);
  return res;
}

void *
__collector_allocVSize (Heap *heap, unsigned sz)
{
  void *res;
  if (heap == NULL)
    return NULL;

  /* block all signals and acquire lock */
  sigset_t old_mask, new_mask;
  CALL_UTIL (sigfillset)(&new_mask);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
  __collector_mutex_lock (&heap->lock);
  res = allocVSize_nolock (heap, sz);
  __collector_mutex_unlock (&heap->lock);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
  return res;
}

/*
 * reallocVSize( Heap *heap, void *ptr, unsigned newsz )
 *      Changes the size of the memory pointed to by ptr to newsz.
 *      If ptr == NULL, allocates new memory of size newsz.
 *      If newsz == 0, frees ptr and returns NULL.
 */
void *
__collector_reallocVSize (Heap *heap, void *ptr, unsigned newsz)
{
  Chunk *chnk;
  void *res;
  if (heap == NULL)
    return NULL;
  if (ptr == NULL)
    return __collector_allocVSize (heap, newsz);

  /* block all signals and acquire lock */
  sigset_t old_mask, new_mask;
  CALL_UTIL (sigfillset)(&new_mask);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &new_mask, &old_mask);
  __collector_mutex_lock (&heap->lock);

  /* Find its chunk */
  for (chnk = (Chunk*) heap->chain[0]; chnk; chnk = chnk->next)
    if (ptr == chnk->base)
      break;
  if (chnk == NULL)
    {
      /* memory corruption */
      not_implemented ();
      __collector_mutex_unlock (&heap->lock);
      CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
      return NULL;
    }
  if (chnk->base + newsz < chnk->hi)
    {
      /* easy case: the new size still fits in the same chunk */
      chnk->lo = chnk->base + newsz;
      res = newsz ? chnk->base : NULL;
      __collector_mutex_unlock (&heap->lock);
      CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
      Tprintf (DBG_LT4, "memmgr: reallocV %p for %ld\n", ptr, (long) newsz);
      return res;
    }
  res = allocVSize_nolock (heap, newsz);
  /* Copy to the new location */
  if (res)
    {
      int size = chnk->lo - chnk->base;
      if (newsz < size)
	size = newsz;
      char *s1 = (char*) res;
      char *s2 = chnk->base;
      while (size--)
	*s1++ = *s2++;
    }
  /* Free the old memory */
  chnk->lo = chnk->base;
  __collector_mutex_unlock (&heap->lock);
  CALL_UTIL (sigprocmask)(SIG_SETMASK, &old_mask, NULL);
  return res;
}
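
/*
 * Typical var-size growth pattern (illustrative; buffer name and sizes are
 * made up).  Growth stays in place while the owning chunk has room below its
 * const-size objects; otherwise a chunk with an unused var-size area is found
 * (or a new one mapped), the old contents are copied, and the old area is
 * released.
 *
 *   char *buf = __collector_allocVSize (heap, 4096);
 *   ...
 *   buf = __collector_reallocVSize (heap, buf, 8192);   // may move the data
 *   ...
 *   __collector_reallocVSize (heap, buf, 0);            // releases, returns NULL
 */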