/* Copyright (C) 1993, 1996, 1997, 1998, 1999 Aladdin Enterprises.  All rights reserved.

   This file is part of AFPL Ghostscript.

   AFPL Ghostscript is distributed with NO WARRANTY OF ANY KIND.  No author or
   distributor accepts any responsibility for the consequences of using it, or
   for whether it serves any particular purpose or works at all, unless he or
   she says so in writing.  Refer to the Aladdin Free Public License (the
   "License") for full details.

   Every copy of AFPL Ghostscript must include a copy of the License, normally
   in a plain ASCII text file named PUBLIC.  The License grants you the right
   to copy, modify and redistribute AFPL Ghostscript, but only under certain
   conditions described in the License.  Among other things, the License
   requires that the copyright notice and this notice be preserved on all
   copies.
 */

/*$Id: igc.c,v 1.5 2001/10/12 21:37:08 ghostgum Exp $ */
/* Garbage collector for Ghostscript */
#include "memory_.h"
#include "ghost.h"
#include "errors.h"
#include "gsexit.h"
#include "gsmdebug.h"
#include "gsstruct.h"
#include "gsutil.h"
#include "iastate.h"
#include "isave.h"
#include "isstate.h"
#include "idict.h"
#include "ipacked.h"
#include "istruct.h"
#include "igc.h"
#include "igcstr.h"
#include "inamedef.h"
#include "opdef.h"		/* for marking oparray names */

/* Define whether to force all garbage collections to be global. */
private const bool I_FORCE_GLOBAL_GC = false;

/* Define whether to bypass the collector entirely. */
private const bool I_BYPASS_GC = false;

/* Avoid including all of iname.h. */
extern name_table *the_gs_name_table;

/* Define an entry on the mark stack. */
typedef struct {
    void *ptr;
    uint index;
    bool is_refs;
} ms_entry;

/* Define (a segment of) the mark stack. */
/* entries[0] has ptr = 0 to indicate the bottom of the stack. */
/* count additional entries follow this structure. */
typedef struct gc_mark_stack_s gc_mark_stack;
struct gc_mark_stack_s {
    gc_mark_stack *prev;
    gc_mark_stack *next;
    uint count;
    bool on_heap;		/* if true, allocated during GC */
    ms_entry entries[1];
};

/* Define the mark stack sizing parameters. */
#define ms_size_default 100	/* default, allocated on C stack */
/* This should probably be defined as a parameter somewhere.... */
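/*
 * E.g., with purely illustrative sizes of 16 bytes for gc_mark_stack and
 * 12 bytes for ms_entry, and max_ushort = 65535, ms_size_desired below
 * works out to (65535 - 16) / 12 - 10 = 5449 entries: roughly the largest
 * segment that fits in a max_ushort-sized allocation, less a small
 * safety margin.
 */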
#define ms_size_desired		/* for additional allocation */\
  ((max_ushort - sizeof(gc_mark_stack)) / sizeof(ms_entry) - 10)
#define ms_size_min 50		/* min size for segment in free block */

/* Forward references */
private void gc_init_mark_stack(P2(gc_mark_stack *, uint));
private void gc_objects_clear_marks(P1(chunk_t *));
private void gc_unmark_names(P1(name_table *));
private int gc_trace(P3(gs_gc_root_t *, gc_state_t *, gc_mark_stack *));
private int gc_rescan_chunk(P3(chunk_t *, gc_state_t *, gc_mark_stack *));
private int gc_trace_chunk(P3(chunk_t *, gc_state_t *, gc_mark_stack *));
private bool gc_trace_finish(P1(gc_state_t *));
private void gc_clear_reloc(P1(chunk_t *));
private void gc_objects_set_reloc(P1(chunk_t *));
private void gc_do_reloc(P3(chunk_t *, gs_ref_memory_t *, gc_state_t *));
private void gc_objects_compact(P2(chunk_t *, gc_state_t *));
private void gc_free_empty_chunks(P1(gs_ref_memory_t *));

/* Forward references for pointer types */
private ptr_proc_unmark(ptr_struct_unmark);
private ptr_proc_mark(ptr_struct_mark);
private ptr_proc_unmark(ptr_string_unmark);
private ptr_proc_mark(ptr_string_mark);
/*ptr_proc_unmark(ptr_ref_unmark); *//* in igc.h */
/*ptr_proc_mark(ptr_ref_mark); *//* in igc.h */
private ptr_proc_reloc(igc_reloc_struct_ptr, void);

ptr_proc_reloc(igc_reloc_ref_ptr, ref_packed);	/* in igcref.c */
refs_proc_reloc(igc_reloc_refs);	/* in igcref.c */

/* Define this GC's procedure vector. */
private const gc_procs_with_refs_t igc_procs = {
    igc_reloc_struct_ptr, igc_reloc_string, igc_reloc_const_string,
    igc_reloc_ref_ptr, igc_reloc_refs
};

/* Pointer type descriptors. */
/* Note that the trace/mark routine has special knowledge of ptr_ref_type */
/* and ptr_struct_type -- it assumes that no other types have embedded */
/* pointers.  Note also that the reloc procedures for string and ref */
/* pointers are never called. */
typedef ptr_proc_reloc((*ptr_proc_reloc_t), void);
const gs_ptr_procs_t ptr_struct_procs =
{ptr_struct_unmark, ptr_struct_mark, (ptr_proc_reloc_t) igc_reloc_struct_ptr};
const gs_ptr_procs_t ptr_string_procs =
{ptr_string_unmark, ptr_string_mark, NULL};
const gs_ptr_procs_t ptr_const_string_procs =
{ptr_string_unmark, ptr_string_mark, NULL};
const gs_ptr_procs_t ptr_ref_procs =
{ptr_ref_unmark, ptr_ref_mark, NULL};

/* ------ Main program ------ */

/* Top level of garbage collector. */
#ifdef DEBUG
private void
end_phase(const char *str)
{
    if (gs_debug_c('6')) {
	dlprintf1("[6]---------------- end %s ----------------\n",
		  (const char *)str);
	dflush();
    }
}
static const char *const depth_dots_string = "..........";
private const char *
depth_dots(const ms_entry * sp, const gc_mark_stack * pms)
{
    int depth = sp - pms->entries - 1;
    const gc_mark_stack *pss = pms;

    while ((pss = pss->prev) != 0)
	depth += pss->count - 1;
    return depth_dots_string + (depth >= 10 ? 0 : 10 - depth);
}
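/* depth_dots returns a suffix of depth_dots_string whose length is */
/* min(depth, 10): e.g. a traversal depth of 3 prints "...", and any */
/* depth of 10 or more prints all ten dots. */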
private void
gc_validate_spaces(gs_ref_memory_t **spaces, int max_space, gc_state_t *gcst)
{
    int i;
    gs_ref_memory_t *mem;

    for (i = 1; i <= max_space; ++i)
	if ((mem = spaces[i]) != 0)
	    ialloc_validate_memory(mem, gcst);
}
#else /* !DEBUG */
# define end_phase(str) DO_NOTHING
#endif /* DEBUG */
void
gs_gc_reclaim(vm_spaces * pspaces, bool global)
{
#define nspaces ((i_vm_max + 1) * 2)	/* * 2 for stable allocators */

    vm_spaces spaces;
    gs_ref_memory_t *space_memories[nspaces];
    gs_gc_root_t space_roots[nspaces];
    int max_trace;		/* max space_ to trace */
    int min_collect;		/* min space_ to collect */
    int min_collect_vm_space;	/* min VM space to collect */
    int ispace;
    gs_ref_memory_t *mem;
    chunk_t *cp;
    gs_gc_root_t *rp;
    gc_state_t state;
    struct _msd {
	gc_mark_stack stack;
	ms_entry body[ms_size_default];
    } ms_default;
    gc_mark_stack *mark_stack = &ms_default.stack;

    /* Optionally force global GC for debugging. */

    if (I_FORCE_GLOBAL_GC)
	global = true;

    /* Determine which spaces we are tracing and collecting. */

    spaces = *pspaces;
    space_memories[1] = space_system;
    space_memories[2] = space_global;
    min_collect = max_trace = 2;
    min_collect_vm_space = i_vm_global;
    if (space_global->stable_memory != (gs_memory_t *)space_global)
	space_memories[++max_trace] =
	    (gs_ref_memory_t *)space_global->stable_memory;
    if (space_global != space_local) {
	space_memories[++max_trace] = space_local;
	min_collect = max_trace;
	min_collect_vm_space = i_vm_local;
	if (space_local->stable_memory != (gs_memory_t *)space_local)
	    space_memories[++max_trace] =
		(gs_ref_memory_t *)space_local->stable_memory;
    }
    if (global)
	min_collect = min_collect_vm_space = 1;

#define for_spaces(i, n)\
  for (i = 1; i <= n; ++i)
#define for_collected_spaces(i)\
  for (i = min_collect; i <= max_trace; ++i)
#define for_space_mems(i, mem)\
  for (mem = space_memories[i]; mem != 0; mem = &mem->saved->state)
#define for_mem_chunks(mem, cp)\
  for (cp = (mem)->cfirst; cp != 0; cp = cp->cnext)
#define for_space_chunks(i, mem, cp)\
  for_space_mems(i, mem) for_mem_chunks(mem, cp)
#define for_chunks(n, mem, cp)\
  for_spaces(ispace, n) for_space_chunks(ispace, mem, cp)
#define for_collected_chunks(mem, cp)\
  for_collected_spaces(ispace) for_space_chunks(ispace, mem, cp)
#define for_roots(n, mem, rp)\
  for_spaces(ispace, n)\
    for (mem = space_memories[ispace], rp = mem->roots; rp != 0; rp = rp->next)

    /* Initialize the state. */

    state.procs = &igc_procs;
    state.loc.memory = space_global;	/* any one will do */

    state.loc.cp = 0;
    state.spaces = spaces;
    state.min_collect = min_collect_vm_space << r_space_shift;
    state.relocating_untraced = false;
    state.heap = state.loc.memory->parent;
    state.ntable = the_gs_name_table;

    /* Register the allocators themselves as roots, */
    /* so we mark and relocate the change and save lists properly. */

    for_spaces(ispace, max_trace)
	gs_register_struct_root((gs_memory_t *)space_memories[ispace],
				&space_roots[ispace],
				(void **)&space_memories[ispace],
				"gc_top_level");

    end_phase("register space roots");
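    /*
     * In outline, the phases that follow are: clear marks in the
     * collected spaces and in the roots; mark everything reachable
     * (rescanning chunks on mark stack overflow); finish tracing names
     * if this is a global GC; compute relocation from the marks;
     * relocate all pointers; compact the collected chunks; free
     * now-empty chunks; and finally update the allocator statistics.
     */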

#ifdef DEBUG

    /* Pre-validate the state.  This shouldn't be necessary.... */

    gc_validate_spaces(space_memories, max_trace, &state);

    end_phase("pre-validate pointers");

#endif

    if (I_BYPASS_GC) {		/* Don't collect at all. */
	goto no_collect;
    }

    /* Clear marks in spaces to be collected. */

    for_collected_spaces(ispace)
	for_space_chunks(ispace, mem, cp) {
	    gc_objects_clear_marks(cp);
	    gc_strings_set_marks(cp, false);
	}

    end_phase("clear chunk marks");

    /* Clear the marks of roots.  We must do this explicitly, */
    /* since some roots are not in any chunk. */

    for_roots(max_trace, mem, rp) {
	enum_ptr_t eptr;

	eptr.ptr = *rp->p;
	if_debug_root('6', "[6]unmarking root", rp);
	(*rp->ptype->unmark)(&eptr, &state);
    }

    end_phase("clear root marks");

    if (global)
	gc_unmark_names(state.ntable);

    /* Initialize the (default) mark stack. */

    gc_init_mark_stack(&ms_default.stack, ms_size_default);
    ms_default.stack.prev = 0;
    ms_default.stack.on_heap = false;

    /* Add all large-enough free blocks to the mark stack. */
    /* Also initialize the rescan pointers. */

    {
	gc_mark_stack *end = mark_stack;

	for_chunks(max_trace, mem, cp) {
	    uint avail = cp->ctop - cp->cbot;

	    if (avail >= sizeof(gc_mark_stack) + sizeof(ms_entry) *
		ms_size_min &&
		!cp->inner_count
		) {
		gc_mark_stack *pms = (gc_mark_stack *) cp->cbot;

		gc_init_mark_stack(pms, (avail - sizeof(gc_mark_stack)) /
				   sizeof(ms_entry));
		end->next = pms;
		pms->prev = end;
		pms->on_heap = false;
		if_debug2('6', "[6]adding free 0x%lx(%u) to mark stack\n",
			  (ulong) pms, pms->count);
	    }
	    cp->rescan_bot = cp->cend;
	    cp->rescan_top = cp->cbase;
	}
    }

    /* Mark reachable objects. */

    {
	int more = 0;

	/* Mark from roots. */

	for_roots(max_trace, mem, rp) {
	    if_debug_root('6', "[6]marking root", rp);
	    more |= gc_trace(rp, &state, mark_stack);
	}

	end_phase("mark");

	/* If this is a local GC, mark from non-local chunks. */

	if (!global)
	    for_chunks(min_collect - 1, mem, cp)
		more |= gc_trace_chunk(cp, &state, mark_stack);

	/* Handle mark stack overflow. */

	while (more < 0) {	/* stack overflowed */
	    more = 0;
	    for_chunks(max_trace, mem, cp)
		more |= gc_rescan_chunk(cp, &state, mark_stack);
	}

	end_phase("mark overflow");
    }

    /* Free the mark stack. */

    {
	gc_mark_stack *pms = mark_stack;

	while (pms->next)
	    pms = pms->next;
	while (pms) {
	    gc_mark_stack *prev = pms->prev;

	    if (pms->on_heap)
		gs_free_object(state.heap, pms, "gc mark stack");
	    else
		gs_alloc_fill(pms, gs_alloc_fill_free,
			      sizeof(*pms) + sizeof(ms_entry) * pms->count);
	    pms = prev;
	}
    }

    end_phase("free mark stack");

    if (global) {
	gc_trace_finish(&state);
	names_trace_finish(state.ntable, &state);

	end_phase("finish trace");
    }
    /* Clear marks and relocation in spaces that are only being traced. */
    /* We have to clear the marks first, because we want the */
    /* relocation to wind up as o_untraced, not o_unmarked. */
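    /* (o_unmarked objects are treated as free when relocation is */
    /* computed, whereas o_untraced objects keep their storage and */
    /* still get their pointers relocated; see gc_objects_set_reloc */
    /* and gc_do_reloc below.) */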

    for_chunks(min_collect - 1, mem, cp)
	gc_objects_clear_marks(cp);

    end_phase("post-clear marks");

    for_chunks(min_collect - 1, mem, cp)
	gc_clear_reloc(cp);

    end_phase("clear reloc");

    /* Set the relocation of roots outside any chunk to o_untraced, */
    /* so we won't try to relocate pointers to them. */
    /* (Currently, there aren't any.) */

    /* Disable freeing in the allocators of the spaces we are */
    /* collecting, so finalization procedures won't cause problems. */
    {
	int i;

	for_collected_spaces(i)
	    gs_enable_free((gs_memory_t *)space_memories[i], false);
    }

    /* Compute relocation based on marks, in the spaces */
    /* we are going to compact.  Also finalize freed objects. */

    for_collected_chunks(mem, cp) {
	gc_objects_set_reloc(cp);
	gc_strings_set_reloc(cp);
    }

    /* Re-enable freeing. */
    {
	int i;

	for_collected_spaces(i)
	    gs_enable_free((gs_memory_t *)space_memories[i], true);
    }

    end_phase("set reloc");

    /* Relocate pointers. */

    state.relocating_untraced = true;
    for_chunks(min_collect - 1, mem, cp)
	gc_do_reloc(cp, mem, &state);
    state.relocating_untraced = false;
    for_collected_chunks(mem, cp)
	gc_do_reloc(cp, mem, &state);

    end_phase("relocate chunks");

    for_roots(max_trace, mem, rp) {
	if_debug3('6', "[6]relocating root 0x%lx: 0x%lx -> 0x%lx\n",
		  (ulong) rp, (ulong) rp->p, (ulong) * rp->p);
	if (rp->ptype == ptr_ref_type) {
	    ref *pref = (ref *) * rp->p;

	    igc_reloc_refs((ref_packed *) pref,
			   (ref_packed *) (pref + 1),
			   &state);
	} else
	    *rp->p = (*rp->ptype->reloc) (*rp->p, &state);
	if_debug3('6', "[6]relocated root 0x%lx: 0x%lx -> 0x%lx\n",
		  (ulong) rp, (ulong) rp->p, (ulong) * rp->p);
    }

    end_phase("relocate roots");

    /* Compact data.  We only do this for spaces we are collecting. */

    for_collected_spaces(ispace) {
	for_space_mems(ispace, mem) {
	    for_mem_chunks(mem, cp) {
		if_debug_chunk('6', "[6]compacting chunk", cp);
		gc_objects_compact(cp, &state);
		gc_strings_compact(cp);
		if_debug_chunk('6', "[6]after compaction:", cp);
		if (mem->pcc == cp)
		    mem->cc = *cp;
	    }
	    mem->saved = mem->reloc_saved;
	    ialloc_reset_free(mem);
	}
    }

    end_phase("compact");

    /* Free empty chunks. */

    for_collected_spaces(ispace) {
	for_space_mems(ispace, mem) {
	    gc_free_empty_chunks(mem);
	}
    }

    end_phase("free empty chunks");

    /*
     * Update previous_status to reflect any freed chunks,
     * and set inherited to the negative of allocated,
     * so it has no effect.  We must update previous_status by
     * working back-to-front along the save chain, using pointer reversal.
     * (We could update inherited in any order, since it only uses
     * information local to the individual save level.)
     */

    for_collected_spaces(ispace) {	/* Reverse the pointers. */
	alloc_save_t *curr;
	alloc_save_t *prev = 0;
	alloc_save_t *next;
	gs_memory_status_t total;

	for (curr = space_memories[ispace]->saved; curr != 0;
	     prev = curr, curr = next
	    ) {
	    next = curr->state.saved;
	    curr->state.saved = prev;
	}
	/* Now work the other way, accumulating the values. */
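	/* (Each level's previous_status is set from the running total */
	/* before that level's own figures are folded in via */
	/* gs_memory_status, so it reports the totals of all older save */
	/* levels; the reversed links are restored as we go.) */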
	total.allocated = 0, total.used = 0;
	for (curr = prev, prev = 0; curr != 0;
	     prev = curr, curr = next
	    ) {
	    mem = &curr->state;
	    next = mem->saved;
	    mem->saved = prev;
	    mem->previous_status = total;
	    if_debug3('6',
		      "[6]0x%lx previous allocated=%lu, used=%lu\n",
		      (ulong) mem, total.allocated, total.used);
	    gs_memory_status((gs_memory_t *) mem, &total);
	    mem->gc_allocated = mem->allocated + total.allocated;
	    mem->inherited = -mem->allocated;
	}
	mem = space_memories[ispace];
	mem->previous_status = total;
	mem->gc_allocated = mem->allocated + total.allocated;
	if_debug3('6', "[6]0x%lx previous allocated=%lu, used=%lu\n",
		  (ulong) mem, total.allocated, total.used);
    }

    end_phase("update stats");

  no_collect:

    /* Unregister the allocator roots. */

    for_spaces(ispace, max_trace)
	gs_unregister_root((gs_memory_t *)space_memories[ispace],
			   &space_roots[ispace], "gc_top_level");

    end_phase("unregister space roots");

#ifdef DEBUG

    /* Validate the state.  This shouldn't be necessary.... */

    gc_validate_spaces(space_memories, max_trace, &state);

    end_phase("validate pointers");

#endif
}

/* ------ Debugging utilities ------ */

/* Validate a pointer to an object header. */
#ifdef DEBUG
# define debug_check_object(pre, cp, gcst)\
    ialloc_validate_object((pre) + 1, cp, gcst)
#else
# define debug_check_object(pre, cp, gcst) DO_NOTHING
#endif

/* ------ Unmarking phase ------ */

/* Unmark a single struct. */
private void
ptr_struct_unmark(enum_ptr_t *pep, gc_state_t * ignored)
{
    void *const vptr = (void *)pep->ptr;	/* break const */

    if (vptr != 0)
	o_set_unmarked(((obj_header_t *) vptr - 1));
}

/* Unmark a single string. */
private void
ptr_string_unmark(enum_ptr_t *pep, gc_state_t * gcst)
{
    discard(gc_string_mark(pep->ptr, pep->size, false, gcst));
}

/* Unmark the objects in a chunk. */
private void
gc_objects_clear_marks(chunk_t * cp)
{
    if_debug_chunk('6', "[6]unmarking chunk", cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	struct_proc_clear_marks((*proc)) =
	    pre->o_type->clear_marks;
#ifdef DEBUG
	if (pre->o_type != &st_free)
	    debug_check_object(pre, cp, NULL);
#endif
	if_debug3('7', " [7](un)marking %s(%lu) 0x%lx\n",
		  struct_type_name_string(pre->o_type),
		  (ulong) size, (ulong) pre);
	o_set_unmarked(pre);
	if (proc != 0)
	    (*proc) (pre + 1, size, pre->o_type);
    END_OBJECTS_SCAN
}

/* Mark 0- and 1-character names, and those referenced from the */
/* op_array_nx_table, and unmark all the rest. */
private void
gc_unmark_names(name_table * nt)
{
    uint i;

    names_unmark_all(nt);
    for (i = 0; i < op_array_table_global.count; i++) {
	name_index_t nidx = op_array_table_global.nx_table[i];

	names_mark_index(nt, nidx);
    }
    for (i = 0; i < op_array_table_local.count; i++) {
	name_index_t nidx = op_array_table_local.nx_table[i];

	names_mark_index(nt, nidx);
    }
}

/* ------ Marking phase ------ */

/* Initialize (a segment of) the mark stack. */
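/* Note that count counts the entries that follow the sentinel */
/* entries[0]; gc_trace stops one entry short of the top because it */
/* stores into the entry beyond the current top before pushing. */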
private void
gc_init_mark_stack(gc_mark_stack * pms, uint count)
{
    pms->next = 0;
    pms->count = count;
    pms->entries[0].ptr = 0;
    pms->entries[0].index = 0;
    pms->entries[0].is_refs = false;
}

/* Mark starting from all marked objects in the interval of a chunk */
/* needing rescanning. */
private int
gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
    byte *sbot = cp->rescan_bot;
    byte *stop = cp->rescan_top;
    gs_gc_root_t root;
    void *comp;
    int more = 0;

    if (sbot > stop)
	return 0;
    root.p = &comp;
    if_debug_chunk('6', "[6]rescanning chunk", cp);
    cp->rescan_bot = cp->cend;
    cp->rescan_top = cp->cbase;
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	if ((byte *) (pre + 1) + size < sbot)
	    ;			/* entirely below the rescan interval: skip */
	else if ((byte *) (pre + 1) > stop)
	    return more;	/* 'break' won't work here */
	else {
	    if_debug2('7', " [7]scanning/marking 0x%lx(%lu)\n",
		      (ulong) pre, (ulong) size);
	    if (pre->o_type == &st_refs) {
		ref_packed *rp = (ref_packed *) (pre + 1);
		char *end = (char *)rp + size;

		root.ptype = ptr_ref_type;
		while ((char *)rp < end) {
		    comp = rp;
		    if (r_is_packed(rp)) {
			if (r_has_pmark(rp)) {
			    r_clear_pmark(rp);
			    more |= gc_trace(&root, pstate,
					     pmstack);
			}
			rp++;
		    } else {
			ref *const pref = (ref *)rp;

			if (r_has_attr(pref, l_mark)) {
			    r_clear_attrs(pref, l_mark);
			    more |= gc_trace(&root, pstate, pmstack);
			}
			rp += packed_per_ref;
		    }
		}
	    } else if (!o_is_unmarked(pre)) {
		struct_proc_clear_marks((*proc)) =
		    pre->o_type->clear_marks;

		root.ptype = ptr_struct_type;
		comp = pre + 1;
		if (!o_is_untraced(pre))
		    o_set_unmarked(pre);
		if (proc != 0)
		    (*proc) (comp, size, pre->o_type);
		more |= gc_trace(&root, pstate, pmstack);
	    }
	}
    END_OBJECTS_SCAN
    return more;
}

/* Mark starting from all the objects in a chunk. */
/* We assume that pstate->min_collect > avm_system, */
/* so we don't have to trace names. */
private int
gc_trace_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
    gs_gc_root_t root;
    void *comp;
    int more = 0;
    int min_trace = pstate->min_collect;

    root.p = &comp;
    if_debug_chunk('6', "[6]marking from chunk", cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
    {
	if_debug2('7', " [7]scanning/marking 0x%lx(%lu)\n",
		  (ulong) pre, (ulong) size);
	if (pre->o_type == &st_refs) {
	    ref_packed *rp = (ref_packed *) (pre + 1);
	    char *end = (char *)rp + size;

	    root.ptype = ptr_ref_type;
	    while ((char *)rp < end) {
		comp = rp;
		if (r_is_packed(rp)) {	/* No packed refs need tracing. */
		    rp++;
		} else {
		    ref *const pref = (ref *)rp;

		    if (r_space(pref) >= min_trace) {
			r_clear_attrs(pref, l_mark);
			more |= gc_trace(&root, pstate, pmstack);
		    }
		    rp += packed_per_ref;
		}
	    }
	} else if (!o_is_unmarked(pre)) {
	    if (!o_is_untraced(pre))
		o_set_unmarked(pre);
	    if (pre->o_type != &st_free) {
		struct_proc_clear_marks((*proc)) =
		    pre->o_type->clear_marks;

		root.ptype = ptr_struct_type;
		comp = pre + 1;
		if (proc != 0)
		    (*proc) (comp, size, pre->o_type);
		more |= gc_trace(&root, pstate, pmstack);
	    }
	}
    }
    END_OBJECTS_SCAN
    return more;
}
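/* gc_trace uses an explicit mark stack rather than C recursion: each */
/* ms_entry records an object (or block of refs) together with the */
/* index of the next pointer to enumerate within it, so traversal can */
/* be suspended and resumed across stack segments. */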
/* Recursively mark from a (root) pointer. */
/* Return -1 if we overflowed the mark stack, */
/* 0 if we completed successfully without marking any new objects, */
/* 1 if we completed and marked some new objects. */
private int gc_extend_stack(P2(gc_mark_stack *, gc_state_t *));
private int
gc_trace(gs_gc_root_t * rp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
    int min_trace = pstate->min_collect;
    gc_mark_stack *pms = pmstack;
    ms_entry *sp = pms->entries + 1;

    /* We stop the mark stack 1 entry early, because we store into */
    /* the entry beyond the top. */
    ms_entry *stop = sp + pms->count - 2;
    int new = 0;
    enum_ptr_t nep;
    void *nptr;
    name_table *nt = pstate->ntable;

#define mark_name(nidx)\
  BEGIN\
    if (names_mark_index(nt, nidx)) {\
	new |= 1;\
	if_debug2('8', " [8]marked name 0x%lx(%u)\n",\
		  (ulong)names_index_ptr(nt, nidx), nidx);\
    }\
  END

    nptr = *rp->p;
    if (nptr == 0)
	return 0;

    /* Initialize the stack */
    sp->ptr = nptr;
    if (rp->ptype == ptr_ref_type)
	sp->index = 1, sp->is_refs = true;
    else {
	sp->index = 0, sp->is_refs = false;
	nep.ptr = nptr;
	if ((*rp->ptype->mark) (&nep, pstate))
	    new |= 1;
    }
    for (;;) {
	gs_ptr_type_t ptp;

	/*
	 * The following should really be an if..else, but that
	 * would force unnecessary is_refs tests.
	 */
	if (sp->is_refs)
	    goto do_refs;

	/* ---------------- Structure ---------------- */

      do_struct:
	{
	    obj_header_t *ptr = sp->ptr;

	    struct_proc_enum_ptrs((*mproc));

	    if (ptr == 0) {	/* We've reached the bottom of a stack segment. */
		pms = pms->prev;
		if (pms == 0)
		    break;	/* all done */
		stop = pms->entries + pms->count - 1;
		sp = stop;
		continue;
	    }
	    debug_check_object(ptr - 1, NULL, NULL);
	  ts:if_debug4('7', " [7]%smarking %s 0x%lx[%u]",
		       depth_dots(sp, pms),
		       struct_type_name_string(ptr[-1].o_type),
		       (ulong) ptr, sp->index);
	    mproc = ptr[-1].o_type->enum_ptrs;
	    if (mproc == gs_no_struct_enum_ptrs ||
		(ptp = (*mproc)
		 (ptr, pre_obj_contents_size(ptr - 1),
		  sp->index, &nep, ptr[-1].o_type, pstate)) == 0
		) {
		if_debug0('7', " - done\n");
		sp--;
		continue;
	    }
	    /* The cast in the following statement is the one */
	    /* place we need to break 'const' to make the */
	    /* template for pointer enumeration work. */
	    nptr = (void *)nep.ptr;
	    sp->index++;
	    if_debug1('7', " = 0x%lx\n", (ulong) nptr);
	    /* Descend into nep.ptr, whose pointer type is ptp. */
	    if (ptp == ptr_struct_type) {
		sp[1].index = 0;
		sp[1].is_refs = false;
		if (sp == stop)
		    goto push;
		if (!ptr_struct_mark(&nep, pstate))
		    goto ts;
		new |= 1;
		(++sp)->ptr = nptr;
		goto do_struct;
	    } else if (ptp == ptr_ref_type) {
		sp[1].index = 1;
		sp[1].is_refs = true;
		if (sp == stop)
		    goto push;
		new |= 1;
		(++sp)->ptr = nptr;
		goto do_refs;
	    } else {		/* We assume this is some non-pointer- */
		/* containing type. */
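		/* (E.g. a string: mark it and resume enumerating the */
		/* same parent object at ts.) */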
		if ((*ptp->mark) (&nep, pstate))
		    new |= 1;
		goto ts;
	    }
	}

	/* ---------------- Refs ---------------- */

      do_refs:
	{
	    ref_packed *pptr = sp->ptr;
	    ref *rptr;

	  tr:if (!sp->index) {
		--sp;
		continue;
	    }
	    --(sp->index);
	    if_debug3('8', " [8]%smarking refs 0x%lx[%u]\n",
		      depth_dots(sp, pms), (ulong) pptr, sp->index);
	    if (r_is_packed(pptr)) {
		if (!r_has_pmark(pptr)) {
		    r_set_pmark(pptr);
		    new |= 1;
		    if (r_packed_is_name(pptr)) {
			name_index_t nidx = packed_name_index(pptr);

			mark_name(nidx);
		    }
		}
		++pptr;
		goto tr;
	    }
	    rptr = (ref *) pptr;	/* * const beyond here */
	    if (r_has_attr(rptr, l_mark)) {
		pptr = (ref_packed *)(rptr + 1);
		goto tr;
	    }
	    r_set_attrs(rptr, l_mark);
	    new |= 1;
	    if (r_space(rptr) < min_trace) {	/* Note that this always picks up all scalars. */
		pptr = (ref_packed *) (rptr + 1);
		goto tr;
	    }
	    sp->ptr = rptr + 1;
	    switch (r_type(rptr)) {
		    /* Struct cases */
		case t_file:
		    nptr = rptr->value.pfile;
		  rs:sp[1].is_refs = false;
		    sp[1].index = 0;
		    if (sp == stop) {
			ptp = ptr_struct_type;
			break;
		    }
		    nep.ptr = nptr;
		    if (!ptr_struct_mark(&nep, pstate))
			goto nr;
		    new |= 1;
		    (++sp)->ptr = nptr;
		    goto do_struct;
		case t_device:
		    nptr = rptr->value.pdevice;
		    goto rs;
		case t_fontID:
		case t_struct:
		case t_astruct:
		    nptr = rptr->value.pstruct;
		    goto rs;
		    /* Non-trivial non-struct cases */
		case t_dictionary:
		    nptr = rptr->value.pdict;
		    sp[1].index = sizeof(dict) / sizeof(ref);
		    goto rrp;
		case t_array:
		    nptr = rptr->value.refs;
		  rr:if ((sp[1].index = r_size(rptr)) == 0) {	/* Set the base pointer to 0, */
			/* so we never try to relocate it. */
			rptr->value.refs = 0;
			goto nr;
		    }
		  rrp:
		  rrc:sp[1].is_refs = true;
		    if (sp == stop) {
			/*
			 * The following initialization is unnecessary:
			 * ptp will not be used if sp[1].is_refs = true.
			 * We put this here solely to get rid of bogus
			 * "possibly uninitialized variable" warnings
			 * from certain compilers.
			 */
			ptp = ptr_ref_type;
			break;
		    }
		    new |= 1;
		    (++sp)->ptr = nptr;
		    goto do_refs;
		case t_mixedarray:
		case t_shortarray:
		    nptr = rptr->value.writable_packed;
		    goto rr;
		case t_name:
		    mark_name(names_index(nt, rptr));
		  nr:pptr = (ref_packed *) (rptr + 1);
		    goto tr;
		case t_string:
		    if (gc_string_mark(rptr->value.bytes, r_size(rptr), true, pstate))
			new |= 1;
		    goto nr;
		case t_oparray:
		    nptr = rptr->value.refs;	/* discard const */
		    sp[1].index = 1;
		    goto rrc;
		default:
		    goto nr;
	    }
	}

	/* ---------------- Recursion ---------------- */

      push:
	if (sp == stop) {	/* The current segment is full. */
	    int new_added = gc_extend_stack(pms, pstate);

	    if (new_added) {
		new |= new_added;
		continue;
	    }
	    pms = pms->next;
	    stop = pms->entries + pms->count - 1;
	    pms->entries[1] = sp[1];
	    sp = pms->entries;
	}
	/* index and is_refs are already set */
	if (!sp[1].is_refs) {
	    nep.ptr = nptr;
	    if (!(*ptp->mark) (&nep, pstate))
		continue;
	    new |= 1;
	}
	(++sp)->ptr = nptr;
    }
    return new;
}
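/* (If gc_extend_stack cannot allocate a new segment, it widens the */
/* rescan interval of the chunk containing the overflowed object and */
/* returns -1, which propagates up through gc_trace so the top level */
/* reruns gc_rescan_chunk until no more overflows occur.) */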
/* Link to, attempting to allocate if necessary, */
/* another chunk of mark stack. */
private int
gc_extend_stack(gc_mark_stack * pms, gc_state_t * pstate)
{
    if (pms->next == 0) {	/* Try to allocate another segment. */
	uint count;

	for (count = ms_size_desired; count >= ms_size_min; count >>= 1) {
	    pms->next = (gc_mark_stack *)
		gs_alloc_bytes_immovable(pstate->heap,
					 sizeof(gc_mark_stack) +
					 sizeof(ms_entry) * count,
					 "gc mark stack");
	    if (pms->next != 0)
		break;
	}
	if (pms->next == 0) {	/* The mark stack overflowed. */
	    ms_entry *sp = pms->entries + pms->count - 1;
	    byte *cptr = sp->ptr;	/* container */
	    chunk_t *cp = gc_locate(cptr, pstate);
	    int new = 1;

	    if (cp == 0) {	/* We were tracing outside collectible */
		/* storage.  This can't happen. */
		lprintf1("mark stack overflowed while outside collectible space at 0x%lx!\n",
			 (ulong) cptr);
		gs_abort();
	    }
	    if (cptr < cp->rescan_bot)
		cp->rescan_bot = cptr, new = -1;
	    if (cptr > cp->rescan_top)
		cp->rescan_top = cptr, new = -1;
	    return new;
	}
	gc_init_mark_stack(pms->next, count);
	pms->next->prev = pms;
	pms->next->on_heap = true;
    }
    return 0;
}

/* Mark a struct.  Return true if new mark. */
private bool
ptr_struct_mark(enum_ptr_t *pep, gc_state_t * ignored)
{
    obj_header_t *ptr = (obj_header_t *)pep->ptr;

    if (ptr == 0)
	return false;
    ptr--;			/* point to header */
    if (!o_is_unmarked(ptr))
	return false;
    o_mark(ptr);
    return true;
}

/* Mark a string.  Return true if new mark. */
private bool
ptr_string_mark(enum_ptr_t *pep, gc_state_t * gcst)
{
    return gc_string_mark(pep->ptr, pep->size, true, gcst);
}

/* Finish tracing by marking names. */
private bool
gc_trace_finish(gc_state_t * pstate)
{
    name_table *nt = pstate->ntable;
    name_index_t nidx = 0;
    bool marked = false;

    while ((nidx = names_next_valid_index(nt, nidx)) != 0) {
	name_string_t *pnstr = names_index_string_inline(nt, nidx);

	if (pnstr->mark) {
	    enum_ptr_t enst, ensst;

	    if (!pnstr->foreign_string &&
		gc_string_mark(pnstr->string_bytes, pnstr->string_size,
			       true, pstate)
		)
		marked = true;
	    enst.ptr = names_index_sub_table(nt, nidx);
	    ensst.ptr = names_index_string_sub_table(nt, nidx);
	    marked |=
		ptr_struct_mark(&enst, pstate) |
		ptr_struct_mark(&ensst, pstate);
	}
    }
    return marked;
}

/* ------ Relocation planning phase ------ */

/* Initialize the relocation information in the chunk header. */
private void
gc_init_reloc(chunk_t * cp)
{
    chunk_head_t *chead = cp->chead;

    chead->dest = cp->cbase;
    chead->free.o_back =
	offset_of(chunk_head_t, free) >> obj_back_shift;
    chead->free.o_size = sizeof(obj_header_t);
    chead->free.o_nreloc = 0;
}
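/*
 * Relocation encoding sketch: a free object's o_back points back (in
 * obj_back_shift units) to the chunk head, and its o_nreloc holds the
 * running total of freed bytes up to and including itself; a useful
 * object's o_back points back to the most recent free object (or to
 * the chunk head's dummy free object), from which igc_reloc_struct_ptr
 * recovers both the chunk head and the relocation amount.
 */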
/* Set marks and clear relocation for chunks that won't be compacted. */
private void
gc_clear_reloc(chunk_t * cp)
{
    byte *pfree = (byte *) & cp->chead->free;

    gc_init_reloc(cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	const struct_shared_procs_t *procs =
	    pre->o_type->shared;

	if (procs != 0)
	    (*procs->clear_reloc) (pre, size);
	o_set_untraced(pre);
	pre->o_back = ((byte *) pre - pfree) >> obj_back_shift;
    END_OBJECTS_SCAN
    gc_strings_set_marks(cp, true);
    gc_strings_clear_reloc(cp);
}

/* Set the relocation for the objects in a chunk. */
/* This will never be called for a chunk with any o_untraced objects. */
private void
gc_objects_set_reloc(chunk_t * cp)
{
    uint reloc = 0;
    chunk_head_t *chead = cp->chead;
    byte *pfree = (byte *) & chead->free;	/* most recent free object */

    if_debug_chunk('6', "[6]setting reloc for chunk", cp);
    gc_init_reloc(cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	struct_proc_finalize((*finalize));
	const struct_shared_procs_t *procs =
	    pre->o_type->shared;

	if ((procs == 0 ? o_is_unmarked(pre) :
	     !(*procs->set_reloc) (pre, reloc, size))
	    ) {			/* Free object */
	    reloc += sizeof(obj_header_t) + obj_align_round(size);
	    if ((finalize = pre->o_type->finalize) != 0) {
		if_debug2('u', "[u]GC finalizing %s 0x%lx\n",
			  struct_type_name_string(pre->o_type),
			  (ulong) (pre + 1));
		(*finalize) (pre + 1);
	    }
	    pfree = (byte *) pre;
	    pre->o_back = (pfree - (byte *) chead) >> obj_back_shift;
	    pre->o_nreloc = reloc;
	    if_debug3('7', " [7]at 0x%lx, unmarked %lu, new reloc = %u\n",
		      (ulong) pre, (ulong) size, reloc);
	} else {		/* Useful object */
	    debug_check_object(pre, cp, NULL);
	    pre->o_back = ((byte *) pre - pfree) >> obj_back_shift;
	}
    END_OBJECTS_SCAN
#ifdef DEBUG
    if (reloc != 0) {
	if_debug1('6', "[6]freed %u", reloc);
	if_debug_chunk('6', " in", cp);
    }
#endif
}

/* ------ Relocation phase ------ */

/* Relocate the pointers in all the objects in a chunk. */
private void
gc_do_reloc(chunk_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
{
    chunk_head_t *chead = cp->chead;

    if_debug_chunk('6', "[6]relocating in chunk", cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	/* We need to relocate the pointers in an object iff */
	/* it is o_untraced, or it is a useful object. */
	/* An object is free iff its back pointer points to */
	/* the chunk_head structure. */
	if (o_is_untraced(pre) ||
	    pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead
	    ) {
	    struct_proc_reloc_ptrs((*proc)) =
		pre->o_type->reloc_ptrs;

	    if_debug3('7',
		      " [7]relocating ptrs in %s(%lu) 0x%lx\n",
		      struct_type_name_string(pre->o_type),
		      (ulong) size, (ulong) pre);
	    if (proc != 0)
		(*proc) (pre + 1, size, pre->o_type, pstate);
	}
    END_OBJECTS_SCAN
}

/* Print pointer relocation if debugging. */
/* We have to provide this procedure even if DEBUG is not defined, */
/* in case one of the other GC modules was compiled with DEBUG. */
const void *
print_reloc_proc(const void *obj, const char *cname, const void *robj)
{
    if_debug3('9', " [9]relocate %s * 0x%lx to 0x%lx\n",
	      cname, (ulong)obj, (ulong)robj);
    return robj;
}

/* Relocate a pointer to an (aligned) object. */
/* See gsmemory.h for why the argument is const and the result is not. */
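/* For example (figures purely illustrative): if the most recent free */
/* object preceding obj records o_nreloc = 96, then obj relocates to */
/* chead->dest + (obj's offset past the chunk header contents - 96), */
/* i.e. everything surviving after that free object slides down by */
/* the 96 freed bytes. */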
private void /*obj_header_t */ *
igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
{
    const obj_header_t *const optr = (const obj_header_t *)obj;
    const void *robj;

    if (obj == 0) {
	discard(print_reloc(obj, "NULL", 0));
	return 0;
    }
    debug_check_object(optr - 1, NULL, gcst);
    {
	uint back = optr[-1].o_back;

	if (back == o_untraced)
	    robj = obj;
	else {
#ifdef DEBUG
	    /* Do some sanity checking. */
	    if (back > gcst->space_local->chunk_size >> obj_back_shift) {
		lprintf2("Invalid back pointer %u at 0x%lx!\n",
			 back, (ulong) obj);
		gs_abort();
	    }
#endif
	    {
		const obj_header_t *pfree = (const obj_header_t *)
		    ((const char *)(optr - 1) -
		     (back << obj_back_shift));
		const chunk_head_t *chead = (const chunk_head_t *)
		    ((const char *)pfree -
		     (pfree->o_back << obj_back_shift));

		robj = chead->dest +
		    ((const char *)obj - (const char *)(chead + 1) -
		     pfree->o_nreloc);
	    }
	}
    }
    /* Use a severely deprecated pun to remove the const property. */
    {
	union { const void *r; void *w; } u;

	u.r = print_reloc(obj, struct_type_name_string(optr[-1].o_type), robj);
	return u.w;
    }
}

/* ------ Compaction phase ------ */

/* Compact the objects in a chunk. */
/* This will never be called for a chunk with any o_untraced objects. */
private void
gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
{
    chunk_head_t *chead = cp->chead;
    obj_header_t *dpre = (obj_header_t *) chead->dest;

    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	/* An object is free iff its back pointer points to */
	/* the chunk_head structure. */
	if (pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead) {
	    const struct_shared_procs_t *procs = pre->o_type->shared;

	    debug_check_object(pre, cp, gcst);
	    if_debug4('7',
		      " [7]compacting %s 0x%lx(%lu) to 0x%lx\n",
		      struct_type_name_string(pre->o_type),
		      (ulong) pre, (ulong) size, (ulong) dpre);
	    if (procs == 0) {
		if (dpre != pre)
		    memmove(dpre, pre,
			    sizeof(obj_header_t) + size);
	    } else
		(*procs->compact) (pre, dpre, size);
	    dpre = (obj_header_t *)
		((byte *) dpre + obj_size_round(size));
	}
    END_OBJECTS_SCAN
    if (cp->outer == 0 && chead->dest != cp->cbase)
	dpre = (obj_header_t *) cp->cbase;	/* compacted this chunk into another */
    gs_alloc_fill(dpre, gs_alloc_fill_collected, cp->cbot - (byte *) dpre);
    cp->cbot = (byte *) dpre;
    cp->rcur = 0;
    cp->rtop = 0;		/* just to be sure */
}

/* ------ Cleanup ------ */

/* Free empty chunks. */
private void
gc_free_empty_chunks(gs_ref_memory_t * mem)
{
    chunk_t *cp;
    chunk_t *csucc;

    /* Free the chunks in reverse order, */
    /* to encourage LIFO behavior. */
    for (cp = mem->clast; cp != 0; cp = csucc) {	/* Make sure this isn't an inner chunk, */
	/* or a chunk that has inner chunks. */
	csucc = cp->cprev;	/* save before freeing */
	if (cp->cbot == cp->cbase && cp->ctop == cp->climit &&
	    cp->outer == 0 && cp->inner_count == 0
	    ) {
	    alloc_free_chunk(cp, mem);
	    if (mem->pcc == cp)
		mem->pcc = 0;
	}
    }
}