/* Copyright (C) 1993, 1996, 1997, 1998, 1999 Aladdin Enterprises.  All rights reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   For more information about licensing, please refer to
   http://www.ghostscript.com/licensing/. For information on
   commercial licensing, go to http://www.artifex.com/licensing/ or
   contact Artifex Software, Inc., 101 Lucas Valley Road #110,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861.
 */

/* $Id: igc.c,v 1.15 2005/09/05 13:58:55 leonardo Exp $ */
/* Garbage collector for Ghostscript */
#include "memory_.h"
#include "ghost.h"
#include "ierrors.h"
#include "gsexit.h"
#include "gsmdebug.h"
#include "gsstruct.h"
#include "gsutil.h"
#include "iastate.h"
#include "isave.h"
#include "isstate.h"
#include "idict.h"
#include "ipacked.h"
#include "istruct.h"
#include "igc.h"
#include "igcstr.h"
#include "inamedef.h"
#include "opdef.h"		/* for marking oparray names */

/* Define whether to force all garbage collections to be global. */
private const bool I_FORCE_GLOBAL_GC = false;

/* Define whether to bypass the collector entirely. */
private const bool I_BYPASS_GC = false;

/* Define an entry on the mark stack. */
typedef struct {
    void *ptr;
    uint index;
    bool is_refs;
} ms_entry;

/* Define (a segment of) the mark stack. */
/* entries[0] has ptr = 0 to indicate the bottom of the stack. */
/* count additional entries follow this structure. */
typedef struct gc_mark_stack_s gc_mark_stack;
struct gc_mark_stack_s {
    gc_mark_stack *prev;
    gc_mark_stack *next;
    uint count;
    bool on_heap;		/* if true, allocated during GC */
    ms_entry entries[1];
};

/* Define the mark stack sizing parameters. */
#define ms_size_default 100	/* default, allocated on C stack */
/* This should probably be defined as a parameter somewhere.... */
#define ms_size_desired		/* for additional allocation */\
  ((max_ushort - sizeof(gc_mark_stack)) / sizeof(ms_entry) - 10)
#define ms_size_min 50		/* min size for segment in free block */
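/*
 * Illustrative sizing arithmetic (not from the original source): on a
 * hypothetical 32-bit build where sizeof(gc_mark_stack) == 28 and
 * sizeof(ms_entry) == 12, ms_size_desired would come to
 * (65535 - 28) / 12 - 10 = 5448 entries, so each heap-allocated
 * segment stays just under the max_ushort (64K) allocation limit.
 */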
/* Forward references */

private void gc_init_mark_stack(gc_mark_stack *, uint);
private void gc_objects_clear_marks(const gs_memory_t *mem, chunk_t *);
private void gc_unmark_names(name_table *);
private int gc_trace(gs_gc_root_t *, gc_state_t *, gc_mark_stack *);
private int gc_rescan_chunk(chunk_t *, gc_state_t *, gc_mark_stack *);
private int gc_trace_chunk(const gs_memory_t *mem, chunk_t *, gc_state_t *, gc_mark_stack *);
private bool gc_trace_finish(gc_state_t *);
private void gc_clear_reloc(chunk_t *);
private void gc_objects_set_reloc(chunk_t *);
private void gc_do_reloc(chunk_t *, gs_ref_memory_t *, gc_state_t *);
private void gc_objects_compact(chunk_t *, gc_state_t *);
private void gc_free_empty_chunks(gs_ref_memory_t *);

/* Forward references for pointer types */
private ptr_proc_unmark(ptr_struct_unmark);
private ptr_proc_mark(ptr_struct_mark);
private ptr_proc_unmark(ptr_string_unmark);
private ptr_proc_mark(ptr_string_mark);
/*ptr_proc_unmark(ptr_ref_unmark); *//* in igc.h */
/*ptr_proc_mark(ptr_ref_mark); *//* in igc.h */
private ptr_proc_reloc(igc_reloc_struct_ptr, void);

ptr_proc_reloc(igc_reloc_ref_ptr, ref_packed);	/* in igcref.c */
refs_proc_reloc(igc_reloc_refs);	/* in igcref.c */

/* Define this GC's procedure vector. */
private const gc_procs_with_refs_t igc_procs = {
    igc_reloc_struct_ptr, igc_reloc_string, igc_reloc_const_string,
    igc_reloc_param_string, igc_reloc_ref_ptr, igc_reloc_refs
};

/* Pointer type descriptors. */
/* Note that the trace/mark routine has special knowledge of ptr_ref_type */
/* and ptr_struct_type -- it assumes that no other types have embedded */
/* pointers.  Note also that the reloc procedures for string and ref */
/* pointers are never called. */
typedef ptr_proc_reloc((*ptr_proc_reloc_t), void);
const gs_ptr_procs_t ptr_struct_procs =
    {ptr_struct_unmark, ptr_struct_mark, (ptr_proc_reloc_t) igc_reloc_struct_ptr};
const gs_ptr_procs_t ptr_string_procs =
    {ptr_string_unmark, ptr_string_mark, NULL};
const gs_ptr_procs_t ptr_const_string_procs =
    {ptr_string_unmark, ptr_string_mark, NULL};
const gs_ptr_procs_t ptr_ref_procs =
    {ptr_ref_unmark, ptr_ref_mark, NULL};

/* ------ Main program ------ */

/* Top level of garbage collector. */
#ifdef DEBUG
private void
end_phase(const char *str)
{
    if (gs_debug_c('6')) {
	dlprintf1("[6]---------------- end %s ----------------\n",
		  (const char *)str);
	dflush();
    }
}
static const char *const depth_dots_string = "..........";
private const char *
depth_dots(const ms_entry * sp, const gc_mark_stack * pms)
{
    int depth = sp - pms->entries - 1;
    const gc_mark_stack *pss = pms;

    while ((pss = pss->prev) != 0)
	depth += pss->count - 1;
    return depth_dots_string + (depth >= 10 ? 0 : 10 - depth);
}
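/*
 * Illustrative note (not from the original source): depth_dots returns a
 * suffix of ".........." whose length is min(depth, 10); e.g. a total
 * stack depth of 3 yields depth_dots_string + 7, i.e. "...".  It exists
 * only to indent the '7'-level debug trace by marking depth.
 */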
private void
gc_validate_spaces(gs_ref_memory_t **spaces, int max_space, gc_state_t *gcst)
{
    int i;
    gs_ref_memory_t *mem;

    for (i = 1; i <= max_space; ++i)
	if ((mem = spaces[i]) != 0)
	    ialloc_validate_memory(mem, gcst);
}
#else  /* !DEBUG */
#  define end_phase(str) DO_NOTHING
#endif /* DEBUG */

void
gs_gc_reclaim(vm_spaces * pspaces, bool global)
{
#define nspaces ((i_vm_max + 1) * 2)	/* * 2 for stable allocators */

    vm_spaces spaces;
    gs_ref_memory_t *space_memories[nspaces];
    gs_gc_root_t space_roots[nspaces];
    int max_trace;		/* max space_ to trace */
    int min_collect;		/* min space_ to collect */
    int min_collect_vm_space;	/* min VM space to collect */
    int ispace;
    gs_ref_memory_t *mem;
    chunk_t *cp;
    gs_gc_root_t *rp;
    gc_state_t state;
    struct _msd {
	gc_mark_stack stack;
	ms_entry body[ms_size_default];
    } ms_default;
    gc_mark_stack *mark_stack = &ms_default.stack;
    const gs_memory_t *cmem;

    /* Optionally force global GC for debugging. */

    if (I_FORCE_GLOBAL_GC)
	global = true;

    /* Determine which spaces we are tracing and collecting. */

    spaces = *pspaces;
    cmem = space_system->stable_memory;
    space_memories[1] = space_system;
    space_memories[2] = space_global;
    min_collect = max_trace = 2;
    min_collect_vm_space = i_vm_global;
    if (space_global->stable_memory != (gs_memory_t *)space_global)
	space_memories[++max_trace] =
	    (gs_ref_memory_t *)space_global->stable_memory;
    if (space_global != space_local) {
	space_memories[++max_trace] = space_local;
	min_collect = max_trace;
	min_collect_vm_space = i_vm_local;
	if (space_local->stable_memory != (gs_memory_t *)space_local)
	    space_memories[++max_trace] =
		(gs_ref_memory_t *)space_local->stable_memory;
    }
    if (global)
	min_collect = min_collect_vm_space = 1;

#define for_spaces(i, n)\
  for (i = 1; i <= n; ++i)
#define for_collected_spaces(i)\
  for (i = min_collect; i <= max_trace; ++i)
#define for_space_mems(i, mem)\
  for (mem = space_memories[i]; mem != 0; mem = &mem->saved->state)
#define for_mem_chunks(mem, cp)\
  for (cp = (mem)->cfirst; cp != 0; cp = cp->cnext)
#define for_space_chunks(i, mem, cp)\
  for_space_mems(i, mem) for_mem_chunks(mem, cp)
#define for_chunks(n, mem, cp)\
  for_spaces(ispace, n) for_space_chunks(ispace, mem, cp)
#define for_collected_chunks(mem, cp)\
  for_collected_spaces(ispace) for_space_chunks(ispace, mem, cp)
#define for_roots(n, mem, rp)\
  for_spaces(ispace, n)\
    for (mem = space_memories[ispace], rp = mem->roots; rp != 0; rp = rp->next)

    /* Initialize the state. */

    state.procs = &igc_procs;
    state.loc.memory = space_global;	/* any one will do */
    state.loc.cp = 0;
    state.spaces = spaces;
    state.min_collect = min_collect_vm_space << r_space_shift;
    state.relocating_untraced = false;
    state.heap = state.loc.memory->non_gc_memory;
    state.ntable = state.heap->gs_lib_ctx->gs_name_table;
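    /*
     * Illustrative summary of the space table built above (explanatory
     * note, not from the original source), for a typical configuration:
     *
     *   space_memories[1] = system
     *   space_memories[2] = global
     *   space_memories[3] = global stable  (if distinct from global)
     *   space_memories[4] = local          (if distinct from global)
     *   space_memories[5] = local stable   (if distinct from local)
     *
     * max_trace indexes the last entry; min_collect is 1 for a global GC,
     * otherwise the index of the local allocator, so outer spaces are
     * traced from but not compacted.
     */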
    /* Register the allocators themselves as roots, */
    /* so we mark and relocate the change and save lists properly. */

    for_spaces(ispace, max_trace)
	gs_register_struct_root((gs_memory_t *)space_memories[ispace],
				&space_roots[ispace],
				(void **)&space_memories[ispace],
				"gc_top_level");

    end_phase("register space roots");

#ifdef DEBUG

    /* Pre-validate the state.  This shouldn't be necessary.... */

    gc_validate_spaces(space_memories, max_trace, &state);

    end_phase("pre-validate pointers");

#endif

    if (I_BYPASS_GC) {		/* Don't collect at all. */
	goto no_collect;
    }

    /* Clear marks in spaces to be collected. */

    for_collected_spaces(ispace)
	for_space_chunks(ispace, mem, cp) {
	    gc_objects_clear_marks((const gs_memory_t *)mem, cp);
	    gc_strings_set_marks(cp, false);
	}

    end_phase("clear chunk marks");

    /* Clear the marks of roots.  We must do this explicitly, */
    /* since some roots are not in any chunk. */

    for_roots(max_trace, mem, rp) {
	enum_ptr_t eptr;

	eptr.ptr = *rp->p;
	if_debug_root('6', "[6]unmarking root", rp);
	(*rp->ptype->unmark)(&eptr, &state);
    }

    end_phase("clear root marks");

    if (global)
	gc_unmark_names(state.ntable);

    /* Initialize the (default) mark stack. */

    gc_init_mark_stack(&ms_default.stack, ms_size_default);
    ms_default.stack.prev = 0;
    ms_default.stack.on_heap = false;

    /* Add all large-enough free blocks to the mark stack. */
    /* Also initialize the rescan pointers. */

    {
	gc_mark_stack *end = mark_stack;

	for_chunks(max_trace, mem, cp) {
	    uint avail = cp->ctop - cp->cbot;

	    if (avail >= sizeof(gc_mark_stack) + sizeof(ms_entry) *
		ms_size_min &&
		!cp->inner_count
		) {
		gc_mark_stack *pms = (gc_mark_stack *) cp->cbot;

		gc_init_mark_stack(pms, (avail - sizeof(gc_mark_stack)) /
				   sizeof(ms_entry));
		end->next = pms;
		pms->prev = end;
		pms->on_heap = false;
		if_debug2('6', "[6]adding free 0x%lx(%u) to mark stack\n",
			  (ulong) pms, pms->count);
	    }
	    cp->rescan_bot = cp->cend;
	    cp->rescan_top = cp->cbase;
	}
    }

    /* Mark reachable objects. */

    {
	int more = 0;

	/* Mark from roots. */

	for_roots(max_trace, mem, rp) {
	    if_debug_root('6', "[6]marking root", rp);
	    more |= gc_trace(rp, &state, mark_stack);
	}

	end_phase("mark");

	/* If this is a local GC, mark from non-local chunks. */

	if (!global)
	    for_chunks(min_collect - 1, mem, cp)
		more |= gc_trace_chunk((const gs_memory_t *)mem, cp, &state, mark_stack);

	/* Handle mark stack overflow. */

	while (more < 0) {	/* stack overflowed */
	    more = 0;
	    for_chunks(max_trace, mem, cp)
		more |= gc_rescan_chunk(cp, &state, mark_stack);
	}

	end_phase("mark overflow");
    }
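    /*
     * Sketch of the overflow protocol above (explanatory note, not from
     * the original source): when gc_extend_stack cannot allocate another
     * segment, it widens the containing chunk's [rescan_bot, rescan_top]
     * interval and returns -1, which propagates through gc_trace into
     * 'more'.  Each pass of the while loop then rescans the recorded
     * intervals with gc_rescan_chunk until no trace overflows again.
     */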
    /* Free the mark stack. */

    {
	gc_mark_stack *pms = mark_stack;

	while (pms->next)
	    pms = pms->next;
	while (pms) {
	    gc_mark_stack *prev = pms->prev;

	    if (pms->on_heap)
		gs_free_object(state.heap, pms, "gc mark stack");
	    else
		gs_alloc_fill(pms, gs_alloc_fill_free,
			      sizeof(*pms) + sizeof(ms_entry) * pms->count);
	    pms = prev;
	}
    }

    end_phase("free mark stack");

    if (global) {
	gc_trace_finish(&state);
	names_trace_finish(state.ntable, &state);

	end_phase("finish trace");
    }

    /* Clear marks and relocation in spaces that are only being traced. */
    /* We have to clear the marks first, because we want the */
    /* relocation to wind up as o_untraced, not o_unmarked. */

    for_chunks(min_collect - 1, mem, cp)
	gc_objects_clear_marks((const gs_memory_t *)mem, cp);

    end_phase("post-clear marks");

    for_chunks(min_collect - 1, mem, cp)
	gc_clear_reloc(cp);

    end_phase("clear reloc");

    /* Set the relocation of roots outside any chunk to o_untraced, */
    /* so we won't try to relocate pointers to them. */
    /* (Currently, there aren't any.) */

    /* Disable freeing in the allocators of the spaces we are */
    /* collecting, so finalization procedures won't cause problems. */
    {
	int i;

	for_collected_spaces(i)
	    gs_enable_free((gs_memory_t *)space_memories[i], false);
    }

    /* Compute relocation based on marks, in the spaces */
    /* we are going to compact.  Also finalize freed objects. */

    for_collected_chunks(mem, cp) {
	gc_objects_set_reloc(cp);
	gc_strings_set_reloc(cp);
    }

    /* Re-enable freeing. */
    {
	int i;

	for_collected_spaces(i)
	    gs_enable_free((gs_memory_t *)space_memories[i], true);
    }

    end_phase("set reloc");

    /* Relocate pointers. */

    state.relocating_untraced = true;
    for_chunks(min_collect - 1, mem, cp)
	gc_do_reloc(cp, mem, &state);
    state.relocating_untraced = false;
    for_collected_chunks(mem, cp)
	gc_do_reloc(cp, mem, &state);

    end_phase("relocate chunks");

    for_roots(max_trace, mem, rp) {
	if_debug3('6', "[6]relocating root 0x%lx: 0x%lx -> 0x%lx\n",
		  (ulong) rp, (ulong) rp->p, (ulong) * rp->p);
	if (rp->ptype == ptr_ref_type) {
	    ref *pref = (ref *) * rp->p;

	    igc_reloc_refs((ref_packed *) pref,
			   (ref_packed *) (pref + 1),
			   &state);
	} else
	    *rp->p = (*rp->ptype->reloc) (*rp->p, &state);
	if_debug3('6', "[6]relocated root 0x%lx: 0x%lx -> 0x%lx\n",
		  (ulong) rp, (ulong) rp->p, (ulong) * rp->p);
    }

    end_phase("relocate roots");

    /* Compact data.  We only do this for spaces we are collecting. */

    for_collected_spaces(ispace) {
	for_space_mems(ispace, mem) {
	    for_mem_chunks(mem, cp) {
		if_debug_chunk('6', "[6]compacting chunk", cp);
		gc_objects_compact(cp, &state);
		gc_strings_compact(cp);
		if_debug_chunk('6', "[6]after compaction:", cp);
		if (mem->pcc == cp)
		    mem->cc = *cp;
	    }
	    mem->saved = mem->reloc_saved;
	    ialloc_reset_free(mem);
	}
    }

    end_phase("compact");

    /* Free empty chunks. */

    for_collected_spaces(ispace) {
	for_space_mems(ispace, mem) {
	    gc_free_empty_chunks(mem);
	}
    }

    end_phase("free empty chunks");

    /*
     * Update previous_status to reflect any freed chunks,
     * and set inherited to the negative of allocated,
     * so it has no effect.  We must update previous_status by
     * working back-to-front along the save chain, using pointer reversal.
     * (We could update inherited in any order, since it only uses
     * information local to the individual save level.)
     */

    for_collected_spaces(ispace) {	/* Reverse the pointers. */
	alloc_save_t *curr;
	alloc_save_t *prev = 0;
	alloc_save_t *next;
	gs_memory_status_t total;

	for (curr = space_memories[ispace]->saved; curr != 0;
	     prev = curr, curr = next
	    ) {
	    next = curr->state.saved;
	    curr->state.saved = prev;
	}
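	/*
	 * Illustrative sketch of the pointer-reversal idiom used here
	 * (not from the original source): the loop above is the classic
	 * in-place reversal of a singly linked list,
	 *
	 *     next = curr->link; curr->link = prev;
	 *     prev = curr; curr = next;
	 *
	 * applied to the save chain through state.saved.  After it runs,
	 * 'prev' is the oldest save level and the links point forward in
	 * time, so the accumulation loop below can walk oldest-to-newest
	 * while restoring the original links as it goes.
	 */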
	/* Now work the other way, accumulating the values. */
	total.allocated = 0, total.used = 0;
	for (curr = prev, prev = 0; curr != 0;
	     prev = curr, curr = next
	    ) {
	    mem = &curr->state;
	    next = mem->saved;
	    mem->saved = prev;
	    mem->previous_status = total;
	    if_debug3('6',
		      "[6]0x%lx previous allocated=%lu, used=%lu\n",
		      (ulong) mem, total.allocated, total.used);
	    gs_memory_status((gs_memory_t *) mem, &total);
	    mem->gc_allocated = mem->allocated + total.allocated;
	    mem->inherited = -(int)mem->allocated;
	}
	mem = space_memories[ispace];
	mem->previous_status = total;
	mem->gc_allocated = mem->allocated + total.allocated;
	if_debug3('6', "[6]0x%lx previous allocated=%lu, used=%lu\n",
		  (ulong) mem, total.allocated, total.used);
    }

    end_phase("update stats");

  no_collect:

    /* Unregister the allocator roots. */

    for_spaces(ispace, max_trace)
	gs_unregister_root((gs_memory_t *)space_memories[ispace],
			   &space_roots[ispace], "gc_top_level");

    end_phase("unregister space roots");

#ifdef DEBUG

    /* Validate the state.  This shouldn't be necessary.... */

    gc_validate_spaces(space_memories, max_trace, &state);

    end_phase("validate pointers");

#endif
}

/* ------ Debugging utilities ------ */

/* Validate a pointer to an object header. */
#ifdef DEBUG
#  define debug_check_object(pre, cp, gcst)\
     ialloc_validate_object((pre) + 1, cp, gcst)
#else
#  define debug_check_object(pre, cp, gcst) DO_NOTHING
#endif

/* ------ Unmarking phase ------ */

/* Unmark a single struct. */
private void
ptr_struct_unmark(enum_ptr_t *pep, gc_state_t * ignored)
{
    void *const vptr = (void *)pep->ptr;	/* break const */

    if (vptr != 0)
	o_set_unmarked(((obj_header_t *) vptr - 1));
}

/* Unmark a single string. */
private void
ptr_string_unmark(enum_ptr_t *pep, gc_state_t * gcst)
{
    discard(gc_string_mark(pep->ptr, pep->size, false, gcst));
}

/* Unmark the objects in a chunk. */
private void
gc_objects_clear_marks(const gs_memory_t *mem, chunk_t * cp)
{
    if_debug_chunk('6', "[6]unmarking chunk", cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	struct_proc_clear_marks((*proc)) =
	    pre->o_type->clear_marks;
#ifdef DEBUG
	if (pre->o_type != &st_free)
	    debug_check_object(pre, cp, NULL);
#endif
	if_debug3('7', " [7](un)marking %s(%lu) 0x%lx\n",
		  struct_type_name_string(pre->o_type),
		  (ulong) size, (ulong) pre);
	o_set_unmarked(pre);
	if (proc != 0)
	    (*proc) (mem, pre + 1, size, pre->o_type);
    END_OBJECTS_SCAN
}

/* Mark 0- and 1-character names, and those referenced from the */
/* op_array_nx_table, and unmark all the rest. */
private void
gc_unmark_names(name_table * nt)
{
    uint i;

    names_unmark_all(nt);
    for (i = 0; i < op_array_table_global.count; i++) {
	name_index_t nidx = op_array_table_global.nx_table[i];

	names_mark_index(nt, nidx);
    }
    for (i = 0; i < op_array_table_local.count; i++) {
	name_index_t nidx = op_array_table_local.nx_table[i];

	names_mark_index(nt, nidx);
    }
}

/* ------ Marking phase ------ */
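/*
 * Note on the mark stack discipline below (explanatory, not from the
 * original source): entries[0] of every segment is a sentinel whose ptr
 * is 0.  gc_trace recognizes that sentinel when popping and either backs
 * up to the previous segment or, if there is none, finishes the trace,
 * so no explicit per-segment bounds check is needed on the way down.
 */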
/* Initialize (a segment of) the mark stack. */
private void
gc_init_mark_stack(gc_mark_stack * pms, uint count)
{
    pms->next = 0;
    pms->count = count;
    pms->entries[0].ptr = 0;
    pms->entries[0].index = 0;
    pms->entries[0].is_refs = false;
}

/* Mark starting from all marked objects in the interval of a chunk */
/* needing rescanning. */
private int
gc_rescan_chunk(chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
    byte *sbot = cp->rescan_bot;
    byte *stop = cp->rescan_top;
    gs_gc_root_t root;
    void *comp;
    int more = 0;
    const gs_memory_t *mem = gcst_get_memory_ptr( pstate );

    if (sbot > stop)
	return 0;
    root.p = &comp;
    if_debug_chunk('6', "[6]rescanning chunk", cp);
    cp->rescan_bot = cp->cend;
    cp->rescan_top = cp->cbase;
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	if ((byte *) (pre + 1) + size < sbot);	/* empty statement: object lies below the rescan interval, skip it */
	else if ((byte *) (pre + 1) > stop)
	    return more;	/* 'break' won't work here */
	else {
	    if_debug2('7', " [7]scanning/marking 0x%lx(%lu)\n",
		      (ulong) pre, (ulong) size);
	    if (pre->o_type == &st_refs) {
		ref_packed *rp = (ref_packed *) (pre + 1);
		char *end = (char *)rp + size;

		root.ptype = ptr_ref_type;
		while ((char *)rp < end) {
		    comp = rp;
		    if (r_is_packed(rp)) {
			if (r_has_pmark(rp)) {
			    r_clear_pmark(rp);
			    more |= gc_trace(&root, pstate,
					     pmstack);
			}
			rp++;
		    } else {
			ref *const pref = (ref *)rp;

			if (r_has_attr(pref, l_mark)) {
			    r_clear_attrs(pref, l_mark);
			    more |= gc_trace(&root, pstate, pmstack);
			}
			rp += packed_per_ref;
		    }
		}
	    } else if (!o_is_unmarked(pre)) {
		struct_proc_clear_marks((*proc)) =
		    pre->o_type->clear_marks;
		root.ptype = ptr_struct_type;
		comp = pre + 1;
		if (!o_is_untraced(pre))
		    o_set_unmarked(pre);
		if (proc != 0)
		    (*proc) (mem, comp, size, pre->o_type);
		more |= gc_trace(&root, pstate, pmstack);
	    }
	}
    END_OBJECTS_SCAN
    return more;
}

/* Mark starting from all the objects in a chunk. */
/* We assume that pstate->min_collect > avm_system, */
/* so we don't have to trace names. */
private int
gc_trace_chunk(const gs_memory_t *mem, chunk_t * cp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
    gs_gc_root_t root;
    void *comp;
    int more = 0;
    int min_trace = pstate->min_collect;

    root.p = &comp;
    if_debug_chunk('6', "[6]marking from chunk", cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	{
	    if_debug2('7', " [7]scanning/marking 0x%lx(%lu)\n",
		      (ulong) pre, (ulong) size);
	    if (pre->o_type == &st_refs) {
		ref_packed *rp = (ref_packed *) (pre + 1);
		char *end = (char *)rp + size;

		root.ptype = ptr_ref_type;
		while ((char *)rp < end) {
		    comp = rp;
		    if (r_is_packed(rp)) {	/* No packed refs need tracing. */
			rp++;
		    } else {
			ref *const pref = (ref *)rp;

			if (r_space(pref) >= min_trace) {
			    r_clear_attrs(pref, l_mark);
			    more |= gc_trace(&root, pstate, pmstack);
			}
			rp += packed_per_ref;
		    }
		}
	    } else if (!o_is_unmarked(pre)) {
		if (!o_is_untraced(pre))
		    o_set_unmarked(pre);
		if (pre->o_type != &st_free) {
		    struct_proc_clear_marks((*proc)) =
			pre->o_type->clear_marks;

		    root.ptype = ptr_struct_type;
		    comp = pre + 1;
		    if (proc != 0)
			(*proc) (mem, comp, size, pre->o_type);
		    more |= gc_trace(&root, pstate, pmstack);
		}
	    }
	}
    END_OBJECTS_SCAN
    return more;
}
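/*
 * Note on the scanning stride above (explanatory, not from the original
 * source): inside an st_refs block, a packed ref occupies a single
 * ref_packed unit, while a full ref occupies packed_per_ref units, so
 * the walk advances by rp++ in the packed case and rp += packed_per_ref
 * in the full case.  r_is_packed distinguishes the two forms from the
 * first word.
 */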
/* Recursively mark from a (root) pointer. */
/* Return -1 if we overflowed the mark stack, */
/* 0 if we completed successfully without marking any new objects, */
/* 1 if we completed and marked some new objects. */
private int gc_extend_stack(gc_mark_stack *, gc_state_t *);
private int
gc_trace(gs_gc_root_t * rp, gc_state_t * pstate, gc_mark_stack * pmstack)
{
    int min_trace = pstate->min_collect;
    gc_mark_stack *pms = pmstack;
    ms_entry *sp = pms->entries + 1;

    /* We stop the mark stack 1 entry early, because we store into */
    /* the entry beyond the top. */
    ms_entry *stop = sp + pms->count - 2;
    int new = 0;
    enum_ptr_t nep;
    void *nptr;
    name_table *nt = pstate->ntable;

#define mark_name(nidx)\
  BEGIN\
    if (names_mark_index(nt, nidx)) {\
	new |= 1;\
	if_debug2('8', " [8]marked name 0x%lx(%u)\n",\
		  (ulong)names_index_ptr(nt, nidx), nidx);\
    }\
  END

    nptr = *rp->p;
    if (nptr == 0)
	return 0;

    /* Initialize the stack */
    sp->ptr = nptr;
    if (rp->ptype == ptr_ref_type)
	sp->index = 1, sp->is_refs = true;
    else {
	sp->index = 0, sp->is_refs = false;
	nep.ptr = nptr;
	if ((*rp->ptype->mark) (&nep, pstate))
	    new |= 1;
    }
    for (;;) {
	gs_ptr_type_t ptp;

	/*
	 * The following should really be an if..else, but that
	 * would force unnecessary is_refs tests.
	 */
	if (sp->is_refs)
	    goto do_refs;

	/* ---------------- Structure ---------------- */

      do_struct:
	{
	    obj_header_t *ptr = sp->ptr;

	    struct_proc_enum_ptrs((*mproc));

	    if (ptr == 0) {	/* We've reached the bottom of a stack segment. */
		pms = pms->prev;
		if (pms == 0)
		    break;	/* all done */
		stop = pms->entries + pms->count - 1;
		sp = stop;
		continue;
	    }
	    debug_check_object(ptr - 1, NULL, NULL);
	  ts:if_debug4('7', " [7]%smarking %s 0x%lx[%u]",
		       depth_dots(sp, pms),
		       struct_type_name_string(ptr[-1].o_type),
		       (ulong) ptr, sp->index);
	    mproc = ptr[-1].o_type->enum_ptrs;
	    if (mproc == gs_no_struct_enum_ptrs ||
		(ptp = (*mproc)
		 (gcst_get_memory_ptr(pstate), ptr, pre_obj_contents_size(ptr - 1),
		  sp->index, &nep, ptr[-1].o_type, pstate)) == 0
		) {
		if_debug0('7', " - done\n");
		sp--;
		continue;
	    }
	    /* The cast in the following statement is the one */
	    /* place we need to break 'const' to make the */
	    /* template for pointer enumeration work. */
	    nptr = (void *)nep.ptr;
	    sp->index++;
	    if_debug1('7', " = 0x%lx\n", (ulong) nptr);
	    /* Descend into nep.ptr, whose pointer type is ptp. */
	    if (ptp == ptr_struct_type) {
		sp[1].index = 0;
		sp[1].is_refs = false;
		if (sp == stop)
		    goto push;
		if (!ptr_struct_mark(&nep, pstate))
		    goto ts;
		new |= 1;
		(++sp)->ptr = nptr;
		goto do_struct;
	    } else if (ptp == ptr_ref_type) {
		sp[1].index = 1;
		sp[1].is_refs = true;
		if (sp == stop)
		    goto push;
		new |= 1;
		(++sp)->ptr = nptr;
		goto do_refs;
	    } else {		/* We assume this is some non-pointer- */
		/* containing type. */
		if ((*ptp->mark) (&nep, pstate))
		    new |= 1;
		goto ts;
	    }
	}
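	/*
	 * Convention for refs entries (explanatory note, not from the
	 * original source): for an entry with is_refs set, ptr is the
	 * next ref_packed to examine and index is the number of array
	 * elements (packed or full refs) still to scan; index reaching 0
	 * pops the entry.  The entry beyond the current top, sp[1], is
	 * always pre-initialized before a push, which is why each
	 * segment keeps one spare slot.
	 */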
	/* ---------------- Refs ---------------- */

      do_refs:
	{
	    ref_packed *pptr = sp->ptr;
	    ref *rptr;

	  tr:if (!sp->index) {
		--sp;
		continue;
	    }
	    --(sp->index);
	    if_debug3('8', " [8]%smarking refs 0x%lx[%u]\n",
		      depth_dots(sp, pms), (ulong) pptr, sp->index);
	    if (r_is_packed(pptr)) {
		if (!r_has_pmark(pptr)) {
		    r_set_pmark(pptr);
		    new |= 1;
		    if (r_packed_is_name(pptr)) {
			name_index_t nidx = packed_name_index(pptr);

			mark_name(nidx);
		    }
		}
		++pptr;
		goto tr;
	    }
	    rptr = (ref *) pptr;	/* * const beyond here */
	    if (r_has_attr(rptr, l_mark)) {
		pptr = (ref_packed *)(rptr + 1);
		goto tr;
	    }
	    r_set_attrs(rptr, l_mark);
	    new |= 1;
	    if (r_space(rptr) < min_trace) {	/* Note that this always picks up all scalars. */
		pptr = (ref_packed *) (rptr + 1);
		goto tr;
	    }
	    sp->ptr = rptr + 1;
	    switch (r_type(rptr)) {
		    /* Struct cases */
		case t_file:
		    nptr = rptr->value.pfile;
		  rs:sp[1].is_refs = false;
		    sp[1].index = 0;
		    if (sp == stop) {
			ptp = ptr_struct_type;
			break;
		    }
		    nep.ptr = nptr;
		    if (!ptr_struct_mark(&nep, pstate))
			goto nr;
		    new |= 1;
		    (++sp)->ptr = nptr;
		    goto do_struct;
		case t_device:
		    nptr = rptr->value.pdevice;
		    goto rs;
		case t_fontID:
		case t_struct:
		case t_astruct:
		    nptr = rptr->value.pstruct;
		    goto rs;
		    /* Non-trivial non-struct cases */
		case t_dictionary:
		    nptr = rptr->value.pdict;
		    sp[1].index = sizeof(dict) / sizeof(ref);
		    goto rrp;
		case t_array:
		    nptr = rptr->value.refs;
		  rr:if ((sp[1].index = r_size(rptr)) == 0) {	/* Set the base pointer to 0, */
			/* so we never try to relocate it. */
			rptr->value.refs = 0;
			goto nr;
		    }
		  rrp:
		  rrc:sp[1].is_refs = true;
		    if (sp == stop) {
			/*
			 * The following initialization is unnecessary:
			 * ptp will not be used if sp[1].is_refs = true.
			 * We put this here solely to get rid of bogus
			 * "possibly uninitialized variable" warnings
			 * from certain compilers.
			 */
			ptp = ptr_ref_type;
			break;
		    }
		    new |= 1;
		    (++sp)->ptr = nptr;
		    goto do_refs;
		case t_mixedarray:
		case t_shortarray:
		    nptr = rptr->value.writable_packed;
		    goto rr;
		case t_name:
		    mark_name(names_index(nt, rptr));
		  nr:pptr = (ref_packed *) (rptr + 1);
		    goto tr;
		case t_string:
		    if (gc_string_mark(rptr->value.bytes, r_size(rptr), true, pstate))
			new |= 1;
		    goto nr;
		case t_oparray:
		    nptr = rptr->value.refs;	/* discard const */
		    sp[1].index = 1;
		    goto rrc;
		default:
		    goto nr;
	    }
	}

	/* ---------------- Recursion ---------------- */

      push:
	if (sp == stop) {	/* The current segment is full. */
	    int new_added = gc_extend_stack(pms, pstate);

	    if (new_added) {
		new |= new_added;
		continue;
	    }
	    pms = pms->next;
	    stop = pms->entries + pms->count - 1;
	    pms->entries[1] = sp[1];
	    sp = pms->entries;
	}
	/* index and is_refs are already set */
	if (!sp[1].is_refs) {
	    nep.ptr = nptr;
	    if (!(*ptp->mark) (&nep, pstate))
		continue;
	    new |= 1;
	}
	(++sp)->ptr = nptr;
    }
    return new;
}

/* Link to, attempting to allocate if necessary, */
/* another chunk of mark stack. */
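/*
 * Return convention (explanatory note, not from the original source):
 * gc_extend_stack returns 0 when a further segment is available, either
 * already linked or freshly allocated, and a nonzero value when the
 * stack has truly overflowed.  In the overflow case it records the
 * overflowing object in its chunk's rescan interval, returning -1 if
 * that interval had to grow (so the caller schedules another rescan
 * pass) and 1 otherwise.
 */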
private int
gc_extend_stack(gc_mark_stack * pms, gc_state_t * pstate)
{
    if (pms->next == 0) {	/* Try to allocate another segment. */
	uint count;

	for (count = ms_size_desired; count >= ms_size_min; count >>= 1) {
	    pms->next = (gc_mark_stack *)
		gs_alloc_bytes_immovable(pstate->heap,
					 sizeof(gc_mark_stack) +
					 sizeof(ms_entry) * count,
					 "gc mark stack");
	    if (pms->next != 0)
		break;
	}
	if (pms->next == 0) {	/* The mark stack overflowed. */
	    ms_entry *sp = pms->entries + pms->count - 1;
	    byte *cptr = sp->ptr;	/* container */
	    chunk_t *cp = gc_locate(cptr, pstate);
	    int new = 1;

	    if (cp == 0) {	/* We were tracing outside collectible */
		/* storage.  This can't happen. */
		lprintf1("mark stack overflowed while outside collectible space at 0x%lx!\n",
			 (ulong) cptr);
		gs_abort(pstate->heap);
	    }
	    if (cptr < cp->rescan_bot)
		cp->rescan_bot = cptr, new = -1;
	    if (cptr > cp->rescan_top)
		cp->rescan_top = cptr, new = -1;
	    return new;
	}
	gc_init_mark_stack(pms->next, count);
	pms->next->prev = pms;
	pms->next->on_heap = true;
    }
    return 0;
}

/* Mark a struct.  Return true if new mark. */
private bool
ptr_struct_mark(enum_ptr_t *pep, gc_state_t * ignored)
{
    obj_header_t *ptr = (obj_header_t *)pep->ptr;

    if (ptr == 0)
	return false;
    ptr--;			/* point to header */
    if (!o_is_unmarked(ptr))
	return false;
    o_mark(ptr);
    return true;
}

/* Mark a string.  Return true if new mark. */
private bool
ptr_string_mark(enum_ptr_t *pep, gc_state_t * gcst)
{
    return gc_string_mark(pep->ptr, pep->size, true, gcst);
}

/* Finish tracing by marking names. */
private bool
gc_trace_finish(gc_state_t * pstate)
{
    name_table *nt = pstate->ntable;
    name_index_t nidx = 0;
    bool marked = false;

    while ((nidx = names_next_valid_index(nt, nidx)) != 0) {
	name_string_t *pnstr = names_index_string_inline(nt, nidx);

	if (pnstr->mark) {
	    enum_ptr_t enst, ensst;

	    if (!pnstr->foreign_string &&
		gc_string_mark(pnstr->string_bytes, pnstr->string_size,
			       true, pstate)
		)
		marked = true;
	    enst.ptr = names_index_sub_table(nt, nidx);
	    ensst.ptr = names_index_string_sub_table(nt, nidx);
	    marked |=
		ptr_struct_mark(&enst, pstate) |
		ptr_struct_mark(&ensst, pstate);
	}
    }
    return marked;
}

/* ------ Relocation planning phase ------ */

/* Initialize the relocation information in the chunk header. */
private void
gc_init_reloc(chunk_t * cp)
{
    chunk_head_t *chead = cp->chead;

    chead->dest = cp->cbase;
    chead->free.o_back =
	offset_of(chunk_head_t, free) >> obj_back_shift;
    chead->free.o_size = sizeof(obj_header_t);
    chead->free.o_nreloc = 0;
}
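/*
 * Back-pointer encoding used throughout the relocation phases
 * (explanatory note, not from the original source): a useful object's
 * o_back field holds ((byte *)obj - pfree) >> obj_back_shift, where
 * pfree is the most recent free object preceding it (initially the
 * chunk header's dummy 'free' object).  A free object instead points
 * back at the chunk head itself and carries in o_nreloc the cumulative
 * bytes freed up to and including it.  Following at most two back links
 * from any surviving object therefore recovers its relocation amount.
 */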
/* Set marks and clear relocation for chunks that won't be compacted. */
private void
gc_clear_reloc(chunk_t * cp)
{
    byte *pfree = (byte *) & cp->chead->free;

    gc_init_reloc(cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	const struct_shared_procs_t *procs =
	    pre->o_type->shared;

	if (procs != 0)
	    (*procs->clear_reloc) (pre, size);
	o_set_untraced(pre);
	pre->o_back = ((byte *) pre - pfree) >> obj_back_shift;
    END_OBJECTS_SCAN
    gc_strings_set_marks(cp, true);
    gc_strings_clear_reloc(cp);
}

/* Set the relocation for the objects in a chunk. */
/* This will never be called for a chunk with any o_untraced objects. */
private void
gc_objects_set_reloc(chunk_t * cp)
{
    uint reloc = 0;
    chunk_head_t *chead = cp->chead;
    byte *pfree = (byte *) & chead->free;	/* most recent free object */

    if_debug_chunk('6', "[6]setting reloc for chunk", cp);
    gc_init_reloc(cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	struct_proc_finalize((*finalize));
	const struct_shared_procs_t *procs =
	    pre->o_type->shared;

	if ((procs == 0 ? o_is_unmarked(pre) :
	     !(*procs->set_reloc) (pre, reloc, size))
	    ) {			/* Free object */
	    reloc += sizeof(obj_header_t) + obj_align_round(size);
	    if ((finalize = pre->o_type->finalize) != 0) {
		if_debug2('u', "[u]GC finalizing %s 0x%lx\n",
			  struct_type_name_string(pre->o_type),
			  (ulong) (pre + 1));
		(*finalize) (pre + 1);
	    }
	    pfree = (byte *) pre;
	    pre->o_back = (pfree - (byte *) chead) >> obj_back_shift;
	    pre->o_nreloc = reloc;
	    if_debug3('7', " [7]at 0x%lx, unmarked %lu, new reloc = %u\n",
		      (ulong) pre, (ulong) size, reloc);
	} else {		/* Useful object */
	    debug_check_object(pre, cp, NULL);
	    pre->o_back = ((byte *) pre - pfree) >> obj_back_shift;
	}
    END_OBJECTS_SCAN
#ifdef DEBUG
    if (reloc != 0) {
	if_debug1('6', "[6]freed %u", reloc);
	if_debug_chunk('6', " in", cp);
    }
#endif
}

/* ------ Relocation phase ------ */

/* Relocate the pointers in all the objects in a chunk. */
private void
gc_do_reloc(chunk_t * cp, gs_ref_memory_t * mem, gc_state_t * pstate)
{
    chunk_head_t *chead = cp->chead;

    if_debug_chunk('6', "[6]relocating in chunk", cp);
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
#ifdef DEBUG
	pstate->container = cp;
#endif
	/* We need to relocate the pointers in an object iff */
	/* it is o_untraced, or it is a useful object. */
	/* An object is free iff its back pointer points to */
	/* the chunk_head structure. */
	if (o_is_untraced(pre) ||
	    pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead
	    ) {
	    struct_proc_reloc_ptrs((*proc)) =
		pre->o_type->reloc_ptrs;

	    if_debug3('7',
		      " [7]relocating ptrs in %s(%lu) 0x%lx\n",
		      struct_type_name_string(pre->o_type),
		      (ulong) size, (ulong) pre);
	    if (proc != 0)
		(*proc) (pre + 1, size, pre->o_type, pstate);
	}
#ifdef DEBUG
	pstate->container = 0;
#endif
    END_OBJECTS_SCAN
}
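/*
 * Worked example of the address computation in igc_reloc_struct_ptr
 * below, with made-up numbers (not from the original source): suppose an
 * object sits 0x400 bytes into a chunk's data area and 0x100 bytes of
 * free objects precede it, recorded as o_nreloc in the nearest preceding
 * free object.  Its relocated address is then
 *
 *     chead->dest + (0x400 - 0x100) = chead->dest + 0x300,
 *
 * i.e. the object slides down by exactly the space freed below it.
 */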
/* Print pointer relocation if debugging. */
/* We have to provide this procedure even if DEBUG is not defined, */
/* in case one of the other GC modules was compiled with DEBUG. */
const void *
print_reloc_proc(const void *obj, const char *cname, const void *robj)
{
    if_debug3('9', " [9]relocate %s * 0x%lx to 0x%lx\n",
	      cname, (ulong)obj, (ulong)robj);
    return robj;
}

/* Relocate a pointer to an (aligned) object. */
/* See gsmemory.h for why the argument is const and the result is not. */
private void /*obj_header_t */ *
igc_reloc_struct_ptr(const void /*obj_header_t */ *obj, gc_state_t * gcst)
{
    const obj_header_t *const optr = (const obj_header_t *)obj;
    const void *robj;

    if (obj == 0) {
	discard(print_reloc(obj, "NULL", 0));
	return 0;
    }
    debug_check_object(optr - 1, NULL, gcst);
    {
	uint back = optr[-1].o_back;

	if (back == o_untraced)
	    robj = obj;
	else {
#ifdef DEBUG
	    /* Do some sanity checking. */
	    chunk_t *cp = gcst->container;

	    if (cp != 0 && cp->cbase <= (byte *)obj && (byte *)obj < cp->ctop) {
		if (back > (cp->ctop - cp->cbase) >> obj_back_shift) {
		    lprintf2("Invalid back pointer %u at 0x%lx!\n",
			     back, (ulong) obj);
		    gs_abort(NULL);
		}
	    } else {
		/* Pointed to unknown chunk.  Can't check it, sorry. */
	    }
#endif
	    {
		const obj_header_t *pfree = (const obj_header_t *)
		    ((const char *)(optr - 1) -
		     (back << obj_back_shift));
		const chunk_head_t *chead = (const chunk_head_t *)
		    ((const char *)pfree -
		     (pfree->o_back << obj_back_shift));

		robj = chead->dest +
		    ((const char *)obj - (const char *)(chead + 1) -
		     pfree->o_nreloc);
	    }
	}
    }
    /* Use a severely deprecated pun to remove the const property. */
    {
	union { const void *r; void *w; } u;

	u.r = print_reloc(obj, struct_type_name_string(optr[-1].o_type), robj);
	return u.w;
    }
}

/* ------ Compaction phase ------ */

/* Compact the objects in a chunk. */
/* This will never be called for a chunk with any o_untraced objects. */
private void
gc_objects_compact(chunk_t * cp, gc_state_t * gcst)
{
    chunk_head_t *chead = cp->chead;
    obj_header_t *dpre = (obj_header_t *) chead->dest;
    const gs_memory_t *cmem = gcst->spaces.memories.named.system->stable_memory;

    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	/* An object is free iff its back pointer points to */
	/* the chunk_head structure. */
	if (pre->o_back << obj_back_shift != (byte *) pre - (byte *) chead) {
	    const struct_shared_procs_t *procs = pre->o_type->shared;

	    debug_check_object(pre, cp, gcst);
	    if_debug4('7',
		      " [7]compacting %s 0x%lx(%lu) to 0x%lx\n",
		      struct_type_name_string(pre->o_type),
		      (ulong) pre, (ulong) size, (ulong) dpre);
	    if (procs == 0) {
		if (dpre != pre)
		    memmove(dpre, pre,
			    sizeof(obj_header_t) + size);
	    } else
		(*procs->compact) (cmem, pre, dpre, size);
	    dpre = (obj_header_t *)
		((byte *) dpre + obj_size_round(size));
	}
    END_OBJECTS_SCAN
    if (cp->outer == 0 && chead->dest != cp->cbase)
	dpre = (obj_header_t *) cp->cbase;	/* compacted this chunk into another */
    gs_alloc_fill(dpre, gs_alloc_fill_collected, cp->cbot - (byte *) dpre);
    cp->cbot = (byte *) dpre;
    cp->rcur = 0;
    cp->rtop = 0;		/* just to be sure */
}
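/*
 * Note on move safety in gc_objects_compact above (explanatory, not from
 * the original source): when a chunk is compacted in place, objects are
 * scanned in address order and each destination trails or equals its
 * source, so copies only slide data downward over already-vacated space;
 * when chead->dest points into another chunk, the regions are disjoint.
 * memmove rather than memcpy keeps the overlapping in-place case safe,
 * and the freed tail is poisoned with gs_alloc_fill_collected in debug
 * builds.
 */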
/* ------ Cleanup ------ */

/* Free empty chunks. */
private void
gc_free_empty_chunks(gs_ref_memory_t * mem)
{
    chunk_t *cp;
    chunk_t *csucc;

    /* Free the chunks in reverse order, */
    /* to encourage LIFO behavior. */
    for (cp = mem->clast; cp != 0; cp = csucc) {	/* Make sure this isn't an inner chunk, */
	/* or a chunk that has inner chunks. */
	csucc = cp->cprev;	/* save before freeing */
	if (cp->cbot == cp->cbase && cp->ctop == cp->climit &&
	    cp->outer == 0 && cp->inner_count == 0
	    ) {
	    alloc_free_chunk(cp, mem);
	    if (mem->pcc == cp)
		mem->pcc = 0;
	}
    }
}

/* Get the (stable, system-space) memory pointer out of a GC state. */
const gs_memory_t *
gcst_get_memory_ptr(gc_state_t *gcst)
{
    vm_spaces spaces = gcst->spaces;
    const gs_memory_t *cmem = space_system->stable_memory;

    return cmem;
}