/* Copyright (C) 1995, 2000 Aladdin Enterprises.  All rights reserved.

  This software is provided AS-IS with no warranty, either express or
  implied.

  This software is distributed under license and may not be copied,
  modified or distributed except as expressly authorized under the terms
  of the license contained in the file LICENSE in this distribution.

  For more information about licensing, please refer to
  http://www.ghostscript.com/licensing/. For information on
  commercial licensing, go to http://www.artifex.com/licensing/ or
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
  San Rafael, CA  94903, U.S.A., +1(415)492-9861.
*/

/* $Id: gsalloc.c,v 1.24 2005/10/12 10:45:21 leonardo Exp $ */
/* Standard memory allocator */
#include "gx.h"
#include "memory_.h"
#include "gserrors.h"
#include "gsexit.h"
#include "gsmdebug.h"
#include "gsstruct.h"
#include "gxalloc.h"
#include "stream.h"		/* for clearing stream list */

/*
 * Define whether to try consolidating space before adding a new chunk.
 * The default is not to do this, because it is computationally
 * expensive and doesn't seem to help much.  However, this is done for
 * "controlled" spaces whether or not the #define is in effect.
 */
/*#define CONSOLIDATE_BEFORE_ADDING_CHUNK */


/*
 * This allocator produces tracing messages of the form
 *      [aNMOTS]...
 * where
 *   N is the VM space number, +1 if we are allocating from stable memory.
 *   M is : for movable objects, | for immovable,
 *   O is {alloc = +, free = -, grow = >, shrink = <},
 *   T is {bytes = b, object = <, ref = $, string = >}, and
 *   S is {small freelist = f, large freelist = F, LIFO = space,
 *      own chunk = L, lost = #, lost own chunk = ~, other = .}.
 */
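/*
 * For example, a (hypothetical) trace line such as
 *      [a2:+<f]gs_alloc_struct gs_font_dir(148) = 0x8a3f20
 * would decode as: VM space 2, movable (:), allocation (+),
 * structure (<), satisfied from a small freelist (f).
 */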
#ifdef DEBUG
private int
alloc_trace_space(const gs_ref_memory_t *imem)
{
    return imem->space + (imem->stable_memory == (const gs_memory_t *)imem);
}
private void
alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname,
	    gs_memory_type_ptr_t stype, uint size, const void *ptr)
{
    if_debug7('A', "[a%d%s]%s %s(%u) %s0x%lx\n",
	      alloc_trace_space(imem), chars, client_name_string(cname),
	      (ptr == 0 || stype == 0 ? "" :
	       struct_type_name_string(stype)),
	      size, (chars[1] == '+' ? "= " : ""), (ulong) ptr);
}
private bool
alloc_size_is_ok(gs_memory_type_ptr_t stype)
{
    return (stype->ssize > 0 && stype->ssize < 0x100000);
}
#  define ALLOC_CHECK_SIZE(stype)\
    BEGIN\
      if (!alloc_size_is_ok(stype)) {\
	lprintf2("size of struct type 0x%lx is 0x%lx!\n",\
		 (ulong)(stype), (ulong)((stype)->ssize));\
	return 0;\
      }\
    END
#else
#  define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING
#  define ALLOC_CHECK_SIZE(stype) DO_NOTHING
#endif

/*
 * The structure descriptor for allocators.  Even though allocators
 * are allocated outside GC space, they reference objects within it.
 */
public_st_ref_memory();
private
ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0;
ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes);
ENUM_PTR(3, gs_ref_memory_t, saved);
ENUM_PTRS_END
private RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr)
{
    RELOC_PTR(gs_ref_memory_t, streams);
    RELOC_PTR(gs_ref_memory_t, names_array);
    RELOC_PTR(gs_ref_memory_t, changes);
    /* Don't relocate the saved pointer now -- see igc.c for details. */
    mptr->reloc_saved = RELOC_OBJ(mptr->saved);
}
RELOC_PTRS_END

/*
 * Define the flags for alloc_obj, which implements all but the fastest
 * case of allocation.
 */
typedef enum {
    ALLOC_IMMOVABLE = 1,
    ALLOC_DIRECT = 2		/* called directly, without fast-case checks */
} alloc_flags_t;

/* Forward references */
private void remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top);
private obj_header_t *large_freelist_alloc(gs_ref_memory_t *mem, uint size);
private obj_header_t *scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size);
private ulong compute_free_objects(gs_ref_memory_t *);
private obj_header_t *alloc_obj(gs_ref_memory_t *, ulong, gs_memory_type_ptr_t, alloc_flags_t, client_name_t);
private void consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem);
private void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp);
private chunk_t *alloc_acquire_chunk(gs_ref_memory_t *, ulong, bool, client_name_t);
private chunk_t *alloc_add_chunk(gs_ref_memory_t *, ulong, client_name_t);
void alloc_close_chunk(gs_ref_memory_t *);

/*
 * Define the standard implementation (with garbage collection)
 * of Ghostscript's memory manager interface.
 */
/* Raw memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable);
private gs_memory_proc_resize_object(i_resize_object);
private gs_memory_proc_free_object(i_free_object);
private gs_memory_proc_stable(i_stable);
private gs_memory_proc_status(i_status);
private gs_memory_proc_free_all(i_free_all);
private gs_memory_proc_consolidate_free(i_consolidate_free);

/* Object memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes);
private gs_memory_proc_alloc_struct(i_alloc_struct);
private gs_memory_proc_alloc_struct(i_alloc_struct_immovable);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable);
private gs_memory_proc_object_size(i_object_size);
private gs_memory_proc_object_type(i_object_type);
private gs_memory_proc_alloc_string(i_alloc_string);
private gs_memory_proc_alloc_string(i_alloc_string_immovable);
private gs_memory_proc_resize_string(i_resize_string);
private gs_memory_proc_free_string(i_free_string);
private gs_memory_proc_register_root(i_register_root);
private gs_memory_proc_unregister_root(i_unregister_root);
private gs_memory_proc_enable_free(i_enable_free);

/* We export the procedures for subclasses. */
const gs_memory_procs_t gs_ref_memory_procs =
{
    /* Raw memory procedures */
    i_alloc_bytes_immovable,
    i_resize_object,
    i_free_object,
    i_stable,
    i_status,
    i_free_all,
    i_consolidate_free,
    /* Object memory procedures */
    i_alloc_bytes,
    i_alloc_struct,
    i_alloc_struct_immovable,
    i_alloc_byte_array,
    i_alloc_byte_array_immovable,
    i_alloc_struct_array,
    i_alloc_struct_array_immovable,
    i_object_size,
    i_object_type,
    i_alloc_string,
    i_alloc_string_immovable,
    i_resize_string,
    i_free_string,
    i_register_root,
    i_unregister_root,
    i_enable_free
};

/*
 * Allocate and mostly initialize the state of an allocator (system, global,
 * or local).  Does not initialize global or space.
 */
private void *ialloc_solo(gs_memory_t *, gs_memory_type_ptr_t,
			  chunk_t **);
gs_ref_memory_t *
ialloc_alloc_state(gs_memory_t * parent, uint chunk_size)
{
    chunk_t *cp;
    gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);

    if (iimem == 0)
	return 0;
    iimem->stable_memory = (gs_memory_t *)iimem;
    iimem->procs = gs_ref_memory_procs;
    iimem->gs_lib_ctx = parent->gs_lib_ctx;
    iimem->non_gc_memory = parent;
    iimem->chunk_size = chunk_size;
    iimem->large_size = ((chunk_size / 4) & -obj_align_mod) + 1;
    iimem->is_controlled = false;
    iimem->gc_status.vm_threshold = chunk_size * 3L;
    iimem->gc_status.max_vm = max_long;
    iimem->gc_status.psignal = NULL;
    iimem->gc_status.signal_value = 0;
    iimem->gc_status.enabled = false;
    iimem->gc_status.requested = 0;
    iimem->gc_allocated = 0;
    iimem->previous_status.allocated = 0;
    iimem->previous_status.used = 0;
    ialloc_reset(iimem);
    iimem->cfirst = iimem->clast = cp;
    ialloc_set_limit(iimem);
    iimem->cc.cbot = iimem->cc.ctop = 0;
    iimem->pcc = 0;
    iimem->save_level = 0;
    iimem->new_mask = 0;
    iimem->test_mask = ~0;
    iimem->streams = 0;
    iimem->names_array = 0;
    iimem->roots = 0;
    iimem->num_contexts = 0;
    iimem->saved = 0;
    return iimem;
}
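
/*
 * A minimal usage sketch (hypothetical client code; the chunk size and
 * VM space value are illustrative only).  Per the comment above, the
 * caller must still initialize global and space:
 *
 *	gs_ref_memory_t *imem = ialloc_alloc_state(parent, 20000);
 *
 *	if (imem == 0)
 *	    return_error(gs_error_VMerror);
 *	imem->space = avm_local;   (caller-chosen VM space, e.g. in the
 *	                            PostScript interpreter)
 */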

/* Allocate a 'solo' object with its own chunk. */
private void *
ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype,
	    chunk_t ** pcp)
{	/*
	 * We can't assume that the parent uses the same object header
	 * that we do, but the GC requires that allocators have
	 * such a header.  Therefore, we prepend one explicitly.
	 */
    chunk_t *cp =
	gs_raw_alloc_struct_immovable(parent, &st_chunk,
				      "ialloc_solo(chunk)");
    uint csize =
	ROUND_UP(sizeof(chunk_head_t) + sizeof(obj_header_t) +
		 pstype->ssize,
		 obj_align_mod);
    byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
    obj_header_t *obj = (obj_header_t *) (cdata + sizeof(chunk_head_t));

    if (cp == 0 || cdata == 0)
	return 0;
    alloc_init_chunk(cp, cdata, cdata + csize, false, (chunk_t *) NULL);
    cp->cbot = cp->ctop;
    cp->cprev = cp->cnext = 0;
    /* Construct the object header "by hand". */
    obj->o_alone = 1;
    obj->o_size = pstype->ssize;
    obj->o_type = pstype;
    *pcp = cp;
    return (void *)(obj + 1);
}

/*
 * Add a chunk to an externally controlled allocator.  Such allocators
 * allocate all objects as immovable, are not garbage-collected, and
 * don't attempt to acquire additional memory on their own.
 */
int
ialloc_add_chunk(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
    chunk_t *cp;

    /* Allow acquisition of this chunk. */
    imem->is_controlled = false;
    imem->large_size = imem->chunk_size;
    imem->limit = max_long;
    imem->gc_status.max_vm = max_long;

    /* Acquire the chunk. */
    cp = alloc_add_chunk(imem, space, cname);

    /*
     * Make all allocations immovable.  Since the "movable" allocators
     * allocate within existing chunks, whereas the "immovable" ones
     * allocate in new chunks, we equate the latter to the former, even
     * though this seems backwards.
     */
    imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
    imem->procs.alloc_struct_immovable = imem->procs.alloc_struct;
    imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array;
    imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
    imem->procs.alloc_string_immovable = imem->procs.alloc_string;

    /* Disable acquisition of additional chunks. */
    imem->is_controlled = true;
    imem->limit = 0;

    return (cp ? 0 : gs_note_error(gs_error_VMerror));
}
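
/*
 * Usage sketch (hypothetical): a client managing its own memory pool
 * hands the allocator one block at a time; pool_block_size is an
 * assumed client-side value, not defined in this file:
 *
 *	int code = ialloc_add_chunk(imem, (ulong)pool_block_size,
 *				    "client pool");
 *	if (code < 0)
 *	    return code;
 */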

/* Prepare for a GC by clearing the stream list. */
/* This probably belongs somewhere else.... */
void
ialloc_gc_prepare(gs_ref_memory_t * mem)
{	/*
	 * We have to unlink every stream from its neighbors,
	 * so that referenced streams don't keep all streams around.
	 */
    while (mem->streams != 0) {
	stream *s = mem->streams;

	mem->streams = s->next;
	s->prev = s->next = 0;
    }
}

/* Initialize after a save. */
void
ialloc_reset(gs_ref_memory_t * mem)
{
    mem->cfirst = 0;
    mem->clast = 0;
    mem->cc.rcur = 0;
    mem->cc.rtop = 0;
    mem->cc.has_refs = false;
    mem->allocated = 0;
    mem->inherited = 0;
    mem->changes = 0;
    ialloc_reset_free(mem);
}

/* Initialize after a save or GC. */
void
ialloc_reset_free(gs_ref_memory_t * mem)
{
    int i;
    obj_header_t **p;

    mem->lost.objects = 0;
    mem->lost.refs = 0;
    mem->lost.strings = 0;
    mem->cfreed.cp = 0;
    for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++)
	*p = 0;
    mem->largest_free_size = 0;
}

/*
 * Set an arbitrary limit so that the amount of allocated VM does not grow
 * indefinitely even when GC is disabled.  Benchmarks have shown that
 * the resulting GC's are infrequent enough not to degrade performance
 * significantly.
 */
#define FORCE_GC_LIMIT 8000000

/* Set the allocation limit after a change in one or more of */
/* vm_threshold, max_vm, or enabled, or after a GC. */
void
ialloc_set_limit(register gs_ref_memory_t * mem)
{	/*
	 * The following code is intended to set the limit so that
	 * we stop allocating when allocated + previous_status.allocated
	 * exceeds the lesser of max_vm or (if GC is enabled)
	 * gc_allocated + vm_threshold.
	 */
    ulong max_allocated =
    (mem->gc_status.max_vm > mem->previous_status.allocated ?
     mem->gc_status.max_vm - mem->previous_status.allocated :
     0);

    if (mem->gc_status.enabled) {
	ulong limit = mem->gc_allocated + mem->gc_status.vm_threshold;

	if (limit < mem->previous_status.allocated)
	    mem->limit = 0;
	else {
	    limit -= mem->previous_status.allocated;
	    mem->limit = min(limit, max_allocated);
	}
    } else
	mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT);
    if_debug7('0', "[0]space=%d, max_vm=%ld, prev.alloc=%ld, enabled=%d,\n\
      gc_alloc=%ld, threshold=%ld => limit=%ld\n",
	      mem->space, (long)mem->gc_status.max_vm,
	      (long)mem->previous_status.allocated,
	      mem->gc_status.enabled, (long)mem->gc_allocated,
	      (long)mem->gc_status.vm_threshold, (long)mem->limit);
}
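
/*
 * Worked example of the limit computation above (illustrative numbers):
 * with max_vm = 10M, previous_status.allocated = 2M, GC enabled,
 * gc_allocated = 1M, and vm_threshold = 3M, we get max_allocated =
 * 10M - 2M = 8M and limit = (1M + 3M) - 2M = 2M, so mem->limit =
 * min(2M, 8M) = 2M.  With GC disabled, mem->limit would instead be
 * min(8M, 1M + FORCE_GC_LIMIT).
 */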

/*
 * Free all the memory owned by the allocator, except the allocator itself.
 * Note that this only frees memory at the current save level: the client
 * is responsible for restoring to the outermost level if desired.
 */
private void
i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    chunk_t *cp;

    if (free_mask & FREE_ALL_DATA) {
	chunk_t *csucc;

	/*
	 * Free the chunks in reverse order, to encourage LIFO behavior.
	 * Don't free the chunk holding the allocator itself.
	 */
	for (cp = imem->clast; cp != 0; cp = csucc) {
	    csucc = cp->cprev;	/* save before freeing */
	    if (cp->cbase + sizeof(obj_header_t) != (byte *)mem)
		alloc_free_chunk(cp, imem);
	}
    }
    if (free_mask & FREE_ALL_ALLOCATOR) {
	/* Free the chunk holding the allocator itself. */
	for (cp = imem->clast; cp != 0; cp = cp->cprev)
	    if (cp->cbase + sizeof(obj_header_t) == (byte *)mem) {
		alloc_free_chunk(cp, imem);
		break;
	    }
    }
}

/* ================ Accessors ================ */

/* Get the size of an object from the header. */
private uint
i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return pre_obj_contents_size((const obj_header_t *)obj - 1);
}

/* Get the type of a structure from the header. */
private gs_memory_type_ptr_t
i_object_type(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return ((const obj_header_t *)obj - 1)->o_type;
}

/* Get the GC status of a memory. */
void
gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat)
{
    *pstat = mem->gc_status;
}

/* Set the GC status of a memory. */
void
gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat)
{
    mem->gc_status = *pstat;
    ialloc_set_limit(mem);
}

/* Set VM threshold. */
void
gs_memory_set_vm_threshold(gs_ref_memory_t * mem, long val)
{
    gs_memory_gc_status_t stat;
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;

    gs_memory_gc_status(mem, &stat);
    stat.vm_threshold = val;
    gs_memory_set_gc_status(mem, &stat);
    gs_memory_gc_status(stable, &stat);
    stat.vm_threshold = val;
    gs_memory_set_gc_status(stable, &stat);
}

/* Set VM reclaim. */
void
gs_memory_set_vm_reclaim(gs_ref_memory_t * mem, bool enabled)
{
    gs_memory_gc_status_t stat;
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;

    gs_memory_gc_status(mem, &stat);
    stat.enabled = enabled;
    gs_memory_set_gc_status(mem, &stat);
    gs_memory_gc_status(stable, &stat);
    stat.enabled = enabled;
    gs_memory_set_gc_status(stable, &stat);
}

/* ================ Objects ================ */

/* Allocate a small object quickly if possible. */
/* The size must be substantially less than max_uint. */
/* ptr must be declared as obj_header_t *. */
/* pfl must be declared as obj_header_t **. */
#define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\
	if ( size <= max_freelist_size &&\
	     *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\
	   )\
	{	ptr = *pfl;\
		*pfl = *(obj_header_t **)ptr;\
		ptr[-1].o_size = size;\
		ptr[-1].o_type = pstype;\
		/* If debugging, clear the block in an attempt to */\
		/* track down uninitialized data errors. */\
		gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\
	}\
	else if (size > max_freelist_size &&\
		 (ptr = large_freelist_alloc(imem, size)) != 0)\
	{	ptr[-1].o_type = pstype;\
		/* If debugging, clear the block in an attempt to */\
		/* track down uninitialized data errors. */\
		gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\
	}\
	else if ( (imem->cc.ctop - (byte *)(ptr = (obj_header_t *)imem->cc.cbot))\
		>= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\
	     size < imem->large_size\
	   )\
	{	imem->cc.cbot = (byte *)ptr + obj_size_round(size);\
		ptr->o_alone = 0;\
		ptr->o_size = size;\
		ptr->o_type = pstype;\
		ptr++;\
		/* If debugging, clear the block in an attempt to */\
		/* track down uninitialized data errors. */\
		gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSE_ALLOC\
	}\
	else

private byte *
i_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    obj_header_t **pfl;

    IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl)
	alloc_trace(":+bf", imem, cname, NULL, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes)
	alloc_trace(":+bF", imem, cname, NULL, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, &st_bytes)
	alloc_trace(":+b ", imem, cname, NULL, size, obj);
    ELSE_ALLOC
    {
	obj = alloc_obj(imem, size, &st_bytes, 0, cname);
	if (obj == 0)
	    return 0;
	alloc_trace(":+b.", imem, cname, NULL, size, obj);
    }
    return (byte *) obj;
}
private byte *
i_alloc_bytes_immovable(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, size, &st_bytes,
				  ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);

    if (obj == 0)
	return 0;
    alloc_trace("|+b.", imem, cname, NULL, size, obj);
    return (byte *) obj;
}
private void *
i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
	       client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;
    obj_header_t **pfl;

    ALLOC_CHECK_SIZE(pstype);
    IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl)
	alloc_trace(":+<f", imem, cname, pstype, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype)
	alloc_trace(":+<F", imem, cname, pstype, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, pstype)
	alloc_trace(":+< ", imem, cname, pstype, size, obj);
    ELSE_ALLOC
    {
	obj = alloc_obj(imem, size, pstype, 0, cname);
	if (obj == 0)
	    return 0;
	alloc_trace(":+<.", imem, cname, pstype, size, obj);
    }
    return obj;
}
private void *
i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
			 client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    alloc_trace("|+<.", imem, cname, pstype, size, obj);
    return obj;
}
private byte *
i_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
		   client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
				  &st_bytes, ALLOC_DIRECT, cname);

    if_debug6('A', "[a%d:+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
	      alloc_trace_space(imem), client_name_string(cname),
	      (ulong) num_elements * elt_size,
	      num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}
private byte *
i_alloc_byte_array_immovable(gs_memory_t * mem, uint num_elements,
			     uint elt_size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
				  &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT,
				  cname);

    if_debug6('A', "[a%d|+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
	      alloc_trace_space(imem), client_name_string(cname),
	      (ulong) num_elements * elt_size,
	      num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}
private void *
i_alloc_struct_array(gs_memory_t * mem, uint num_elements,
		     gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
#ifdef DEBUG
    if (pstype->enum_ptrs == basic_enum_ptrs) {
	dprintf2("  i_alloc_struct_array: called with incorrect structure type (not element), struct='%s', client='%s'\n",
		pstype->sname, cname);
	return NULL;		/* fail */
    }
#endif
    obj = alloc_obj(imem,
		    (ulong) num_elements * pstype->ssize,
		    pstype, ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d:+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
	      alloc_trace_space(imem), client_name_string(cname),
	      struct_type_name_string(pstype),
	      (ulong) num_elements * pstype->ssize,
	      num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}
private void *
i_alloc_struct_array_immovable(gs_memory_t * mem, uint num_elements,
			   gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem,
		    (ulong) num_elements * pstype->ssize,
		    pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d|+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
	      alloc_trace_space(imem), client_name_string(cname),
	      struct_type_name_string(pstype),
	      (ulong) num_elements * pstype->ssize,
	      num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}
private void *
i_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
		client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp = (obj_header_t *) obj - 1;
    gs_memory_type_ptr_t pstype = pp->o_type;
    ulong old_size = pre_obj_contents_size(pp);
    ulong new_size = (ulong) pstype->ssize * new_num_elements;
    ulong old_size_rounded = obj_align_round(old_size);
    ulong new_size_rounded = obj_align_round(new_size);
    void *new_obj = NULL;

    if (old_size_rounded == new_size_rounded) {
	pp->o_size = new_size;
	new_obj = obj;
    } else
	if ((byte *)obj + old_size_rounded == imem->cc.cbot &&
	    imem->cc.ctop - (byte *)obj >= new_size_rounded ) {
	    imem->cc.cbot = (byte *)obj + new_size_rounded;
	    pp->o_size = new_size;
	    new_obj = obj;
	} else /* try to trim the object -- but only if there is room for a dummy header */
	    if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
		trim_obj(imem, obj, new_size, (chunk_t *)0);
		new_obj = obj;
	    }
    if (new_obj) {
	if_debug8('A', "[a%d:%c%c ]%s %s(%lu=>%lu) 0x%lx\n",
		  alloc_trace_space(imem),
		  (new_size > old_size ? '>' : '<'),
		  (pstype == &st_bytes ? 'b' : '<'),
		  client_name_string(cname),
		  struct_type_name_string(pstype),
		  old_size, new_size, (ulong) obj);
	return new_obj;
    }
    /* Punt. */
    new_obj = gs_alloc_struct_array(mem, new_num_elements, void,
				    pstype, cname);
    if (new_obj == 0)
	return 0;
    memcpy(new_obj, obj, min(old_size, new_size));
    gs_free_object(mem, obj, cname);
    return new_obj;
}
private void
i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp;
    gs_memory_type_ptr_t pstype;

    struct_proc_finalize((*finalize));
    uint size, rounded_size;

    if (ptr == 0)
	return;
    pp = (obj_header_t *) ptr - 1;
    pstype = pp->o_type;
#ifdef DEBUG
    if (gs_debug_c('?')) {
	chunk_locator_t cld;

	if (pstype == &st_free) {
	    lprintf2("%s: object 0x%lx already free!\n",
		     client_name_string(cname), (ulong) ptr);
	    return;		/*gs_abort(); */
	}
	/* Check that this allocator owns the object being freed. */
	cld.memory = imem;
	while ((cld.cp = cld.memory->clast),
	       !chunk_locate_ptr(ptr, &cld)
	    ) {
	    if (!cld.memory->saved) {
		lprintf3("%s: freeing 0x%lx, not owned by memory 0x%lx!\n",
			 client_name_string(cname), (ulong) ptr,
			 (ulong) mem);
		return;		/*gs_abort(); */
	    }
		  /****** HACK: we know the saved state is the first ******
		   ****** member of an alloc_save_t. ******/
	    cld.memory = (gs_ref_memory_t *) cld.memory->saved;
	}
	/* Check that the object is in the allocated region. */
	if (cld.memory == imem && cld.cp == imem->pcc)
	    cld.cp = &imem->cc;
	if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
			  cld.cp->cbot))
	    ) {
	    lprintf5("%s: freeing 0x%lx,\n\toutside chunk 0x%lx cbase=0x%lx, cbot=0x%lx!\n",
		     client_name_string(cname), (ulong) ptr,
		     (ulong) cld.cp, (ulong) cld.cp->cbase,
		     (ulong) cld.cp->cbot);
	    return;		/*gs_abort(); */
	}
    }
#endif
    size = pre_obj_contents_size(pp);
    rounded_size = obj_align_round(size);
    finalize = pstype->finalize;
    if (finalize != 0) {
	if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
		  struct_type_name_string(pstype),
		  (ulong) ptr, client_name_string(cname));
	(*finalize) (ptr);
    }
    if ((byte *) ptr + rounded_size == imem->cc.cbot) {
	alloc_trace(":-o ", imem, cname, pstype, size, ptr);
	gs_alloc_fill(ptr, gs_alloc_fill_free, size);
	imem->cc.cbot = (byte *) pp;
	/* IFF this object is adjacent to (or below) the byte after the
	 * highest free object, do the consolidation within this chunk. */
	if ((byte *)pp <= imem->cc.int_freed_top) {
	    consolidate_chunk_free(&(imem->cc), imem);
	}
	return;
    }
    if (pp->o_alone) {
		/*
		 * We gave this object its own chunk.  Free the entire chunk,
		 * unless it belongs to an older save level, in which case
		 * we mustn't overwrite it.
		 */
	chunk_locator_t cl;

#ifdef DEBUG
	{
	    chunk_locator_t cld;

	    cld.memory = imem;
	    cld.cp = 0;
	    if (gs_debug_c('a'))
		alloc_trace(
			    (chunk_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
			       imem, cname, pstype, size, ptr);
	}
#endif
	cl.memory = imem;
	cl.cp = 0;
	if (chunk_locate_ptr(ptr, &cl)) {
	    if (!imem->is_controlled)
		alloc_free_chunk(cl.cp, imem);
	    return;
	}
	/* Don't overwrite even if gs_alloc_debug is set. */
    }
    if (rounded_size >= sizeof(obj_header_t *)) {
	/*
	 * Put the object on a freelist, unless it belongs to
	 * an older save level, in which case we mustn't
	 * overwrite it.
	 */
	imem->cfreed.memory = imem;
	if (chunk_locate(ptr, &imem->cfreed)) {
	    obj_header_t **pfl;

	    if (size > max_freelist_size) {
		pfl = &imem->freelists[LARGE_FREELIST_INDEX];
		if (rounded_size > imem->largest_free_size)
		    imem->largest_free_size = rounded_size;
	    } else {
		pfl = &imem->freelists[(size + obj_align_mask) >>
				      log2_obj_align_mod];
	    }
	    /* keep track of highest object on a freelist */
	    if ((byte *)pp >= imem->cc.int_freed_top)
		imem->cc.int_freed_top = (byte *)ptr + rounded_size;
	    pp->o_type = &st_free;	/* don't confuse GC */
	    gs_alloc_fill(ptr, gs_alloc_fill_free, size);
	    *(obj_header_t **) ptr = *pfl;
	    *pfl = (obj_header_t *) ptr;
	    alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"),
			imem, cname, pstype, size, ptr);
	    return;
	}
	/* Don't overwrite even if gs_alloc_debug is set. */
    } else {
	pp->o_type = &st_free;	/* don't confuse GC */
	gs_alloc_fill(ptr, gs_alloc_fill_free, size);
    }
    alloc_trace(":-o#", imem, cname, pstype, size, ptr);
    imem->lost.objects += obj_size_round(size);
}
private byte *
i_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    /*
     * Cycle through the chunks at the current save level, starting
     * with the currently open one.
     */
    chunk_t *cp_orig = imem->pcc;

    if (cp_orig == 0) {
	/* Open an arbitrary chunk. */
	cp_orig = imem->pcc = imem->cfirst;
	alloc_open_chunk(imem);
    }
top:
    if (imem->cc.ctop - imem->cc.cbot > nbytes) {
	if_debug4('A', "[a%d:+> ]%s(%u) = 0x%lx\n",
		  alloc_trace_space(imem), client_name_string(cname), nbytes,
		  (ulong) (imem->cc.ctop - nbytes));
	str = imem->cc.ctop -= nbytes;
	gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
	return str;
    }
    /* Try the next chunk. */
    {
	chunk_t *cp = imem->cc.cnext;

	alloc_close_chunk(imem);
	if (cp == 0)
	    cp = imem->cfirst;
	imem->pcc = cp;
	alloc_open_chunk(imem);
	if (cp != cp_orig)
	    goto top;
    }
    if (nbytes > string_space_quanta(max_uint - sizeof(chunk_head_t)) *
	string_data_quantum
	) {			/* Can't represent the size in a uint! */
	return 0;
    }
    if (nbytes >= imem->large_size) {	/* Give it a chunk all its own. */
	return i_alloc_string_immovable(mem, nbytes, cname);
    } else {			/* Add another chunk. */
	chunk_t *cp =
	    alloc_acquire_chunk(imem, (ulong) imem->chunk_size, true, "chunk");

	if (cp == 0)
	    return 0;
	alloc_close_chunk(imem);
	imem->pcc = cp;
	imem->cc = *imem->pcc;
	gs_alloc_fill(imem->cc.cbase, gs_alloc_fill_free,
		      imem->cc.climit - imem->cc.cbase);
	goto top;
    }
}
private byte *
i_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    /* Give it a chunk all its own. */
    uint asize = string_chunk_space(nbytes) + sizeof(chunk_head_t);
    chunk_t *cp = alloc_acquire_chunk(imem, (ulong) asize, true,
				      "large string chunk");

    if (cp == 0)
	return 0;
    str = cp->ctop = cp->climit - nbytes;
    if_debug4('a', "[a%d|+>L]%s(%u) = 0x%lx\n",
	      alloc_trace_space(imem), client_name_string(cname), nbytes,
	      (ulong) str);
    gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
    return str;
}
private byte *
i_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
		client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *ptr;

    if (old_num == new_num)	/* same size returns the same string */
        return data;
    if (data == imem->cc.ctop &&	/* bottom-most string */
	(new_num < old_num ||
	 imem->cc.ctop - imem->cc.cbot > new_num - old_num)
	) {			/* Resize in place. */
	ptr = data + old_num - new_num;
	if_debug6('A', "[a%d:%c> ]%s(%u->%u) 0x%lx\n",
		  alloc_trace_space(imem),
		  (new_num > old_num ? '>' : '<'),
		  client_name_string(cname), old_num, new_num,
		  (ulong) ptr);
	imem->cc.ctop = ptr;
	memmove(ptr, data, min(old_num, new_num));
#ifdef DEBUG
	if (new_num > old_num)
	    gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc,
			  new_num - old_num);
	else
	    gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num);
#endif
    } else
	if (new_num < old_num) {
	    /* trim the string and create a free-space hole */
	    ptr = data;
	    imem->lost.strings += old_num - new_num;
	    gs_alloc_fill(data + new_num, gs_alloc_fill_free,
			  old_num - new_num);
	    if_debug5('A', "[a%d:<> ]%s(%u->%u) 0x%lx\n",
		      alloc_trace_space(imem), client_name_string(cname),
		      old_num, new_num, (ulong)ptr);
        } else {			/* Punt. */
	    ptr = gs_alloc_string(mem, new_num, cname);
	    if (ptr == 0)
		return 0;
	    memcpy(ptr, data, min(old_num, new_num));
	    gs_free_string(mem, data, old_num, cname);
	}
    return ptr;
}

private void
i_free_string(gs_memory_t * mem, byte * data, uint nbytes,
	      client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (data == imem->cc.ctop) {
	if_debug4('A', "[a%d:-> ]%s(%u) 0x%lx\n",
		  alloc_trace_space(imem), client_name_string(cname), nbytes,
		  (ulong) data);
	imem->cc.ctop += nbytes;
    } else {
	if_debug4('A', "[a%d:->#]%s(%u) 0x%lx\n",
		  alloc_trace_space(imem), client_name_string(cname), nbytes,
		  (ulong) data);
	imem->lost.strings += nbytes;
    }
    gs_alloc_fill(data, gs_alloc_fill_free, nbytes);
}
998 
999 private gs_memory_t *
i_stable(gs_memory_t * mem)1000 i_stable(gs_memory_t *mem)
1001 {
1002     return mem->stable_memory;
1003 }

private void
i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    ulong unused = imem->lost.refs + imem->lost.strings;
    ulong inner = 0;

    alloc_close_chunk(imem);
    /* Add up unallocated space within each chunk. */
    /* Also keep track of space allocated to inner chunks, */
    /* which is included in previous_status.allocated. */
    {
	const chunk_t *cp = imem->cfirst;

	while (cp != 0) {
	    unused += cp->ctop - cp->cbot;
	    if (cp->outer)
		inner += cp->cend - (byte *) cp->chead;
	    cp = cp->cnext;
	}
    }
    unused += compute_free_objects(imem);
    pstat->used = imem->allocated + inner - unused +
	imem->previous_status.used;
    pstat->allocated = imem->allocated +
	imem->previous_status.allocated;
}

private void
i_enable_free(gs_memory_t * mem, bool enable)
{
    if (enable)
	mem->procs.free_object = i_free_object,
	    mem->procs.free_string = i_free_string;
    else
	mem->procs.free_object = gs_ignore_free_object,
	    mem->procs.free_string = gs_ignore_free_string;
}

/* ------ Internal procedures ------ */

/* Compute the amount of free object space by scanning free lists. */
private ulong
compute_free_objects(gs_ref_memory_t * mem)
{
    ulong unused = mem->lost.objects;
    int i;

    /* Add up space on free lists. */
    for (i = 0; i < num_freelists; i++) {
	const obj_header_t *pfree;

	for (pfree = mem->freelists[i]; pfree != 0;
	     pfree = *(const obj_header_t * const *)pfree
	    )
	    unused += obj_align_round(pfree[-1].o_size);
    }
    return unused;
}

/* Allocate an object from the large-block freelist. */
private obj_header_t *	/* returns obj if allocated, else 0 */
large_freelist_alloc(gs_ref_memory_t *mem, uint size)
{
    /* Scan the large object freelist.  We'll grab an object up to 1/8 */
    /* bigger right away; otherwise we use the best fit of the entire scan. */
    uint aligned_size = obj_align_round(size);
    uint aligned_min_size = aligned_size + sizeof(obj_header_t);
    uint aligned_max_size =
	aligned_min_size + obj_align_round(aligned_min_size / 8);
    obj_header_t *best_fit = 0;
    obj_header_t **best_fit_prev = NULL; /* Initialize to guard against indeterminism. */
    uint best_fit_size = max_uint;
    obj_header_t *pfree;
    obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX];
    uint largest_size = 0;

    if (aligned_size > mem->largest_free_size)
	return 0;		/* definitely no block large enough */

    while ((pfree = *ppfprev) != 0) {
	uint free_size = obj_align_round(pfree[-1].o_size);

        if (free_size == aligned_size ||
	    (free_size >= aligned_min_size && free_size < best_fit_size)
	    ) {
	    best_fit = pfree;
	    best_fit_prev = ppfprev;
	    best_fit_size = pfree[-1].o_size;
	    if (best_fit_size <= aligned_max_size)
		break;	/* good enough fit to spare a scan of the entire list */
	}
	ppfprev = (obj_header_t **) pfree;
	if (free_size > largest_size)
	    largest_size = free_size;
    }
    if (best_fit == 0) {
	/*
	 * No single free chunk is large enough, but since we scanned the
	 * entire list, we now have an accurate updated value for
	 * largest_free_size.
	 */
	mem->largest_free_size = largest_size;
	return 0;
    }

    /* Remove from the freelist & return excess memory to free */
    *best_fit_prev = *(obj_header_t **)best_fit;
    trim_obj(mem, best_fit, aligned_size, (chunk_t *)0);

    /* Pre-init the block header; o_alone & o_type are already init'd */
    best_fit[-1].o_size = size;

    return best_fit;
}
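
/*
 * Worked example (assuming obj_align_mod == 8 for concreteness): for
 * size == 100, aligned_size == 104 and aligned_min_size == 104 +
 * sizeof(obj_header_t).  A free block whose rounded size is exactly
 * 104 is taken immediately; a block of at least aligned_min_size
 * becomes the best fit so far, and if its size is within roughly 1/8
 * above that minimum the scan stops early; otherwise the whole list
 * is scanned and the smallest adequate block wins.
 */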

/* Allocate an object.  This handles all but the fastest, simplest case. */
private obj_header_t *
alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
	  alloc_flags_t flags, client_name_t cname)
{
    obj_header_t *ptr;

    if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
	/*
	 * Give the object a chunk all its own.  Note that this case does
	 * not occur if is_controlled is true.
	 */
	ulong asize =
	    ((lsize + obj_align_mask) & -obj_align_mod) +
	    sizeof(obj_header_t);
	chunk_t *cp =
	    alloc_acquire_chunk(mem, asize + sizeof(chunk_head_t), false,
				"large object chunk");

	if (
#if arch_sizeof_long > arch_sizeof_int
	    asize > max_uint
#else
	    asize < lsize
#endif
	    )
	    return 0;
	if (cp == 0)
	    return 0;
	ptr = (obj_header_t *) cp->cbot;
	cp->cbot += asize;
	ptr->o_alone = 1;
	ptr->o_size = lsize;
    } else {
	/*
	 * Cycle through the chunks at the current save level, starting
	 * with the currently open one.
	 */
	chunk_t *cp_orig = mem->pcc;
	uint asize = obj_size_round((uint) lsize);
	bool allocate_success = false;

	if (lsize > max_freelist_size && (flags & ALLOC_DIRECT)) {
	    /* We haven't checked the large block freelist yet. */
	    if ((ptr = large_freelist_alloc(mem, lsize)) != 0) {
		--ptr;			/* must point to header */
		goto done;
	    }
	}

	if (cp_orig == 0) {
	    /* Open an arbitrary chunk. */
	    cp_orig = mem->pcc = mem->cfirst;
	    alloc_open_chunk(mem);
	}

#define CAN_ALLOC_AT_END(cp)\
  ((cp)->ctop - (byte *) (ptr = (obj_header_t *) (cp)->cbot)\
   > asize + sizeof(obj_header_t))

	do {
	    if (CAN_ALLOC_AT_END(&mem->cc)) {
		allocate_success = true;
		break;
	    } else if (mem->is_controlled) {
		/* Try consolidating free space. */
		gs_consolidate_free((gs_memory_t *)mem);
		if (CAN_ALLOC_AT_END(&mem->cc)) {
		    allocate_success = true;
		    break;
		}
	    }
	    /* No luck, go on to the next chunk. */
	    {
		chunk_t *cp = mem->cc.cnext;

		alloc_close_chunk(mem);
		if (cp == 0)
		    cp = mem->cfirst;
		mem->pcc = cp;
		alloc_open_chunk(mem);
	    }
	} while (mem->pcc != cp_orig);

#ifdef CONSOLIDATE_BEFORE_ADDING_CHUNK
	if (!allocate_success) {
	    /*
	     * Try consolidating free space before giving up.
	     * It's not clear this is a good idea, since it requires quite
	     * a lot of computation and doesn't seem to improve things much.
	     */
	    if (!mem->is_controlled) { /* already did this if controlled */
		chunk_t *cp = cp_orig;

		alloc_close_chunk(mem);
		do {
		    consolidate_chunk_free(cp, mem);
		    if (CAN_ALLOC_AT_END(cp)) {
			mem->pcc = cp;
			alloc_open_chunk(mem);
			allocate_success = true;
			break;
		    }
		    if ((cp = cp->cnext) == 0)
			cp = mem->cfirst;
		} while (cp != cp_orig);
	    }
	}
#endif

#undef CAN_ALLOC_AT_END

	if (!allocate_success) {
	    /* Add another chunk. */
	    chunk_t *cp =
		alloc_add_chunk(mem, (ulong)mem->chunk_size, "chunk");

	    if (cp) {
		/* mem->pcc == cp, mem->cc == *mem->pcc. */
		ptr = (obj_header_t *)cp->cbot;
		allocate_success = true;
	    }
	}

	/*
	 * If no success, try to scavenge from low free memory.  This is
	 * only enabled for controlled memory (currently only the async
	 * renderer) because it's too much work to prevent it from
	 * examining outer save levels in the general case.
	 */
	if (allocate_success)
	    mem->cc.cbot = (byte *) ptr + asize;
	else if (!mem->is_controlled ||
		 (ptr = scavenge_low_free(mem, (uint)lsize)) == 0)
	    return 0;	/* allocation failed */
	ptr->o_alone = 0;
	ptr->o_size = (uint) lsize;
    }
done:
    ptr->o_type = pstype;
#   if IGC_PTR_STABILITY_CHECK
	ptr->d.o.space_id = mem->space_id;
#   endif
    ptr++;
    gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize);
    return ptr;
}

/*
 * Consolidate free objects contiguous to free space at cbot onto the cbot
 * area. Also keep track of end of highest internal free object
 * (int_freed_top).
 */
private void
consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
{
    obj_header_t *begin_free = 0;

    cp->int_freed_top = cp->cbase;	/* below all objects in chunk */
    SCAN_CHUNK_OBJECTS(cp)
    DO_ALL
	if (pre->o_type == &st_free) {
	    if (begin_free == 0)
		begin_free = pre;
	} else {
	    if (begin_free)
		cp->int_freed_top = (byte *)pre; /* first byte following internal free */
	    begin_free = 0;
        }
    END_OBJECTS_SCAN
    if (begin_free) {
	/* We found free objects at the top of the object area. */
	/* Remove the free objects from the freelists. */
	remove_range_from_freelist(mem, begin_free, cp->cbot);
	if_debug4('a', "[a]resetting chunk 0x%lx cbot from 0x%lx to 0x%lx (%lu free)\n",
		  (ulong) cp, (ulong) cp->cbot, (ulong) begin_free,
		  (ulong) ((byte *) cp->cbot - (byte *) begin_free));
	cp->cbot = (byte *) begin_free;
    }
}
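
/*
 * Illustration: if the top of a chunk's object area looks like
 *	... | live obj | free | free | <- cbot
 * the trailing free objects are unlinked from their freelists and cbot
 * moves down to the start of the first of them, returning that space
 * to the chunk's LIFO allocation area.
 */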

/* Consolidate free objects. */
void
ialloc_consolidate_free(gs_ref_memory_t *mem)
{
    chunk_t *cp;
    chunk_t *cprev;

    alloc_close_chunk(mem);

    /* Visit chunks in reverse order to encourage LIFO behavior. */
    for (cp = mem->clast; cp != 0; cp = cprev) {
	cprev = cp->cprev;
	consolidate_chunk_free(cp, mem);
	if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
	    /* The entire chunk is free. */
	    chunk_t *cnext = cp->cnext;

	    if (!mem->is_controlled) {
		alloc_free_chunk(cp, mem);
		if (mem->pcc == cp)
		    mem->pcc =
			(cnext == 0 ? cprev : cprev == 0 ? cnext :
			 cprev->cbot - cprev->ctop >
			 cnext->cbot - cnext->ctop ? cprev :
			 cnext);
	    }
	}
    }
    alloc_open_chunk(mem);
}
private void
i_consolidate_free(gs_memory_t *mem)
{
    ialloc_consolidate_free((gs_ref_memory_t *)mem);
}

/* Try to free up the given amount of space from free space below the chunk base. */
private obj_header_t *	/* returns an uninitialized object hdr, NULL if none found */
scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size)
{
    /* Find the first range of memory that can be glued back together to fill the request. */
    obj_header_t *found_pre = 0;

    /* Visit chunks in forward order. */
    obj_header_t *begin_free = 0;
    uint found_free;
    uint request_size_rounded = obj_size_round(request_size);
    uint need_free = request_size_rounded + sizeof(obj_header_t);    /* room for GC's dummy hdr */
    chunk_t *cp;

    for (cp = mem->cfirst; cp != 0; cp = cp->cnext) {
	begin_free = 0;
	found_free = 0;
	SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	    if (pre->o_type == &st_free) {
	    	if (begin_free == 0) {
	    	    found_free = 0;
	    	    begin_free = pre;
	    	}
	    	found_free += pre_obj_rounded_size(pre);
	    	if (begin_free != 0 && found_free >= need_free)
	    	    break;
	    } else
	    	begin_free = 0;
	END_OBJECTS_SCAN_NO_ABORT

	/* Found a sufficient range of empty memory. */
	if (begin_free != 0 && found_free >= need_free) {

	    /* Fish the found pieces out of the various freelists. */
	    remove_range_from_freelist(mem, (char*)begin_free,
				       (char*)begin_free + found_free);

	    /* Prepare the found object. */
	    found_pre = begin_free;
	    found_pre->o_type = &st_free;  /* don't confuse GC if it gets lost */
	    found_pre->o_size = found_free - sizeof(obj_header_t);

	    /* Chop off the excess tail piece & toss it back into the free pool. */
	    trim_obj(mem, found_pre + 1, request_size, cp);
     	}
    }
    return found_pre;
}
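
/*
 * Example: a request of 200 bytes needs obj_size_round(200) bytes plus
 * room for the GC's dummy header.  The first run of contiguous free
 * objects whose rounded sizes sum to at least that amount is spliced
 * out of the freelists, relabeled as a single free object, and then
 * trimmed back to the requested size by trim_obj.
 */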

/* Remove a range of memory from an allocator's freelists. */
private void
remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top)
{
    int num_free[num_freelists];
    int smallest = num_freelists, largest = -1;
    const obj_header_t *cur;
    uint size;
    int i;
    uint removed = 0;

    /*
     * Scan from bottom to top, a range containing only free objects,
     * counting the number of objects of each size.
     */

    for (cur = bottom; cur != top;
	 cur = (const obj_header_t *)
	     ((const byte *)cur + obj_size_round(size))
	) {
	size = cur->o_size;
	i = (size > max_freelist_size ? LARGE_FREELIST_INDEX :
	     (size + obj_align_mask) >> log2_obj_align_mod);
	if (i < smallest) {
	    /*
	     * 0-length free blocks aren't kept on any list, because
	     * they don't have room for a pointer.
	     */
	    if (i == 0)
		continue;
	    if (smallest < num_freelists)
		memset(&num_free[i], 0, (smallest - i) * sizeof(int));
	    else
		num_free[i] = 0;
	    smallest = i;
	}
	if (i > largest) {
	    if (largest >= 0)
		memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int));
	    largest = i;
	}
	num_free[i]++;
    }

    /*
     * Remove free objects from the freelists, adjusting lost.objects by
     * subtracting the size of the region being processed minus the amount
     * of space reclaimed.
     */

    for (i = smallest; i <= largest; i++) {
	int count = num_free[i];
        obj_header_t *pfree;
	obj_header_t **ppfprev;

	if (!count)
	    continue;
	ppfprev = &mem->freelists[i];
	for (;;) {
	    pfree = *ppfprev;
	    if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) {
		/* We're removing an object. */
		*ppfprev = *(obj_header_t **) pfree;
		removed += obj_align_round(pfree[-1].o_size);
		if (!--count)
		    break;
	    } else
		ppfprev = (obj_header_t **) pfree;
	}
    }
    mem->lost.objects -= (char*)top - (char*)bottom - removed;
}
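
/*
 * Example: if the range contains free objects that map to freelist
 * indices 3, 5, and 3 again, the counting pass leaves num_free[3] = 2,
 * num_free[4] = 0, num_free[5] = 1 (only entries between smallest and
 * largest are ever initialized), and the removal pass then unlinks
 * exactly that many entries from each list.
 */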

/* Trim a memory object down to a given size */
private void
trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp)
/* Obj must have rounded size == req'd size, or have enough room for */
/* trailing dummy obj_header */
{
    uint rounded_size = obj_align_round(size);
    obj_header_t *pre_obj = obj - 1;
    obj_header_t *excess_pre = (obj_header_t*)((char*)obj + rounded_size);
    uint old_rounded_size = obj_align_round(pre_obj->o_size);
    uint excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t);

    /* trim object's size to desired */
    pre_obj->o_size = size;
    if (old_rounded_size == rounded_size)
	return;	/* nothing more to do here */
    /*
     * If the object is alone in its chunk, move cbot to point to the end
     * of the object.
     */
    if (pre_obj->o_alone) {
	if (!cp) {
	    mem->cfreed.memory = mem;
	    if (chunk_locate(obj, &mem->cfreed)) {
		cp = mem->cfreed.cp;
	    }
	}
	if (cp) {
#ifdef DEBUG
	    if (cp->cbot != (byte *)obj + old_rounded_size) {
		lprintf3("resizing 0x%lx, old size %u, new size %u, cbot wrong!\n",
			 (ulong)obj, old_rounded_size, size);
		/* gs_abort */
	    } else
#endif
		{
		    cp->cbot = (byte *)excess_pre;
		    return;
		}
	}
	/*
	 * Something very weird is going on.  This probably shouldn't
	 * ever happen, but if it does....
	 */
	pre_obj->o_alone = 0;
    }
    /* make excess into free obj */
    excess_pre->o_type = &st_free;  /* don't confuse GC */
    excess_pre->o_size = excess_size;
    excess_pre->o_alone = 0;
    if (excess_size >= obj_align_mod) {
	/* Put excess object on a freelist */
	obj_header_t **pfl;

	if ((byte *)excess_pre >= mem->cc.int_freed_top)
	    mem->cc.int_freed_top = (byte *)excess_pre + excess_size;
	if (excess_size <= max_freelist_size)
	    pfl = &mem->freelists[(excess_size + obj_align_mask) >>
				 log2_obj_align_mod];
	else {
	    uint rounded_size = obj_align_round(excess_size);

	    pfl = &mem->freelists[LARGE_FREELIST_INDEX];
	    if (rounded_size > mem->largest_free_size)
		mem->largest_free_size = rounded_size;
	}
	*(obj_header_t **) (excess_pre + 1) = *pfl;
	*pfl = excess_pre + 1;
	mem->cfreed.memory = mem;
    } else {
	/* excess piece will be "lost" memory */
	mem->lost.objects += excess_size + sizeof(obj_header_t);
    }
}
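
/*
 * Illustrative sketch (not part of the build): the in-place layout after
 * trimming an object whose old rounded size is S down to rounded size R.
 * The excess tail becomes a free object carved out where it lies:
 *
 *	pre_obj:	header, o_size = size	(the surviving object)
 *	obj .. obj+R:	object contents
 *	excess_pre:	header, o_type = &st_free,
 *			o_size = S - R - sizeof(obj_header_t)
 *	excess_pre+1:	free space, put on a freelist if the body is at
 *			least obj_align_mod bytes, otherwise counted as
 *			S - R bytes of lost.objects.
 */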

/* ================ Roots ================ */

/* Register a root. */
private int
i_register_root(gs_memory_t * mem, gs_gc_root_t * rp, gs_ptr_type_t ptype,
		void **up, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (rp == NULL) {
	rp = gs_raw_alloc_struct_immovable(imem->non_gc_memory, &st_gc_root_t,
					   "i_register_root");
	if (rp == 0)
	    return_error(gs_error_VMerror);
	rp->free_on_unregister = true;
    } else
	rp->free_on_unregister = false;
    if_debug3('8', "[8]register root(%s) 0x%lx -> 0x%lx\n",
	      client_name_string(cname), (ulong)rp, (ulong)up);
    rp->ptype = ptype;
    rp->p = up;
    rp->next = imem->roots;
    imem->roots = rp;
    return 0;
}

/* Unregister a root. */
private void
i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    gs_gc_root_t **rpp = &imem->roots;

    if_debug2('8', "[8]unregister root(%s) 0x%lx\n",
	      client_name_string(cname), (ulong) rp);
    while (*rpp != rp)
	rpp = &(*rpp)->next;
    *rpp = (*rpp)->next;
    if (rp->free_on_unregister)
	gs_free_object(imem->non_gc_memory, rp, "i_unregister_root");
}
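
/*
 * Illustrative sketch (not part of the build): a client keeps *pptr alive
 * across GCs by registering its address as a root, and unregisters it when
 * done.  Passing rp == NULL above makes the allocator allocate the root
 * record itself and free it on unregistration.  This sketch assumes the
 * usual public veneers (gs_register_root / gs_unregister_root, declared in
 * gsmemory.h), which dispatch to the i_* procedures here.
 */
#if 0
private int
example_with_root(gs_memory_t *mem, void **pptr)
{
    gs_gc_root_t root;
    int code = gs_register_root(mem, &root, ptr_struct_type, pptr,
				"example_with_root");

    if (code < 0)
	return code;
    /* ... *pptr is now traced (and relocated) by the GC ... */
    gs_unregister_root(mem, &root, "example_with_root");
    return 0;
}
#endif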

/* ================ Chunks ================ */

public_st_chunk();

/* Insert a chunk in the chain.  This is exported for the GC and for */
/* the forget_save operation. */
void
alloc_link_chunk(chunk_t * cp, gs_ref_memory_t * imem)
{
    byte *cdata = cp->cbase;
    chunk_t *icp;
    chunk_t *prev;

    /*
     * Allocators tend to allocate in either ascending or descending
     * address order.  The loop will handle the latter well; check for
     * the former first.
     */
    if (imem->clast && PTR_GE(cdata, imem->clast->ctop))
	icp = 0;
    else
	for (icp = imem->cfirst; icp != 0 && PTR_GE(cdata, icp->ctop);
	     icp = icp->cnext
	    );
    cp->cnext = icp;
    if (icp == 0) {		/* add at end of chain */
	prev = imem->clast;
	imem->clast = cp;
    } else {			/* insert before icp */
	prev = icp->cprev;
	icp->cprev = cp;
    }
    cp->cprev = prev;
    if (prev == 0)
	imem->cfirst = cp;
    else
	prev->cnext = cp;
    if (imem->pcc != 0) {
	imem->cc.cnext = imem->pcc->cnext;
	imem->cc.cprev = imem->pcc->cprev;
    }
}
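
/*
 * Illustrative sketch (not part of the build): alloc_link_chunk keeps the
 * chain sorted by address, which chunk_locate_ptr below relies on when it
 * walks forward or backward from a hint.  A debugging check of that
 * invariant might look like this (example name is hypothetical):
 */
#if 0
private bool
chunk_chain_is_sorted(const gs_ref_memory_t *imem)
{
    const chunk_t *cp;

    for (cp = imem->cfirst; cp != 0 && cp->cnext != 0; cp = cp->cnext)
	if (!PTR_GE(cp->cnext->cbase, cp->ctop))
	    return false;	/* a later chunk starts below this one's top */
    return true;
}
#endif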

/* Add a chunk for ordinary allocation. */
private chunk_t *
alloc_add_chunk(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
{
    chunk_t *cp = alloc_acquire_chunk(mem, csize, true, cname);

    if (cp) {
	alloc_close_chunk(mem);
	mem->pcc = cp;
	mem->cc = *mem->pcc;
	gs_alloc_fill(mem->cc.cbase, gs_alloc_fill_free,
		      mem->cc.climit - mem->cc.cbase);
    }
    return cp;
}

/* Acquire a chunk.  If csize doesn't fit in a uint, fail.  If we have a */
/* garbage collector (psignal is valid) and the amount allocated has */
/* reached the limit, fail if the limit has in turn reached max_vm */
/* (MaxLocalVM); otherwise record the request, set the signal, and */
/* allocate anyway.  With no collector (psignal NULL), no limit checking */
/* is done at all. */
private chunk_t *
alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
		    client_name_t cname)
{
    gs_memory_t *parent = mem->non_gc_memory;
    chunk_t *cp;
    byte *cdata;

#if arch_sizeof_long > arch_sizeof_int
    /* If csize is larger than max_uint, punt. */
    if (csize != (uint) csize)
	return 0;
#endif
    cp = gs_raw_alloc_struct_immovable(parent, &st_chunk, cname);

    if (mem->gc_status.psignal != 0) {
	/* we have a garbage collector */
	if ((ulong) (mem->allocated) >= mem->limit) {
	    mem->gc_status.requested += csize;
	    if (mem->limit >= mem->gc_status.max_vm) {
		gs_free_object(parent, cp, cname);
		return 0;
	    }
	    if_debug4('0', "[0]signaling space=%d, allocated=%ld, limit=%ld, requested=%ld\n",
		      mem->space, (long)mem->allocated,
		      (long)mem->limit, (long)mem->gc_status.requested);
	    *mem->gc_status.psignal = mem->gc_status.signal_value;
	}
    }
    cdata = gs_alloc_bytes_immovable(parent, csize, cname);
    if (cp == 0 || cdata == 0) {
	gs_free_object(parent, cdata, cname);
	gs_free_object(parent, cp, cname);
	mem->gc_status.requested = csize;
	return 0;
    }
    alloc_init_chunk(cp, cdata, cdata + csize, has_strings, (chunk_t *) 0);
    alloc_link_chunk(cp, mem);
    mem->allocated += st_chunk.ssize + csize;
    return cp;
}
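
/*
 * Illustrative sketch (not part of the build): the limit decision above in
 * tabular form, taken when allocated >= limit and psignal is valid:
 *
 *	limit >= max_vm	-> free the chunk struct and fail (never exceed
 *			   MaxLocalVM);
 *	limit <  max_vm	-> set *psignal and allocate anyway; the
 *			   interpreter notices the signal and schedules a
 *			   collection at a safe point.
 *
 * Without a collector, only actual allocation failure in the parent
 * allocator stops us, in which case gc_status.requested records how much
 * was wanted.
 */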

/* Initialize the pointers in a chunk.  This is exported for save/restore. */
/* The bottom pointer must be aligned, but the top pointer need not */
/* be aligned. */
void
alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
		 chunk_t * outer)
{
    byte *cdata = bot;

    if (outer != 0)
	outer->inner_count++;
    cp->chead = (chunk_head_t *) cdata;
    cdata += sizeof(chunk_head_t);
    cp->cbot = cp->cbase = cp->int_freed_top = cdata;
    cp->cend = top;
    cp->rcur = 0;
    cp->rtop = 0;
    cp->outer = outer;
    cp->inner_count = 0;
    cp->has_refs = false;
    cp->sbase = cdata;
    if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
	/*
	 * We allocate a large enough string marking and reloc table
	 * to cover the entire chunk.
	 */
	uint nquanta = string_space_quanta(top - cdata);

	cp->climit = cdata + nquanta * string_data_quantum;
	cp->smark = cp->climit;
	cp->smark_size = string_quanta_mark_size(nquanta);
	cp->sreloc =
	    (string_reloc_offset *) (cp->smark + cp->smark_size);
	cp->sfree1 = (uint *) cp->sreloc;
    } else {
	/* No strings, don't need the string GC tables. */
	cp->climit = cp->cend;
	cp->sfree1 = 0;
	cp->smark = 0;
	cp->smark_size = 0;
	cp->sreloc = 0;
    }
    cp->ctop = cp->climit;
    alloc_init_free_strings(cp);
}
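
/*
 * Illustrative sketch (not part of the build): the resulting layout of a
 * chunk with strings enabled.  Objects grow upward from cbase, strings
 * grow downward from climit, and the string GC tables sit between climit
 * and cend:
 *
 *	chead		chunk_head_t
 *	cbase = cbot	objects allocated upward from here
 *	  ...		(cbot advances as objects are allocated)
 *	ctop		strings allocated downward to here
 *	climit		top of string storage
 *	smark		string mark bits (smark_size bytes)
 *	sreloc		string relocation table (sfree1 aliases this space)
 *	cend		end of the chunk data
 *
 * Without strings, climit == cend and the table pointers are all 0.
 */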

/* Initialize the string freelists in a chunk. */
void
alloc_init_free_strings(chunk_t * cp)
{
    if (cp->sfree1)
	memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
    cp->sfree = 0;
}

/* Close up the current chunk. */
/* This is exported for save/restore and the GC. */
void
alloc_close_chunk(gs_ref_memory_t * mem)
{
    if (mem->pcc != 0) {
	*mem->pcc = mem->cc;
#ifdef DEBUG
	if (gs_debug_c('a')) {
	    dlprintf1("[a%d]", alloc_trace_space(mem));
	    dprintf_chunk("closing chunk", mem->pcc);
	}
#endif
    }
}

/* Reopen the current chunk after a GC or restore. */
void
alloc_open_chunk(gs_ref_memory_t * mem)
{
    if (mem->pcc != 0) {
	mem->cc = *mem->pcc;
#ifdef DEBUG
	if (gs_debug_c('a')) {
	    dlprintf1("[a%d]", alloc_trace_space(mem));
	    dprintf_chunk("opening chunk", mem->pcc);
	}
#endif
    }
}
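
/*
 * Illustrative sketch (not part of the build): the allocator caches the
 * current chunk by value in mem->cc and keeps a pointer to the original
 * in mem->pcc.  Close flushes the (possibly updated) cache back; open
 * refreshes it.  Code that rearranges chunks behind the allocator's back,
 * such as a GC or restore, must bracket the work accordingly (example
 * name is hypothetical):
 */
#if 0
private void
example_reshape_chunks(gs_ref_memory_t *mem)
{
    alloc_close_chunk(mem);	/* flush mem->cc into *mem->pcc */
    /* ... move or rewrite the chunk chain here ... */
    alloc_open_chunk(mem);	/* reload mem->cc from the updated *mem->pcc */
}
#endif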

/* Remove a chunk from the chain.  This is exported for the GC. */
void
alloc_unlink_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
#ifdef DEBUG
    if (gs_alloc_debug) {	/* Check to make sure this chunk belongs to this allocator. */
	const chunk_t *ap = mem->cfirst;

	while (ap != 0 && ap != cp)
	    ap = ap->cnext;
	if (ap != cp) {
	    lprintf2("unlink_chunk 0x%lx not owned by memory 0x%lx!\n",
		     (ulong) cp, (ulong) mem);
	    return;		/*gs_abort(); */
	}
    }
#endif
    if (cp->cprev == 0)
	mem->cfirst = cp->cnext;
    else
	cp->cprev->cnext = cp->cnext;
    if (cp->cnext == 0)
	mem->clast = cp->cprev;
    else
	cp->cnext->cprev = cp->cprev;
    if (mem->pcc != 0) {
	mem->cc.cnext = mem->pcc->cnext;
	mem->cc.cprev = mem->pcc->cprev;
	if (mem->pcc == cp) {
	    mem->pcc = 0;
	    mem->cc.cbot = mem->cc.ctop = 0;
	}
    }
}

/*
 * Free a chunk.  This is exported for the GC.  Since we eventually use
 * this to free the chunk containing the allocator itself, we must be
 * careful not to reference anything in the allocator after freeing the
 * chunk data.
 */
void
alloc_free_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
    gs_memory_t *parent = mem->non_gc_memory;
    byte *cdata = (byte *)cp->chead;
    ulong csize = (byte *)cp->cend - cdata;

    alloc_unlink_chunk(cp, mem);
    mem->allocated -= st_chunk.ssize;
    if (mem->cfreed.cp == cp)
	mem->cfreed.cp = 0;
    if (cp->outer == 0) {
	mem->allocated -= csize;
	gs_free_object(parent, cdata, "alloc_free_chunk(data)");
    } else {
	cp->outer->inner_count--;
	gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
    }
    gs_free_object(parent, cp, "alloc_free_chunk(chunk struct)");
}

/* Find the chunk for a pointer. */
/* Note that this only searches the current save level. */
/* Since a given save level can't contain both a chunk and an inner chunk */
/* of that chunk, we can stop when is_within_chunk succeeds, and just test */
/* is_in_inner_chunk then. */
bool
chunk_locate_ptr(const void *ptr, chunk_locator_t * clp)
{
    register chunk_t *cp = clp->cp;

    if (cp == 0) {
	cp = clp->memory->cfirst;
	if (cp == 0)
	    return false;
	/* ptr is in the last chunk often enough to be worth checking for. */
	if (PTR_GE(ptr, clp->memory->clast->cbase))
	    cp = clp->memory->clast;
    }
    if (PTR_LT(ptr, cp->cbase)) {
	do {
	    cp = cp->cprev;
	    if (cp == 0)
		return false;
	}
	while (PTR_LT(ptr, cp->cbase));
	if (PTR_GE(ptr, cp->cend))
	    return false;
    } else {
	while (PTR_GE(ptr, cp->cend)) {
	    cp = cp->cnext;
	    if (cp == 0)
		return false;
	}
	if (PTR_LT(ptr, cp->cbase))
	    return false;
    }
    clp->cp = cp;
    return !ptr_is_in_inner_chunk(ptr, cp);
}
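
/*
 * Illustrative sketch (not part of the build): callers keep a
 * chunk_locator_t across queries so the previous hit primes the next
 * search; the loops above walk forward or backward from clp->cp.  A cold
 * lookup starts with cp == 0 (example name is hypothetical):
 */
#if 0
private bool
example_find_chunk(gs_ref_memory_t *mem, const void *ptr)
{
    chunk_locator_t loc;

    loc.memory = mem;
    loc.cp = 0;			/* no hint for the first lookup */
    /* on success, loc.cp is the containing chunk, cached for next time */
    return chunk_locate_ptr(ptr, &loc);
}
#endif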

/* ------ Debugging ------ */

#ifdef DEBUG

#include "string_.h"

inline private bool
obj_in_control_region(const void *obot, const void *otop,
		      const dump_control_t *pdc)
{
    return
	((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) &&
	 (pdc->top == NULL || PTR_LT(obot, pdc->top)));
}

const dump_control_t dump_control_default =
{
    dump_do_default, NULL, NULL
};
const dump_control_t dump_control_all =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings | dump_do_contents, NULL, NULL
};

/*
 * Internal procedure to dump a block of memory, in hex and optionally
 * also as characters.
 */
private void
debug_indent(int indent)
{
    int i;

    for (i = indent; i > 0; --i)
	dputc(' ');
}
private void
debug_dump_contents(const byte * bot, const byte * top, int indent,
		    bool as_chars)
{
    const byte *block;

#define block_size 16

    if (bot >= top)
	return;
    for (block = bot - ((bot - (byte *) 0) & (block_size - 1));
	 block < top; block += block_size
	) {
	int i;
	char label[12];

	/* Check for repeated blocks. */
	if (block >= bot + block_size &&
	    block <= top - (block_size * 2) &&
	    !memcmp(block, block - block_size, block_size) &&
	    !memcmp(block, block + block_size, block_size)
	    ) {
	    if (block < bot + block_size * 2 ||
		memcmp(block, block - block_size * 2, block_size)
		) {
		debug_indent(indent);
		dputs("  ...\n");
	    }
	    continue;
	}
	sprintf(label, "0x%lx:", (ulong) block);
	debug_indent(indent);
	dputs(label);
	for (i = 0; i < block_size; ++i) {
	    const char *sepr = ((i & 3) == 0 && i != 0 ? "  " : " ");

	    dputs(sepr);
	    if (block + i >= bot && block + i < top)
		dprintf1("%02x", block[i]);
	    else
		dputs("  ");
	}
	dputc('\n');
	if (as_chars) {
	    debug_indent(indent + strlen(label));
	    for (i = 0; i < block_size; ++i) {
		byte ch;

		if ((i & 3) == 0 && i != 0)
		    dputc(' ');
		if (block + i >= bot && block + i < top &&
		    (ch = block[i]) >= 32 && ch <= 126
		    )
		    dprintf1("  %c", ch);
		else
		    dputs("   ");
	    }
	    dputc('\n');
	}
    }
#undef block_size
}
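
/*
 * Illustrative sketch (not part of the build): roughly what the dump
 * produced above looks like for a small region holding "hello world"
 * (addresses and exact spacing approximate; non-printable bytes are left
 * blank on the character line, and runs of identical 16-byte blocks
 * collapse to "..."):
 *
 *	0x89ab10: 68 65 6c 6c  6f 20 77 6f  72 6c 64 0a  00 00 00 00
 *	           h  e  l  l   o     w  o   r  l  d
 */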

/* Print one object with the given options. */
/* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
/* contents. */
void
debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control)
{
    const obj_header_t *pre = ((const obj_header_t *)obj) - 1;
    ulong size = pre_obj_contents_size(pre);
    const gs_memory_struct_type_t *type = pre->o_type;
    dump_options_t options = control->options;

    dprintf3("  pre=0x%lx(obj=0x%lx) size=%lu", (ulong) pre, (ulong) obj,
	     size);
    switch (options & (dump_do_type_addresses | dump_do_no_types)) {
	case dump_do_type_addresses + dump_do_no_types:	/* addresses only */
	    dprintf1(" type=0x%lx", (ulong) type);
	    break;
	case dump_do_type_addresses:	/* addresses & names */
	    dprintf2(" type=%s(0x%lx)", struct_type_name_string(type),
		     (ulong) type);
	    break;
	case 0:		/* names only */
	    dprintf1(" type=%s", struct_type_name_string(type));
	case dump_do_no_types:	/* nothing */
	    ;
    }
    if (options & dump_do_marks) {
	dprintf2(" smark/back=%u (0x%x)", pre->o_smark, pre->o_smark);
    }
    dputc('\n');
    if (type == &st_free)
	return;
    if (options & dump_do_pointers) {
	struct_proc_enum_ptrs((*proc)) = type->enum_ptrs;
	uint index = 0;
	enum_ptr_t eptr;
	gs_ptr_type_t ptype;

	if (proc != gs_no_struct_enum_ptrs)
	    for (; (ptype = (*proc)(mem, pre + 1, size, index, &eptr, type, NULL)) != 0;
		 ++index
		) {
		const void *ptr = eptr.ptr;

		dprintf1("    ptr %u: ", index);
		if (ptype == ptr_string_type || ptype == ptr_const_string_type) {
		    const gs_const_string *str = (const gs_const_string *)ptr;

		    dprintf2("0x%lx(%u)", (ulong) str->data, str->size);
		    if (options & dump_do_pointed_strings) {
			dputs(" =>\n");
			debug_dump_contents(str->data, str->data + str->size, 6,
					    true);
		    } else {
			dputc('\n');
		    }
		} else {
		    dprintf1((PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ?
			      "(0x%lx)\n" : "0x%lx\n"), (ulong) ptr);
		}
	    }
    }
    if (options & dump_do_contents) {
	debug_dump_contents((const byte *)obj, (const byte *)obj + size,
			    0, false);
    }
}

/* Print the contents of a chunk with the given options. */
/* Relevant options: all. */
void
debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_t * control)
{
    dprintf1("chunk at 0x%lx:\n", (ulong) cp);
    dprintf3("   chead=0x%lx  cbase=0x%lx sbase=0x%lx\n",
	     (ulong) cp->chead, (ulong) cp->cbase, (ulong) cp->sbase);
    dprintf3("    rcur=0x%lx   rtop=0x%lx  cbot=0x%lx\n",
	     (ulong) cp->rcur, (ulong) cp->rtop, (ulong) cp->cbot);
    dprintf4("    ctop=0x%lx climit=0x%lx smark=0x%lx, size=%u\n",
	     (ulong) cp->ctop, (ulong) cp->climit, (ulong) cp->smark,
	     cp->smark_size);
    dprintf2("  sreloc=0x%lx   cend=0x%lx\n",
	     (ulong) cp->sreloc, (ulong) cp->cend);
    dprintf5("cprev=0x%lx cnext=0x%lx outer=0x%lx inner_count=%u has_refs=%s\n",
	     (ulong) cp->cprev, (ulong) cp->cnext, (ulong) cp->outer,
	     cp->inner_count, (cp->has_refs ? "true" : "false"));

    dprintf2("  sfree1=0x%lx   sfree=0x%x\n",
	     (ulong) cp->sfree1, cp->sfree);
    if (control->options & dump_do_strings) {
	debug_dump_contents((control->bottom == 0 ? cp->ctop :
			     max(control->bottom, cp->ctop)),
			    (control->top == 0 ? cp->climit :
			     min(control->top, cp->climit)),
			    0, true);
    }
    SCAN_CHUNK_OBJECTS(cp)
	DO_ALL
	if (obj_in_control_region(pre + 1,
				  (const byte *)(pre + 1) + size,
				  control)
	)
	debug_print_object(mem, pre + 1, control);
    END_OBJECTS_SCAN_NO_ABORT
}
void
debug_print_chunk(const gs_memory_t *mem, const chunk_t * cp)
{
    dump_control_t control;

    control = dump_control_default;
    debug_dump_chunk(mem, cp, &control);
}

/* Print the contents of all chunks managed by an allocator. */
/* Relevant options: all. */
void
debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
{
    const chunk_t *mcp;

    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
	const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

	if (obj_in_control_region(cp->cbase, cp->cend, control))
	    debug_dump_chunk((const gs_memory_t *)mem, cp, control);
    }
}
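
/*
 * Illustrative sketch (not part of the build): dumping everything an
 * allocator holds at full detail, e.g. from a debugger or a temporary
 * diagnostic, using the dump_control_all template defined above (example
 * name is hypothetical):
 */
#if 0
private void
example_dump_all(const gs_ref_memory_t *imem)
{
    debug_dump_memory(imem, &dump_control_all);
}
#endif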

/* Find all the objects that contain a given pointer. */
void
debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
{
    dump_control_t control;
    const chunk_t *mcp;

    control.options = 0;
    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
	const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

	SCAN_CHUNK_OBJECTS(cp);
	DO_ALL
	    struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
	    uint index = 0;
	    enum_ptr_t eptr;

	    if (proc)		/* doesn't trace refs */
		for (; (*proc)((const gs_memory_t *)mem, pre + 1, size, index,
			       &eptr, pre->o_type, NULL);
		     ++index)
		    if (eptr.ptr == target) {
			dprintf1("Index %d in", index);
			debug_print_object((const gs_memory_t *)mem, pre + 1, &control);
		    }
	END_OBJECTS_SCAN_NO_ABORT
    }
}

#endif /* DEBUG */