/* Copyright (C) 1992, 1995, 1996, 1997, 1998, 1999 Aladdin Enterprises.  All rights reserved.

  This software is provided AS-IS with no warranty, either express or
  implied.

  This software is distributed under license and may not be copied,
  modified or distributed except as expressly authorized under the terms
  of the license contained in the file LICENSE in this distribution.

  For more information about licensing, please refer to
  http://www.ghostscript.com/licensing/. For information on
  commercial licensing, go to http://www.artifex.com/licensing/ or
  contact Artifex Software, Inc., 101 Lucas Valley Road #110,
  San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: istack.c,v 1.6 2003/09/03 03:22:59 giles Exp $ */
/* Manager for expandable stacks of refs */
#include "memory_.h"
#include "ghost.h"
#include "gsstruct.h"
#include "gsutil.h"
#include "ierrors.h"
#include "ialloc.h"
#include "istack.h"
#include "istkparm.h"
#include "istruct.h"            /* for RELOC_REF_VAR */
#include "iutil.h"
#include "ivmspace.h"           /* for local/global test */
#include "store.h"

/* Forward references */
private void init_block(ref_stack_t *pstack, const ref *pblock_array,
                        uint used);
private int ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add);

/* GC descriptors and procedures */
private_st_ref_stack_params();
private
CLEAR_MARKS_PROC(ref_stack_clear_marks)
{
    ref_stack_t *const sptr = vptr;

    r_clear_attrs(&sptr->current, l_mark);
}
private
ENUM_PTRS_WITH(ref_stack_enum_ptrs, ref_stack_t *sptr) return 0;
case 0: ENUM_RETURN_REF(&sptr->current);
case 1: return ENUM_OBJ(sptr->params);
ENUM_PTRS_END
private RELOC_PTRS_WITH(ref_stack_reloc_ptrs, ref_stack_t *sptr)
{
    /* Note that the relocation must be a multiple of sizeof(ref_packed) */
    /* * align_packed_per_ref, but it need not be a multiple of */
    /* sizeof(ref).  Therefore, we must do the adjustments using */
    /* ref_packed pointers rather than ref pointers. */
    ref_packed *bot = (ref_packed *) sptr->current.value.refs;
    long reloc;

    RELOC_REF_VAR(sptr->current);
    r_clear_attrs(&sptr->current, l_mark);
    reloc = bot - (ref_packed *) sptr->current.value.refs;
#define RELOC_P(p)\
  sptr->p = (ref *)((ref_packed *)sptr->p - reloc);
    RELOC_P(p);
    RELOC_P(bot);
    RELOC_P(top);
#undef RELOC_P
    RELOC_OBJ_VAR(sptr->params);
} RELOC_PTRS_END
/* Structure type for a ref_stack. */
public_st_ref_stack();

/* Initialize a stack. */
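/*
 * As set up here and in init_block, each block of the stack is a ref array
 * laid out (reading from the bottom) as:
 *
 *      [ ref_stack_block header (stack_block_refs refs: next, used) ]
 *      [ bot_guard guard refs, filled with guard_value ]
 *      [ body of data_size = size - (stack_block_refs + bot_guard + top_guard) refs ]
 *      [ top_guard guard refs ]
 *
 * pstack->bot and pstack->top bracket the body of the current (top) block.
 */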
int
ref_stack_init(ref_stack_t *pstack, const ref *pblock_array,
               uint bot_guard, uint top_guard, const ref *pguard_value,
               gs_ref_memory_t *mem, ref_stack_params_t *params)
{
    uint size = r_size(pblock_array);
    uint avail = size - (stack_block_refs + bot_guard + top_guard);
    ref_stack_block *pblock = (ref_stack_block *)pblock_array->value.refs;
    s_ptr body = (s_ptr)(pblock + 1);

    if (params == 0) {
        params = gs_alloc_struct((gs_memory_t *)mem, ref_stack_params_t,
                                 &st_ref_stack_params,
                                 "ref_stack_alloc(stack.params)");
        if (params == 0)
            return_error(-1);   /* avoid binding in any error codes */
    }

    pstack->bot = body + bot_guard;
    pstack->p = pstack->bot - 1;
    pstack->top = pstack->p + avail;
    pstack->current = *pblock_array;
    pstack->extension_size = 0;
    pstack->extension_used = 0;

    make_int(&pstack->max_stack, avail);
    pstack->requested = 0;
    pstack->margin = 0;
    pstack->body_size = avail;

    pstack->params = params;
    pstack->memory = mem;

    params->bot_guard = bot_guard;
    params->top_guard = top_guard;
    params->block_size = size;
    params->data_size = avail;
    if (pguard_value != 0)
        params->guard_value = *pguard_value;
    else
        make_tav(&params->guard_value, t__invalid, 0, intval, 0);
    params->underflow_error = -1;
    params->overflow_error = -1;
    params->allow_expansion = true;
    init_block(pstack, pblock_array, 0);
    refset_null_new(pstack->bot, avail, 0);
    make_empty_array(&pblock->next, 0);
    return 0;
}

/* Set whether a stack is allowed to expand.  The initial value is true. */
void
ref_stack_allow_expansion(ref_stack_t *pstack, bool expand)
{
    pstack->params->allow_expansion = expand;
}

/* Set the error codes for under- and overflow.  The initial values are -1. */
void
ref_stack_set_error_codes(ref_stack_t *pstack, int underflow_error,
                          int overflow_error)
{
    pstack->params->underflow_error = underflow_error;
    pstack->params->overflow_error = overflow_error;
}

/* Set the maximum number of elements allowed on a stack. */
int
ref_stack_set_max_count(ref_stack_t *pstack, long nmax)
{
    uint nmin = ref_stack_count_inline(pstack);

    if (nmax < nmin)
        nmax = nmin;
    if (nmax > max_uint / sizeof(ref))
        nmax = max_uint / sizeof(ref);
    if (!pstack->params->allow_expansion) {
        uint ncur = pstack->body_size;

        if (nmax > ncur)
            nmax = ncur;
    }
    pstack->max_stack.value.intval = nmax;
    return 0;
}

/*
 * Set the margin between the limit and the top of the stack.
 * Note that this may require allocating a block.
 */
int
ref_stack_set_margin(ref_stack_t *pstack, uint margin)
{
    const ref_stack_params_t *params = pstack->params;
    uint data_size = params->data_size;

    if (margin <= pstack->margin) {
        refset_null_new(pstack->top + 1, pstack->margin - margin, 0);
    } else {
        if (margin > data_size >> 1)
            return_error(e_rangecheck);
        if (pstack->top - pstack->p < margin) {
            uint used = pstack->p + 1 - pstack->bot;
            uint keep = data_size - margin;
            int code = ref_stack_push_block(pstack, keep, used - keep);

            if (code < 0)
                return code;
        }
    }
    pstack->margin = margin;
    pstack->body_size = data_size - margin;
    pstack->top = pstack->bot + pstack->body_size - 1;
    return 0;
}

/* Return the number of elements on a stack. */
uint
ref_stack_count(const ref_stack_t *pstack)
{
    return ref_stack_count_inline(pstack);
}

/*
 * Return a pointer to a given element from the stack, counting from
 * 0 as the top element.  If the index is out of range, return 0.
 */
ref *
ref_stack_index(const ref_stack_t *pstack, long idx)
{
    ref_stack_block *pblock;
    uint used = pstack->p + 1 - pstack->bot;

    if (idx < 0)
        return NULL;
    if (idx < used)             /* common case */
        return pstack->p - (uint) idx;
    pblock = (ref_stack_block *) pstack->current.value.refs;
    do {
        pblock = (ref_stack_block *) pblock->next.value.refs;
        if (pblock == 0)
            return NULL;
        idx -= used;
        used = r_size(&pblock->used);
    } while (idx >= used);
    return pblock->used.value.refs + (used - 1 - (uint) idx);
}

/*
 * Count the number of elements down to and including the first mark.
 * If no mark is found, return 0.
 */
uint
ref_stack_counttomark(const ref_stack_t *pstack)
{
    uint scanned = 0;
    ref_stack_enum_t rsenum;

    ref_stack_enum_begin(&rsenum, pstack);
    do {
        uint count = rsenum.size;
        const ref *p = rsenum.ptr + count - 1;

        for (; count; count--, p--)
            if (r_has_type(p, t_mark))
                return scanned + (rsenum.size - count + 1);
        scanned += rsenum.size;
    } while (ref_stack_enum_next(&rsenum));
    return 0;
}

/*
 * Do the store check for storing 'count' elements of a stack, starting
 * 'skip' elements below the top, into an array.  Return 0 or e_invalidaccess.
 */
int
ref_stack_store_check(const ref_stack_t *pstack, ref *parray, uint count,
                      uint skip)
{
    uint space = r_space(parray);

    if (space != avm_local) {
        uint left = count, pass = skip;
        ref_stack_enum_t rsenum;

        ref_stack_enum_begin(&rsenum, pstack);
        do {
            ref *ptr = rsenum.ptr;
            uint size = rsenum.size;

            if (size <= pass)
                pass -= size;
            else {
                int code;

                if (pass != 0)
                    size -= pass, pass = 0;
                ptr += size;
                if (size > left)
                    size = left;
                left -= size;
                code = refs_check_space(ptr - size, size, space);
                if (code < 0)
                    return code;
                if (left == 0)
                    break;
            }
        } while (ref_stack_enum_next(&rsenum));
    }
    return 0;
}

/*
 * Store the top 'count' elements of a stack, starting 'skip' elements below
 * the top, into an array, with or without store/undo checking.  age=-1 for
 * no check, 0 for old, 1 for new.  May return e_rangecheck or
 * e_invalidaccess.
 */
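/*
 * For example (a hypothetical sketch, not code from this file), an
 * astore-like operator copying the top 'size' elements into a freshly
 * allocated (hence "new") array 'arr' of at least 'size' refs might call:
 *
 *      code = ref_stack_store(&o_stack, &arr, size, 0, 1, true,
 *                             idmemory, "astore");
 */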
#undef idmemory                 /****** NOTA BENE ******/
int
ref_stack_store(const ref_stack_t *pstack, ref *parray, uint count,
                uint skip, int age, bool check, gs_dual_memory_t *idmemory,
                client_name_t cname)
{
    uint left, pass;
    ref *to;
    ref_stack_enum_t rsenum;

    if (count > ref_stack_count(pstack) || count > r_size(parray))
        return_error(e_rangecheck);
    if (check) {
        int code = ref_stack_store_check(pstack, parray, count, skip);

        if (code < 0)
            return code;
    }
    to = parray->value.refs + count;
    left = count, pass = skip;
    ref_stack_enum_begin(&rsenum, pstack);
    do {
        ref *from = rsenum.ptr;
        uint size = rsenum.size;

        if (size <= pass)
            pass -= size;
        else {
            if (pass != 0)
                size -= pass, pass = 0;
            from += size;
            if (size > left)
                size = left;
            left -= size;
            switch (age) {
            case -1:            /* not an array */
                while (size--) {
                    from--, to--;
                    ref_assign(to, from);
                }
                break;
            case 0:             /* old array */
                while (size--) {
                    from--, to--;
                    ref_assign_old(parray, to, from, cname);
                }
                break;
            case 1:             /* new array */
                while (size--) {
                    from--, to--;
                    ref_assign_new(to, from);
                }
                break;
            }
            if (left == 0)
                break;
        }
    } while (ref_stack_enum_next(&rsenum));
    r_set_size(parray, count);
    return 0;
}

/*
 * Pop the top N elements off a stack.
 * The number must not exceed the number of elements in use.
 */
void
ref_stack_pop(ref_stack_t *pstack, uint count)
{
    uint used;

    while ((used = pstack->p + 1 - pstack->bot) < count) {
        count -= used;
        pstack->p = pstack->bot - 1;
        ref_stack_pop_block(pstack);
    }
    pstack->p -= count;
}

/* Pop the top block off a stack.  May return underflow_error. */
int
ref_stack_pop_block(ref_stack_t *pstack)
{
    s_ptr bot = pstack->bot;
    uint count = pstack->p + 1 - bot;
    ref_stack_block *pcur =
        (ref_stack_block *) pstack->current.value.refs;
    ref_stack_block *pnext =
        (ref_stack_block *) pcur->next.value.refs;
    uint used;
    ref *body;
    ref next;

    if (pnext == 0)
        return_error(pstack->params->underflow_error);
    used = r_size(&pnext->used);
    body = (ref *) (pnext + 1) + pstack->params->bot_guard;
    next = pcur->next;
    /*
     * If the contents of the two blocks won't fit in a single block, we
     * move up the used part of the top block, and copy up as much of
     * the contents of the next block under it as will fit.  If the
     * contents of both blocks fit in a single block, we copy the used
     * part of the top block to the top of the next block, and free the
     * top block.
     */
    if (used + count > pstack->body_size) {
        /*
         * The contents of the two blocks won't fit into a single block.
         * On the assumption that we're recovering from a local stack
         * underflow and need to increase the number of contiguous
         * elements available, move up the used part of the top block, and
         * copy up as much of the contents of the next block under it as
         * will fit.
         */
        uint moved = pstack->body_size - count;
        uint left;

        if (moved == 0)
            return_error(e_Fatal);
        memmove(bot + moved, bot, count * sizeof(ref));
        left = used - moved;
        memcpy(bot, body + left, moved * sizeof(ref));
        refset_null_new(body + left, moved, 0);
        r_dec_size(&pnext->used, moved);
        pstack->p = pstack->top;
        pstack->extension_used -= moved;
    } else {
        /*
         * The contents of the two blocks will fit into a single block.
         * Copy the used part of the top block to the top of the next
         * block, and free the top block.
         */
        memcpy(body + used, bot, count * sizeof(ref));
        pstack->bot = bot = body;
        pstack->top = bot + pstack->body_size - 1;
        gs_free_ref_array(pstack->memory, &pstack->current,
                          "ref_stack_pop_block");
        pstack->current = next;
        pstack->p = bot + (used + count - 1);
        pstack->extension_size -= pstack->body_size;
        pstack->extension_used -= used;
    }
    return 0;
}

/*
 * Extend a stack to recover from an overflow condition.
 * May return overflow_error or e_VMerror.
 */
int
ref_stack_extend(ref_stack_t *pstack, uint request)
{
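    /*
     * Heuristic (the same fraction is used in ref_stack_push): retain
     * about one third of the block's capacity in the new top block so
     * that the most recently pushed elements remain contiguously
     * addressable after the extension.
     */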
    uint keep = (pstack->top - pstack->bot + 1) / 3;
    uint count = pstack->p - pstack->bot + 1;
    const ref_stack_params_t *params = pstack->params;

    if (request > params->data_size)
        return_error(params->overflow_error);
    if (keep + request > pstack->body_size)
        keep = pstack->body_size - request;
    if (keep > count)
        keep = count;           /* required by ref_stack_push_block */
    return ref_stack_push_block(pstack, keep, request);
}

/*
 * Push N empty slots onto a stack.  These slots are not initialized:
 * the caller must immediately fill them.  May return overflow_error
 * (if max_stack would be exceeded, or the stack has no allocator)
 * or e_VMerror.
 */
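/*
 * A hypothetical caller (sketch only) pushes two slots and fills them
 * immediately through ref_stack_index (index 0 is the new top):
 *
 *      int code = ref_stack_push(&o_stack, 2);
 *
 *      if (code < 0)
 *          return code;
 *      make_int(ref_stack_index(&o_stack, 0), later_value);
 *      make_int(ref_stack_index(&o_stack, 1), earlier_value);
 */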
int
ref_stack_push(ref_stack_t *pstack, uint count)
{
    /* Don't bother to pre-check for overflow: we must be able to */
    /* back out in the case of a VMerror anyway, and */
    /* ref_stack_push_block will make the check itself. */
    uint needed = count;
    uint added;

    for (; (added = pstack->top - pstack->p) < needed; needed -= added) {
        int code;

        pstack->p = pstack->top;
        code = ref_stack_push_block(pstack,
                                    (pstack->top - pstack->bot + 1) / 3,
                                    added);
        if (code < 0) {
            /* Back out. */
            ref_stack_pop(pstack, count - needed + added);
            pstack->requested = count;
            return code;
        }
    }
    pstack->p += needed;
    return 0;
}

/*
 * Push a block onto the stack, specifying how many elements of the current
 * top block should remain in the top block and also how many elements we
 * are trying to add.  Requires keep <= count.  May return overflow_error or
 * e_VMerror.
 */
private int
ref_stack_push_block(ref_stack_t *pstack, uint keep, uint add)
{
    const ref_stack_params_t *params = pstack->params;
    uint count = pstack->p - pstack->bot + 1;
    uint move = count - keep;
    ref_stack_block *pcur = (ref_stack_block *) pstack->current.value.refs;
    ref next;
    ref_stack_block *pnext;
    ref *body;
    int code;

    if (keep > count)
        return_error(e_Fatal);
    /* Check for overflowing the maximum size, */
    /* or expansion not allowed. */
    if (pstack->extension_used + (pstack->top - pstack->bot) + add >=
        pstack->max_stack.value.intval ||
        !params->allow_expansion
        )
        return_error(params->overflow_error);
    code = gs_alloc_ref_array(pstack->memory, &next, 0,
                              params->block_size, "ref_stack_push_block");
    if (code < 0)
        return code;
    pnext = (ref_stack_block *) next.value.refs;
    body = (ref *) (pnext + 1);
    /* Copy the top keep elements into the new block, */
    /* and make the new block the top block. */
    init_block(pstack, &next, keep);
    body += params->bot_guard;
    memcpy(body, pstack->bot + move, keep * sizeof(ref));
    /* Clear the elements above the top of the new block. */
    refset_null_new(body + keep, params->data_size - keep, 0);
    /* Clear the elements above the top of the old block. */
    refset_null_new(pstack->bot + move, keep, 0);
    pnext->next = pstack->current;
    pcur->used.value.refs = pstack->bot;
    r_set_size(&pcur->used, move);
    pstack->current = next;
    pstack->bot = body;
    pstack->top = pstack->bot + pstack->body_size - 1;
    pstack->p = pstack->bot + keep - 1;
    pstack->extension_size += pstack->body_size;
    pstack->extension_used += move;
    return 0;
}

/* Begin enumerating the blocks of a stack. */
void
ref_stack_enum_begin(ref_stack_enum_t *prse, const ref_stack_t *pstack)
{
    prse->block = (ref_stack_block *)pstack->current.value.refs;
    prse->ptr = pstack->bot;
    prse->size = pstack->p + 1 - pstack->bot;
}

bool
ref_stack_enum_next(ref_stack_enum_t *prse)
{
    ref_stack_block *block =
        prse->block = (ref_stack_block *)prse->block->next.value.refs;

    if (block == 0)
        return false;
    prse->ptr = block->used.value.refs;
    prse->size = r_size(&block->used);
    return true;
}
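/*
 * Typical traversal, as used by ref_stack_counttomark and ref_stack_store
 * above: blocks are visited from the top of the stack downward, and each
 * block's refs are addressed bottom-to-top, so scanning every element from
 * the top down looks roughly like this (sketch):
 *
 *      ref_stack_enum_t rsenum;
 *
 *      ref_stack_enum_begin(&rsenum, pstack);
 *      do {
 *          const ref *p = rsenum.ptr + rsenum.size;
 *
 *          while (p-- > rsenum.ptr) {
 *              ... examine *p ...
 *          }
 *      } while (ref_stack_enum_next(&rsenum));
 */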

/* Clean up a stack for garbage collection. */
void
ref_stack_cleanup(ref_stack_t *pstack)
{
    ref_stack_block *pblock =
        (ref_stack_block *) pstack->current.value.refs;

    refset_null_new(pstack->p + 1, pstack->top - pstack->p, 0);
    pblock->used = pstack->current;     /* set attrs */
    pblock->used.value.refs = pstack->bot;
    r_set_size(&pblock->used, pstack->p + 1 - pstack->bot);
}

/*
 * Free the entire contents of a stack, including the bottom block.
 * The client must still call ref_stack_free.  Note that after calling
 * ref_stack_release, the stack is no longer usable.
 */
void
ref_stack_release(ref_stack_t *pstack)
{
    gs_ref_memory_t *mem = pstack->memory;

    ref_stack_clear(pstack);
    /* Free the parameter structure. */
    gs_free_object((gs_memory_t *)mem, pstack->params,
                   "ref_stack_release(stack.params)");
    /* Free the original (bottom) block. */
    gs_free_ref_array(mem, &pstack->current, "ref_stack_release");
}

/*
 * Release a stack and then free the ref_stack object.
 */
void
ref_stack_free(ref_stack_t *pstack)
{
    gs_memory_t *mem = (gs_memory_t *)pstack->memory;

    ref_stack_release(pstack);
    gs_free_object(mem, pstack, "ref_stack_free");
}

/* ------ Internal routines ------ */

/* Initialize the guards and body of a stack block. */
private void
init_block(ref_stack_t *pstack, const ref *psb, uint used)
{
    ref_stack_params_t *params = pstack->params;
    ref *brefs = psb->value.refs;
    uint i;
    ref *p;

    for (i = params->bot_guard, p = brefs + stack_block_refs;
         i != 0; i--, p++
        )
        ref_assign(p, &params->guard_value);
    /* The top guard elements will never be read, */
    /* but we need to initialize them for the sake of the GC. */
    /* We can use refset_null for this, because even though it uses */
    /* make_null_new and stack elements must not be marked new, */
    /* these slots will never actually be read or written. */
    if (params->top_guard) {
        ref *top = brefs + r_size(psb);
        int top_guard = params->top_guard;

        refset_null_new(top - top_guard, top_guard, 0);
    }
    {
        ref_stack_block *const pblock = (ref_stack_block *) brefs;

        pblock->used = *psb;
        pblock->used.value.refs = brefs + stack_block_refs + params->bot_guard;
        r_set_size(&pblock->used, 0);
    }
}