1 /* Copyright (C) 1991, 2000 Aladdin Enterprises. All rights reserved.
2
3 This software is provided AS-IS with no warranty, either express or
4 implied.
5
6 This software is distributed under license and may not be copied,
7 modified or distributed except as expressly authorized under the terms
8 of the license contained in the file LICENSE in this distribution.
9
10 For more information about licensing, please refer to
11 http://www.ghostscript.com/licensing/. For information on
12 commercial licensing, go to http://www.artifex.com/licensing/ or
13 contact Artifex Software, Inc., 101 Lucas Valley Road #110,
14 San Rafael, CA 94903, U.S.A., +1(415)492-9861.
15 */
16
17 /* $Id: zcontext.c,v 1.11 2004/08/04 19:36:13 stefan Exp $ */
18 /* Display PostScript context operators */
19 #include "memory_.h"
20 #include "ghost.h"
21 #include "gp.h" /* for usertime */
22 #include "oper.h"
23 #include "gsexit.h"
24 #include "gsgc.h"
25 #include "gsstruct.h"
26 #include "gsutil.h"
27 #include "gxalloc.h"
28 #include "gxstate.h" /* for copying gstate stack */
29 #include "stream.h" /* for files.h */
30 #include "files.h"
31 #include "idict.h"
32 #include "igstate.h"
33 #include "icontext.h"
34 #include "interp.h"
35 #include "isave.h"
36 #include "istruct.h"
37 #include "dstack.h"
38 #include "estack.h"
39 #include "ostack.h"
40 #include "store.h"
41
42 /*
43 * Define the rescheduling interval. A value of max_int effectively
44 * disables scheduling. The only reason not to make this const is to
45 * allow it to be changed during testing.
46 */
47 private int reschedule_interval = 100;
48
49 /* Scheduling hooks in interp.c */
50 extern int (*gs_interp_reschedule_proc)(i_ctx_t **);
51 extern int (*gs_interp_time_slice_proc)(i_ctx_t **);
52 extern int gs_interp_time_slice_ticks;
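/*
 * These hooks are installed in two stages: zcontext_init (below) points
 * both procedures at ctx_initialize, and the first pass through the
 * scheduler then replaces them with ctx_reschedule and ctx_time_slice
 * and sets the tick count to reschedule_interval.
 */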
53
54 /* Context structure */
55 typedef enum {
56 cs_active,
57 cs_done
58 } ctx_status_t;
59 typedef long ctx_index_t; /* >= 0 */
60 typedef struct gs_context_s gs_context_t;
61 typedef struct gs_scheduler_s gs_scheduler_t;
62
63 /*
64 * If several contexts share local VM, then if any one of them has done an
65 * unmatched save, the others are not allowed to run. We handle this by
66 * maintaining the following invariant:
67 * When control reaches the point in the scheduler that decides
68 * what context to run next, then for each group of contexts
69 * sharing local VM, if the save level for that VM is non-zero,
70 * saved_local_vm is only set in the context that has unmatched
71 * saves.
72 * We maintain this invariant as follows: when control enters the
73 * scheduler, if a context was running, we set its saved_local_vm flag
74 * to (save_level > 0). When selecting a context to run, we ignore
75 * contexts where saved_local_vm is false and the local VM save_level > 0.
76 */
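/*
 * Illustrative sketch (not part of the original comment): if contexts A
 * and B share local VM and A executes a save with no matching restore,
 * then at the next entry to the scheduler A has saved_local_vm set and B
 * does not, so the selection loop in ctx_reschedule skips B until the
 * save level returns to zero.
 */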
77 struct gs_context_s {
78 gs_context_state_t state; /* (must be first for subclassing) */
79 /* Private state */
80 gs_scheduler_t *scheduler;
81 ctx_status_t status;
82 ctx_index_t index; /* > 0 */
83 bool detach; /* true if a detach has been */
84 /* executed for this context */
85 bool saved_local_vm; /* (see above) */
86 bool visible; /* during GC, true if visible; */
87 /* otherwise, always true */
88 ctx_index_t next_index; /* next context with same status */
89 /* (active, waiting on same lock, */
90 /* waiting on same condition, */
91 /* waiting to be destroyed) */
92 ctx_index_t joiner_index; /* context waiting on a join */
93 /* for this one */
94 gs_context_t *table_next; /* hash table chain -- this must be a real */
95 /* pointer, for looking up indices */
96 };
97 inline private bool
98 context_is_visible(const gs_context_t *pctx)
99 {
100 return (pctx && pctx->visible);
101 }
102 inline private gs_context_t *
103 visible_context(gs_context_t *pctx)
104 {
105 return (pctx && pctx->visible ? pctx : (gs_context_t *)0);
106 }
107
108 /* GC descriptor */
109 private
110 CLEAR_MARKS_PROC(context_clear_marks)
111 {
112 gs_context_t *const pctx = vptr;
113
114 (*st_context_state.clear_marks)
115 (cmem, &pctx->state, sizeof(pctx->state), &st_context_state);
116 }
117 private
118 ENUM_PTRS_WITH(context_enum_ptrs, gs_context_t *pctx)
119 ENUM_PREFIX(st_context_state, 2);
120 case 0: return ENUM_OBJ(pctx->scheduler);
121 case 1: {
122 /* Return the next *visible* context. */
123 const gs_context_t *next = pctx->table_next;
124
125 while (next && !next->visible)
126 next = next->table_next;
127 return ENUM_OBJ(next);
128 }
129 ENUM_PTRS_END
130 private RELOC_PTRS_WITH(context_reloc_ptrs, gs_context_t *pctx)
131 RELOC_PREFIX(st_context_state);
132 RELOC_VAR(pctx->scheduler);
133 /* Don't relocate table_next -- the scheduler object handles that. */
134 RELOC_PTRS_END
135 gs_private_st_complex_only(st_context, gs_context_t, "gs_context_t",
136 context_clear_marks, context_enum_ptrs, context_reloc_ptrs, 0);
137
138 /*
139 * Context list structure. Note that this uses context indices, not
140 * pointers, to avoid having to worry about pointers between local VMs.
141 */
142 typedef struct ctx_list_s {
143 ctx_index_t head_index;
144 ctx_index_t tail_index;
145 } ctx_list_t;
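/*
 * A list is walked by converting indices back to pointers, e.g.
 *     for (pctx = index_context(psched, pl->head_index); pctx != 0;
 *          pctx = index_context(psched, pctx->next_index))
 * as activate_waiting and ctx_reschedule do below; add_last appends.
 */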
146
147 /* Condition structure */
148 typedef struct gs_condition_s {
149 ctx_list_t waiting; /* contexts waiting on this condition */
150 } gs_condition_t;
151 gs_private_st_simple(st_condition, gs_condition_t, "conditiontype");
152
153 /* Lock structure */
154 typedef struct gs_lock_s {
155 ctx_list_t waiting; /* contexts waiting for this lock, */
156 /* must be first for subclassing */
157 ctx_index_t holder_index; /* context holding the lock, if any */
158 gs_scheduler_t *scheduler;
159 } gs_lock_t;
160 gs_private_st_ptrs1(st_lock, gs_lock_t, "locktype",
161 lock_enum_ptrs, lock_reloc_ptrs, scheduler);
162
163 /* Global state */
164 /*typedef struct gs_scheduler_s gs_scheduler_t; *//* (above) */
165 struct gs_scheduler_s {
166 gs_context_t *current;
167 long usertime_initial; /* usertime when current started running */
168 ctx_list_t active;
169 vm_reclaim_proc((*save_vm_reclaim));
170 ctx_index_t dead_index;
171 #define CTX_TABLE_SIZE 19
172 gs_context_t *table[CTX_TABLE_SIZE];
173 };
174
175 /* Convert a context index to a context pointer. */
176 private gs_context_t *
177 index_context(const gs_scheduler_t *psched, long index)
178 {
179 gs_context_t *pctx;
180
181 if (index == 0)
182 return 0;
183 pctx = psched->table[index % CTX_TABLE_SIZE];
184 while (pctx != 0 && pctx->index != index)
185 pctx = pctx->table_next;
186 return pctx;
187 }
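/*
 * Usage sketch (hypothetical caller): looking up the holder of a lock.
 *     gs_context_t *holder = index_context(psched, plock->holder_index);
 * Index 0 is reserved to mean "no context", so the result may be null.
 */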
188
189 /* Structure definition */
190 gs_private_st_composite(st_scheduler, gs_scheduler_t, "gs_scheduler",
191 scheduler_enum_ptrs, scheduler_reloc_ptrs);
192 /*
193 * The only cross-local-VM pointers in the context machinery are the
194 * table_next pointers in contexts, and the current and table[] pointers
195 * in the scheduler. We need to handle all of these specially.
196 */
197 private ENUM_PTRS_WITH(scheduler_enum_ptrs, gs_scheduler_t *psched)
198 {
199 index -= 1;
200 if (index < CTX_TABLE_SIZE) {
201 gs_context_t *pctx = psched->table[index];
202
203 while (pctx && !pctx->visible)
204 pctx = pctx->table_next;
205 return ENUM_OBJ(pctx);
206 }
207 return 0;
208 }
209 case 0: return ENUM_OBJ(visible_context(psched->current));
210 ENUM_PTRS_END
211 private RELOC_PTRS_WITH(scheduler_reloc_ptrs, gs_scheduler_t *psched)
212 {
213 if (psched->current->visible)
214 RELOC_VAR(psched->current);
215 {
216 int i;
217
218 for (i = 0; i < CTX_TABLE_SIZE; ++i) {
219 gs_context_t **ppctx = &psched->table[i];
220 gs_context_t **pnext;
221
222 for (; *ppctx; ppctx = pnext) {
223 pnext = &(*ppctx)->table_next;
224 if ((*ppctx)->visible)
225 RELOC_VAR(*ppctx);
226 }
227 }
228 }
229 }
230 RELOC_PTRS_END
231
232 /*
233 * The context scheduler requires special handling during garbage
234 * collection, since it is the only structure that can legitimately
235 * reference objects in multiple local VMs. To deal with this, we wrap the
236 * interpreter's garbage collector with code that prevents it from seeing
237 * contexts in other than the current local VM. ****** WORKS FOR LOCAL GC,
238 * NOT FOR GLOBAL ******
239 */
240 private void
241 context_reclaim(vm_spaces * pspaces, bool global)
242 {
243 /*
244 * Search through the registered roots to find the current context.
245 * (This is a hack so we can find the scheduler.)
246 */
247 int i;
248 gs_context_t *pctx = 0; /* = 0 is bogus to pacify compilers */
249 gs_scheduler_t *psched = 0;
250 gs_ref_memory_t *lmem = 0; /* = 0 is bogus to pacify compilers */
251 chunk_locator_t loc;
252
253 for (i = countof(pspaces->memories.indexed) - 1; psched == 0 && i > 0; --i) {
254 gs_ref_memory_t *mem = pspaces->memories.indexed[i];
255 const gs_gc_root_t *root = mem->roots;
256
257 for (; root; root = root->next) {
258 if (gs_object_type((gs_memory_t *)mem, *root->p) == &st_context) {
259 pctx = *root->p;
260 psched = pctx->scheduler;
261 lmem = mem;
262 break;
263 }
264 }
265 }
266
267 /* Hide all contexts in other (local) VMs. */
268 /*
269 * See context_create below for why we look for the context
270 * in stable memory.
271 */
272 loc.memory = (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)lmem);
273 loc.cp = 0;
274 for (i = 0; i < CTX_TABLE_SIZE; ++i)
275 for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
276 pctx->visible = chunk_locate_ptr(pctx, &loc);
277
278 #ifdef DEBUG
279 if (!psched->current->visible) {
280 lprintf("Current context is invisible!\n");
281 gs_abort((gs_memory_t *)lmem);
282 }
283 #endif
284
285 /* Do the actual garbage collection. */
286 psched->save_vm_reclaim(pspaces, global);
287
288 /* Make all contexts visible again. */
289 for (i = 0; i < CTX_TABLE_SIZE; ++i)
290 for (pctx = psched->table[i]; pctx; pctx = pctx->table_next)
291 pctx->visible = true;
292 }
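/*
 * The visible flags set above are what context_enum_ptrs and
 * scheduler_enum_ptrs consult, so the wrapped collection never follows
 * pointers to contexts whose local VM is not the one being collected.
 */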
293
294
295 /* Forward references */
296 private int context_create(gs_scheduler_t *, gs_context_t **,
297 const gs_dual_memory_t *,
298 const gs_context_state_t *, bool);
299 private long context_usertime(void);
300 private int context_param(const gs_scheduler_t *, os_ptr, gs_context_t **);
301 private void context_destroy(gs_context_t *);
302 private void stack_copy(ref_stack_t *, const ref_stack_t *, uint, uint);
303 private int lock_acquire(os_ptr, gs_context_t *);
304 private int lock_release(ref *);
305
306 /* Internal procedures */
307 private void
308 context_load(gs_scheduler_t *psched, gs_context_t *pctx)
309 {
310 if_debug1('"', "[\"]loading %ld\n", pctx->index);
311 if ( pctx->state.keep_usertime )
312 psched->usertime_initial = context_usertime();
313 context_state_load(&pctx->state);
314 }
315 private void
316 context_store(gs_scheduler_t *psched, gs_context_t *pctx)
317 {
318 if_debug1('"', "[\"]storing %ld\n", pctx->index);
319 context_state_store(&pctx->state);
320 if ( pctx->state.keep_usertime )
321 pctx->state.usertime_total +=
322 context_usertime() - psched->usertime_initial;
323 }
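/*
 * Usertime accounting: once a context has executed usertime (see
 * zusertime_context below), keep_usertime is set, and the context is
 * charged only for the intervals during which it actually runs, via
 * usertime_initial in context_load and usertime_total in context_store.
 */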
324
325 /* List manipulation */
326 private void
327 add_last(const gs_scheduler_t *psched, ctx_list_t *pl, gs_context_t *pc)
328 {
329 pc->next_index = 0;
330 if (pl->head_index == 0)
331 pl->head_index = pc->index;
332 else
333 index_context(psched, pl->tail_index)->next_index = pc->index;
334 pl->tail_index = pc->index;
335 }
336
337 /* ------ Initialization ------ */
338
339 private int ctx_initialize(i_ctx_t **);
340 private int ctx_reschedule(i_ctx_t **);
341 private int ctx_time_slice(i_ctx_t **);
342 private int
343 zcontext_init(i_ctx_t *i_ctx_p)
344 {
345 /* Complete initialization after the interpreter is entered. */
346 gs_interp_reschedule_proc = ctx_initialize;
347 gs_interp_time_slice_proc = ctx_initialize;
348 gs_interp_time_slice_ticks = 0;
349 return 0;
350 }
351 /*
352 * The interpreter calls this procedure at the first reschedule point.
353 * It completes context initialization.
354 */
355 private int
356 ctx_initialize(i_ctx_t **pi_ctx_p)
357 {
358 i_ctx_t *i_ctx_p = *pi_ctx_p; /* for gs_imemory */
359 gs_ref_memory_t *imem = iimemory_system;
360 gs_scheduler_t *psched =
361 gs_alloc_struct_immovable((gs_memory_t *) imem, gs_scheduler_t,
362 &st_scheduler, "gs_scheduler");
363
364 psched->current = 0;
365 psched->active.head_index = psched->active.tail_index = 0;
366 psched->save_vm_reclaim = i_ctx_p->memory.spaces.vm_reclaim;
367 i_ctx_p->memory.spaces.vm_reclaim = context_reclaim;
368 psched->dead_index = 0;
369 memset(psched->table, 0, sizeof(psched->table));
370 /* Create an initial context. */
371 if (context_create(psched, &psched->current, &gs_imemory, *pi_ctx_p, true) < 0) {
372 lprintf("Can't create initial context!");
373 gs_abort(imemory);
374 }
375 psched->current->scheduler = psched;
376 /* Hook into the interpreter. */
377 *pi_ctx_p = &psched->current->state;
378 gs_interp_reschedule_proc = ctx_reschedule;
379 gs_interp_time_slice_proc = ctx_time_slice;
380 gs_interp_time_slice_ticks = reschedule_interval;
381 return 0;
382 }
383
384 /* ------ Interpreter interface to scheduler ------ */
385
386 /* When an operator decides it is time to run a new context, */
387 /* it returns o_reschedule. The interpreter saves all its state in */
388 /* memory, calls ctx_reschedule, and then loads the state from memory. */
389 private int
390 ctx_reschedule(i_ctx_t **pi_ctx_p)
391 {
392 gs_context_t *current = (gs_context_t *)*pi_ctx_p;
393 gs_scheduler_t *psched = current->scheduler;
394
395 #ifdef DEBUG
396 if (*pi_ctx_p != &current->state) {
397 lprintf2("current->state = 0x%lx, != i_ctx_p = 0x%lx!\n",
398 (ulong)&current->state, (ulong)*pi_ctx_p);
399 }
400 #endif
401 /* If there are any dead contexts waiting to be released, */
402 /* take care of that now. */
403 while (psched->dead_index != 0) {
404 gs_context_t *dead = index_context(psched, psched->dead_index);
405 long next_index = dead->next_index;
406
407 if (current == dead) {
408 if_debug1('"', "[\"]storing dead %ld\n", current->index);
409 context_state_store(&current->state);
410 current = 0;
411 }
412 context_destroy(dead);
413 psched->dead_index = next_index;
414 }
415 /* Update saved_local_vm. See above for the invariant. */
416 if (current != 0)
417 current->saved_local_vm =
418 current->state.memory.space_local->saved != 0;
419 /* Run the first ready context, taking the 'save' lock into account. */
420 {
421 gs_context_t *prev = 0;
422 gs_context_t *ready;
423
424 for (ready = index_context(psched, psched->active.head_index);;
425 prev = ready, ready = index_context(psched, ready->next_index)
426 ) {
427 if (ready == 0) {
428 if (current != 0)
429 context_store(psched, current);
430 lprintf("No context to run!");
431 return_error(e_Fatal);
432 }
433 /* See above for an explanation of the following test. */
434 if (ready->state.memory.space_local->saved != 0 &&
435 !ready->saved_local_vm
436 )
437 continue;
438 /* Found a context to run. */
439 {
440 ctx_index_t next_index = ready->next_index;
441
442 if (prev)
443 prev->next_index = next_index;
444 else
445 psched->active.head_index = next_index;
446 if (!next_index)
447 psched->active.tail_index = (prev ? prev->index : 0);
448 }
449 break;
450 }
451 if (ready == current)
452 return 0; /* no switch */
453 /*
454 * Save the state of the current context in psched->current,
455 * if any context is current.
456 */
457 if (current != 0)
458 context_store(psched, current);
459 psched->current = ready;
460 /* Load the state of the new current context. */
461 context_load(psched, ready);
462 /* Switch the interpreter's context state pointer. */
463 *pi_ctx_p = &ready->state;
464 }
465 return 0;
466 }
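/*
 * In summary, ctx_reschedule: releases any contexts queued for
 * destruction, records whether the outgoing context holds unmatched
 * saves, removes the first runnable context from the active list
 * (skipping contexts blocked by another context's unmatched save), and
 * swaps the interpreter state if that context differs from the current
 * one.
 */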
467
468 /* If the interpreter wants to time-slice, it saves its state, */
469 /* calls ctx_time_slice, and reloads its state. */
470 private int
471 ctx_time_slice(i_ctx_t **pi_ctx_p)
472 {
473 gs_scheduler_t *psched = ((gs_context_t *)*pi_ctx_p)->scheduler;
474
475 if (psched->active.head_index == 0)
476 return 0;
477 if_debug0('"', "[\"]time-slice\n");
478 add_last(psched, &psched->active, psched->current);
479 return ctx_reschedule(pi_ctx_p);
480 }
481
482 /* ------ Context operators ------ */
483
484 /* - currentcontext <context> */
485 private int
486 zcurrentcontext(i_ctx_t *i_ctx_p)
487 {
488 os_ptr op = osp;
489 const gs_context_t *current = (const gs_context_t *)i_ctx_p;
490
491 push(1);
492 make_int(op, current->index);
493 return 0;
494 }
495
496 /* <context> detach - */
497 private int
498 zdetach(i_ctx_t *i_ctx_p)
499 {
500 os_ptr op = osp;
501 const gs_scheduler_t *psched = ((gs_context_t *)i_ctx_p)->scheduler;
502 gs_context_t *pctx;
503 int code;
504
505 if ((code = context_param(psched, op, &pctx)) < 0)
506 return code;
507 if_debug2('\'', "[']detach %ld, status = %d\n",
508 pctx->index, pctx->status);
509 if (pctx->joiner_index != 0 || pctx->detach)
510 return_error(e_invalidcontext);
511 switch (pctx->status) {
512 case cs_active:
513 pctx->detach = true;
514 break;
515 case cs_done:
516 context_destroy(pctx);
517 }
518 pop(1);
519 return 0;
520 }
521
522 private int
523 do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin,
524 const ref * pstdout, uint mcount, bool local),
525 values_older_than(const ref_stack_t * pstack, uint first, uint last,
526 int max_space);
527 private int
528 fork_done(i_ctx_t *),
529 fork_done_with_error(i_ctx_t *),
530 finish_join(i_ctx_t *),
531 reschedule_now(i_ctx_t *);
532
533 /* <mark> <obj1> ... <objN> <proc> .fork <context> */
534 /* <mark> <obj1> ... <objN> <proc> <stdin|null> <stdout|null> */
535 /* .localfork <context> */
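/*
 * Illustrative use (not taken from the original source):
 *     mark 3 4 {add} .fork     % -> <context>
 *     join                     % -> mark 7, once the new context finishes
 * The objects between the mark and the procedure become the initial
 * operand stack of the new context.
 */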
536 private int
537 zfork(i_ctx_t *i_ctx_p)
538 {
539 os_ptr op = osp;
540 uint mcount = ref_stack_counttomark(&o_stack);
541 ref rnull;
542
543 if (mcount == 0)
544 return_error(e_unmatchedmark);
545 make_null(&rnull);
546 return do_fork(i_ctx_p, op, &rnull, &rnull, mcount, false);
547 }
548 private int
549 zlocalfork(i_ctx_t *i_ctx_p)
550 {
551 os_ptr op = osp;
552 uint mcount = ref_stack_counttomark(&o_stack);
553 int code;
554
555 if (mcount == 0)
556 return_error(e_unmatchedmark);
557 code = values_older_than(&o_stack, 1, mcount - 1, avm_local);
558 if (code < 0)
559 return code;
560 code = do_fork(i_ctx_p, op - 2, op - 1, op, mcount - 2, true);
561 if (code < 0)
562 return code;
563 op = osp;
564 op[-2] = *op;
565 pop(2);
566 return code;
567 }
568
569 /* Internal procedure to actually do the fork operation. */
570 private int
571 do_fork(i_ctx_t *i_ctx_p, os_ptr op, const ref * pstdin, const ref * pstdout,
572 uint mcount, bool local)
573 {
574 gs_context_t *pcur = (gs_context_t *)i_ctx_p;
575 gs_scheduler_t *psched = pcur->scheduler;
576 stream *s;
577 gs_dual_memory_t dmem;
578 gs_context_t *pctx;
579 ref old_userdict, new_userdict;
580 int code;
581
582 check_proc(*op);
583 if (iimemory_local->save_level)
584 return_error(e_invalidcontext);
585 if (r_has_type(pstdout, t_null)) {
586 code = zget_stdout(i_ctx_p, &s);
587 if (code < 0)
588 return code;
589 pstdout = &ref_stdio[1];
590 } else
591 check_read_file(s, pstdout);
592 if (r_has_type(pstdin, t_null)) {
593 code = zget_stdin(i_ctx_p, &s);
594 if (code < 0)
595 return code;
596 pstdin = &ref_stdio[0];
597 } else
598 check_read_file(s, pstdin);
599 dmem = gs_imemory;
600 if (local) {
601 /* Share global VM, private local VM. */
602 ref *puserdict;
603 uint userdict_size;
604 gs_memory_t *parent = iimemory_local->non_gc_memory;
605 gs_ref_memory_t *lmem;
606 gs_ref_memory_t *lmem_stable;
607
608 if (dict_find_string(systemdict, "userdict", &puserdict) <= 0 ||
609 !r_has_type(puserdict, t_dictionary)
610 )
611 return_error(e_Fatal);
612 old_userdict = *puserdict;
613 userdict_size = dict_maxlength(&old_userdict);
614 lmem = ialloc_alloc_state(parent, iimemory_local->chunk_size);
615 lmem_stable = ialloc_alloc_state(parent, iimemory_local->chunk_size);
616 if (lmem == 0 || lmem_stable == 0) {
617 gs_free_object(parent, lmem_stable, "do_fork");
618 gs_free_object(parent, lmem, "do_fork");
619 return_error(e_VMerror);
620 }
621 lmem->space = avm_local;
622 lmem_stable->space = avm_local;
623 lmem->stable_memory = (gs_memory_t *)lmem_stable;
624 dmem.space_local = lmem;
625 code = context_create(psched, &pctx, &dmem, &pcur->state, false);
626 if (code < 0) {
627 /****** FREE lmem ******/
628 return code;
629 }
630 /*
631 * Create a new userdict. PostScript code will take care of
632 * the rest of the initialization of the new context.
633 */
634 code = dict_alloc(lmem, userdict_size, &new_userdict);
635 if (code < 0) {
636 context_destroy(pctx);
637 /****** FREE lmem ******/
638 return code;
639 }
640 } else {
641 /* Share global and local VM. */
642 code = context_create(psched, &pctx, &dmem, &pcur->state, false);
643 if (code < 0) {
644 /****** FREE lmem ******/
645 return code;
646 }
647 /*
648 * Copy the gstate stack. The current method is not elegant;
649 * in fact, I'm not entirely sure it works.
650 */
651 {
652 int n;
653 const gs_state *old;
654 gs_state *new;
655
656 for (n = 0, old = igs; old != 0; old = gs_state_saved(old))
657 ++n;
658 for (old = pctx->state.pgs; old != 0; old = gs_state_saved(old))
659 --n;
660 for (; n > 0 && code >= 0; --n)
661 code = gs_gsave(pctx->state.pgs);
662 if (code < 0) {
663 /****** FREE lmem & GSTATES ******/
664 return code;
665 }
666 for (old = igs, new = pctx->state.pgs;
667 old != 0 /* (== new != 0) */ && code >= 0;
668 old = gs_state_saved(old), new = gs_state_saved(new)
669 )
670 code = gs_setgstate(new, old);
671 if (code < 0) {
672 /****** FREE lmem & GSTATES ******/
673 return code;
674 }
675 }
676 }
677 pctx->state.language_level = i_ctx_p->language_level;
678 pctx->state.dict_stack.min_size = idict_stack.min_size;
679 pctx->state.dict_stack.userdict_index = idict_stack.userdict_index;
680 pctx->state.stdio[0] = *pstdin;
681 pctx->state.stdio[1] = *pstdout;
682 pctx->state.stdio[2] = pcur->state.stdio[2];
683 /* Initialize the interpreter stacks. */
684 {
685 ref_stack_t *dstack = (ref_stack_t *)&pctx->state.dict_stack;
686 uint count = ref_stack_count(&d_stack);
687 uint copy = (local ? min_dstack_size : count);
688
689 ref_stack_push(dstack, copy);
690 stack_copy(dstack, &d_stack, copy, count - copy);
691 if (local) {
692 /* Substitute the new userdict for the old one. */
693 long i;
694
695 for (i = 0; i < copy; ++i) {
696 ref *pdref = ref_stack_index(dstack, i);
697
698 if (obj_eq(imemory, pdref, &old_userdict))
699 *pdref = new_userdict;
700 }
701 }
702 }
703 {
704 ref_stack_t *estack = (ref_stack_t *)&pctx->state.exec_stack;
705
706 ref_stack_push(estack, 3);
707 /* fork_done must be executed in both normal and error cases. */
708 make_mark_estack(estack->p - 2, es_other, fork_done_with_error);
709 make_oper(estack->p - 1, 0, fork_done);
710 *estack->p = *op;
711 }
712 {
713 ref_stack_t *ostack = (ref_stack_t *)&pctx->state.op_stack;
714 uint count = mcount - 2;
715
716 ref_stack_push(ostack, count);
717 stack_copy(ostack, &o_stack, count, osp - op + 1);
718 }
719 pctx->state.binary_object_format = pcur->state.binary_object_format;
720 add_last(psched, &psched->active, pctx);
721 pop(mcount - 1);
722 op = osp;
723 make_int(op, pctx->index);
724 return 0;
725 }
726
727 /*
728 * Check that all values being passed by fork or join are old enough
729 * to be valid in the environment to which they are being transferred.
730 */
731 private int
732 values_older_than(const ref_stack_t * pstack, uint first, uint last,
733 int next_space)
734 {
735 uint i;
736
737 for (i = first; i <= last; ++i)
738 if (r_space(ref_stack_index(pstack, (long)i)) >= next_space)
739 return_error(e_invalidaccess);
740 return 0;
741 }
742
743 /* This gets executed when a context terminates normally. */
744 /****** MUST DO ALL RESTORES ******/
745 /****** WHAT IF invalidrestore? ******/
746 private int
747 fork_done(i_ctx_t *i_ctx_p)
748 {
749 os_ptr op = osp;
750 gs_context_t *pcur = (gs_context_t *)i_ctx_p;
751 gs_scheduler_t *psched = pcur->scheduler;
752
753 if_debug2('\'', "[']done %ld%s\n", pcur->index,
754 (pcur->detach ? ", detached" : ""));
755 /*
756 * Clear the context's dictionary, execution and graphics stacks
757 * now, to retain as little as possible in case of a garbage
758 * collection or restore. We know that fork_done is the
759 * next-to-bottom entry on the execution stack.
760 */
761 ref_stack_pop_to(&d_stack, min_dstack_size);
762 pop_estack(&pcur->state, ref_stack_count(&e_stack) - 1);
763 gs_grestoreall(igs);
764 /*
765 * If there are any unmatched saves, we need to execute restores
766 * until there aren't. An invalidrestore is possible and will
767 * result in an error termination.
768 */
769 if (iimemory_local->save_level) {
770 ref *prestore;
771
772 if (dict_find_string(systemdict, "restore", &prestore) <= 0) {
773 lprintf("restore not found in systemdict!");
774 return_error(e_Fatal);
775 }
776 if (pcur->detach) {
777 ref_stack_clear(&o_stack); /* help avoid invalidrestore */
778 op = osp;
779 }
780 push(1);
781 make_tv(op, t_save, saveid, alloc_save_current_id(&gs_imemory));
782 push_op_estack(fork_done);
783 ++esp;
784 ref_assign(esp, prestore);
785 return o_push_estack;
786 }
787 if (pcur->detach) {
788 /*
789 * We would like to free the context's memory, but we can't do
790 * it yet, because the interpreter still has references to it.
791 * Instead, queue the context to be freed the next time we
792 * reschedule. We can, however, clear its operand stack now.
793 */
794 ref_stack_clear(&o_stack);
795 context_store(psched, pcur);
796 pcur->next_index = psched->dead_index;
797 psched->dead_index = pcur->index;
798 psched->current = 0;
799 } else {
800 gs_context_t *pctx = index_context(psched, pcur->joiner_index);
801
802 pcur->status = cs_done;
803 /* Schedule the context waiting to join this one, if any. */
804 if (pctx != 0)
805 add_last(psched, &psched->active, pctx);
806 }
807 return o_reschedule;
808 }
809 /*
810 * This gets executed when the stack is being unwound for an error
811 * termination.
812 */
813 private int
814 fork_done_with_error(i_ctx_t *i_ctx_p)
815 {
816 /****** WHAT TO DO? ******/
817 return fork_done(i_ctx_p);
818 }
819
820 /* <context> join <mark> <obj1> ... <objN> */
821 private int
822 zjoin(i_ctx_t *i_ctx_p)
823 {
824 os_ptr op = osp;
825 gs_context_t *current = (gs_context_t *)i_ctx_p;
826 gs_scheduler_t *psched = current->scheduler;
827 gs_context_t *pctx;
828 int code;
829
830 if ((code = context_param(psched, op, &pctx)) < 0)
831 return code;
832 if_debug2('\'', "[']join %ld, status = %d\n",
833 pctx->index, pctx->status);
834 /*
835 * It doesn't seem logically necessary, but the Red Book says that
836 * the context being joined must share both global and local VM with
837 * the current context.
838 */
839 if (pctx->joiner_index != 0 || pctx->detach || pctx == current ||
840 pctx->state.memory.space_global !=
841 current->state.memory.space_global ||
842 pctx->state.memory.space_local !=
843 current->state.memory.space_local ||
844 iimemory_local->save_level != 0
845 )
846 return_error(e_invalidcontext);
847 switch (pctx->status) {
848 case cs_active:
849 /*
850 * We need to re-execute the join after the joined
851 * context is done. Since we can't return both
852 * o_push_estack and o_reschedule, we push a call on
853 * reschedule_now, which accomplishes the latter.
854 */
855 check_estack(2);
856 push_op_estack(finish_join);
857 push_op_estack(reschedule_now);
858 pctx->joiner_index = current->index;
859 return o_push_estack;
860 case cs_done:
861 {
862 const ref_stack_t *ostack =
863 (ref_stack_t *)&pctx->state.op_stack;
864 uint count = ref_stack_count(ostack);
865
866 push(count);
867 {
868 ref *rp = ref_stack_index(&o_stack, count);
869
870 make_mark(rp);
871 }
872 stack_copy(&o_stack, ostack, count, 0);
873 context_destroy(pctx);
874 }
875 }
876 return 0;
877 }
878
879 /* Finish a deferred join. */
880 private int
881 finish_join(i_ctx_t *i_ctx_p)
882 {
883 os_ptr op = osp;
884 gs_context_t *current = (gs_context_t *)i_ctx_p;
885 gs_scheduler_t *psched = current->scheduler;
886 gs_context_t *pctx;
887 int code;
888
889 if ((code = context_param(psched, op, &pctx)) < 0)
890 return code;
891 if_debug2('\'', "[']finish_join %ld, status = %d\n",
892 pctx->index, pctx->status);
893 if (pctx->joiner_index != current->index)
894 return_error(e_invalidcontext);
895 pctx->joiner_index = 0;
896 return zjoin(i_ctx_p);
897 }
898
899 /* Reschedule now. */
900 private int
901 reschedule_now(i_ctx_t *i_ctx_p)
902 {
903 return o_reschedule;
904 }
905
906 /* - yield - */
907 private int
908 zyield(i_ctx_t *i_ctx_p)
909 {
910 gs_context_t *current = (gs_context_t *)i_ctx_p;
911 gs_scheduler_t *psched = current->scheduler;
912
913 if (psched->active.head_index == 0)
914 return 0;
915 if_debug0('"', "[\"]yield\n");
916 add_last(psched, &psched->active, current);
917 return o_reschedule;
918 }
919
920 /* ------ Condition and lock operators ------ */
921
922 private int
923 monitor_cleanup(i_ctx_t *),
924 monitor_release(i_ctx_t *),
925 await_lock(i_ctx_t *);
926 private void
927 activate_waiting(gs_scheduler_t *, ctx_list_t * pcl);
928
929 /* - condition <condition> */
930 private int
931 zcondition(i_ctx_t *i_ctx_p)
932 {
933 os_ptr op = osp;
934 gs_condition_t *pcond =
935 ialloc_struct(gs_condition_t, &st_condition, "zcondition");
936
937 if (pcond == 0)
938 return_error(e_VMerror);
939 pcond->waiting.head_index = pcond->waiting.tail_index = 0;
940 push(1);
941 make_istruct(op, a_all, pcond);
942 return 0;
943 }
944
945 /* - lock <lock> */
946 private int
947 zlock(i_ctx_t *i_ctx_p)
948 {
949 os_ptr op = osp;
950 gs_lock_t *plock = ialloc_struct(gs_lock_t, &st_lock, "zlock");
951
952 if (plock == 0)
953 return_error(e_VMerror);
954 plock->holder_index = 0;
955 plock->waiting.head_index = plock->waiting.tail_index = 0;
956 push(1);
957 make_istruct(op, a_all, plock);
958 return 0;
959 }
960
961 /* <lock> <proc> monitor - */
962 private int
963 zmonitor(i_ctx_t *i_ctx_p)
964 {
965 gs_context_t *current = (gs_context_t *)i_ctx_p;
966 os_ptr op = osp;
967 gs_lock_t *plock;
968 gs_context_t *pctx;
969 int code;
970
971 check_stype(op[-1], st_lock);
972 check_proc(*op);
973 plock = r_ptr(op - 1, gs_lock_t);
974 pctx = index_context(current->scheduler, plock->holder_index);
975 if_debug1('\'', "[']monitor 0x%lx\n", (ulong) plock);
976 if (pctx != 0) {
977 if (pctx == current ||
978 (iimemory_local->save_level != 0 &&
979 pctx->state.memory.space_local ==
980 current->state.memory.space_local)
981 )
982 return_error(e_invalidcontext);
983 }
984 /*
985 * We push on the e-stack:
986 * The lock object
987 * An e-stack mark with monitor_cleanup, to release the lock
988 * in case of an error
989 * monitor_release, to release the lock in the normal case
990 * The procedure to execute
991 */
992 check_estack(4);
993 code = lock_acquire(op - 1, current);
994 if (code != 0) { /* We didn't acquire the lock. Re-execute this later. */
995 push_op_estack(zmonitor);
996 return code; /* o_reschedule */
997 }
998 *++esp = op[-1];
999 push_mark_estack(es_other, monitor_cleanup);
1000 push_op_estack(monitor_release);
1001 *++esp = *op;
1002 pop(2);
1003 return o_push_estack;
1004 }
1005 /* Release the monitor lock when unwinding for an error or exit. */
1006 private int
1007 monitor_cleanup(i_ctx_t *i_ctx_p)
1008 {
1009 int code = lock_release(esp);
1010
1011 if (code < 0)
1012 return code;
1013 --esp;
1014 return o_pop_estack;
1015 }
1016 /* Release the monitor lock when the procedure completes. */
1017 private int
1018 monitor_release(i_ctx_t *i_ctx_p)
1019 {
1020 int code = lock_release(esp - 1);
1021
1022 if (code < 0)
1023 return code;
1024 esp -= 2;
1025 return o_pop_estack;
1026 }
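/*
 * Both procedures depend on the e-stack layout pushed by zmonitor: when
 * monitor_release runs normally the mark is at esp and the lock just
 * below it, while monitor_cleanup runs during error unwinding after the
 * mark has been cut back, leaving the lock at esp.
 */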
1027
1028 /* <condition> notify - */
1029 private int
1030 znotify(i_ctx_t *i_ctx_p)
1031 {
1032 os_ptr op = osp;
1033 gs_context_t *current = (gs_context_t *)i_ctx_p;
1034 gs_condition_t *pcond;
1035
1036 check_stype(*op, st_condition);
1037 pcond = r_ptr(op, gs_condition_t);
1038 if_debug1('"', "[\"]notify 0x%lx\n", (ulong) pcond);
1039 pop(1);
1040 op--;
1041 if (pcond->waiting.head_index == 0) /* nothing to do */
1042 return 0;
1043 activate_waiting(current->scheduler, &pcond->waiting);
1044 return zyield(i_ctx_p);
1045 }
1046
1047 /* <lock> <condition> wait - */
1048 private int
1049 zwait(i_ctx_t *i_ctx_p)
1050 {
1051 os_ptr op = osp;
1052 gs_context_t *current = (gs_context_t *)i_ctx_p;
1053 gs_scheduler_t *psched = current->scheduler;
1054 gs_lock_t *plock;
1055 gs_context_t *pctx;
1056 gs_condition_t *pcond;
1057
1058 check_stype(op[-1], st_lock);
1059 plock = r_ptr(op - 1, gs_lock_t);
1060 check_stype(*op, st_condition);
1061 pcond = r_ptr(op, gs_condition_t);
1062 if_debug2('"', "[\"]wait lock 0x%lx, condition 0x%lx\n",
1063 (ulong) plock, (ulong) pcond);
1064 pctx = index_context(psched, plock->holder_index);
1065 if (pctx == 0 || pctx != psched->current ||
1066 (iimemory_local->save_level != 0 &&
1067 (r_space(op - 1) == avm_local || r_space(op) == avm_local))
1068 )
1069 return_error(e_invalidcontext);
1070 check_estack(1);
1071 lock_release(op - 1);
1072 add_last(psched, &pcond->waiting, pctx);
1073 push_op_estack(await_lock);
1074 return o_reschedule;
1075 }
1076 /* When the condition is signaled, wait to reacquire the lock. */
1077 private int
1078 await_lock(i_ctx_t *i_ctx_p)
1079 {
1080 gs_context_t *current = (gs_context_t *)i_ctx_p;
1081 os_ptr op = osp;
1082 int code = lock_acquire(op - 1, current);
1083
1084 if (code == 0) {
1085 pop(2);
1086 return 0;
1087 }
1088 /* We didn't acquire the lock. Re-execute the wait. */
1089 push_op_estack(await_lock);
1090 return code; /* o_reschedule */
1091 }
1092
1093 /* Activate a list of waiting contexts, and reset the list. */
1094 private void
1095 activate_waiting(gs_scheduler_t *psched, ctx_list_t * pcl)
1096 {
1097 gs_context_t *pctx = index_context(psched, pcl->head_index);
1098 gs_context_t *next;
1099
1100 for (; pctx != 0; pctx = next) {
1101 next = index_context(psched, pctx->next_index);
1102 add_last(psched, &psched->active, pctx);
1103 }
1104 pcl->head_index = pcl->tail_index = 0;
1105 }
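/*
 * activate_waiting is used by znotify and by lock_release: it simply
 * makes every waiter runnable again, and each re-awakened context then
 * retries its own acquisition (await_lock or zmonitor) when it is next
 * scheduled.
 */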
1106
1107 /* ------ Miscellaneous operators ------ */
1108
1109 /* - usertime <int> */
1110 private int
1111 zusertime_context(i_ctx_t *i_ctx_p)
1112 {
1113 gs_context_t *current = (gs_context_t *)i_ctx_p;
1114 gs_scheduler_t *psched = current->scheduler;
1115 os_ptr op = osp;
1116 long utime = context_usertime();
1117
1118 push(1);
1119 if (!current->state.keep_usertime) {
1120 /*
1121 * This is the first time this context has executed usertime:
1122 * we must track its execution time from now on.
1123 */
1124 psched->usertime_initial = utime;
1125 current->state.keep_usertime = true;
1126 }
1127 make_int(op, current->state.usertime_total + utime -
1128 psched->usertime_initial);
1129 return 0;
1130 }
1131
1132 /* ------ Internal procedures ------ */
1133
1134 /* Create a context. */
1135 private int
1136 context_create(gs_scheduler_t * psched, gs_context_t ** ppctx,
1137 const gs_dual_memory_t * dmem,
1138 const gs_context_state_t *i_ctx_p, bool copy_state)
1139 {
1140 /*
1141 * Contexts are always created at the outermost save level, so they do
1142 * not need to be allocated in stable memory for the sake of
1143 * save/restore. However, context_reclaim needs to be able to test
1144 * whether a given context belongs to a given local VM, and allocating
1145 * contexts in stable local VM avoids the need to scan multiple save
1146 * levels when making this test.
1147 */
1148 gs_memory_t *mem = gs_memory_stable((gs_memory_t *)dmem->space_local);
1149 gs_context_t *pctx;
1150 int code;
1151 long ctx_index;
1152 gs_context_t **pte;
1153
1154 pctx = gs_alloc_struct(mem, gs_context_t, &st_context, "context_create");
1155 if (pctx == 0)
1156 return_error(e_VMerror);
1157 if (copy_state) {
1158 pctx->state = *i_ctx_p;
1159 } else {
1160 gs_context_state_t *pctx_st = &pctx->state;
1161
1162 code = context_state_alloc(&pctx_st, systemdict, dmem);
1163 if (code < 0) {
1164 gs_free_object(mem, pctx, "context_create");
1165 return code;
1166 }
1167 }
1168 ctx_index = gs_next_ids(mem, 1);
1169 pctx->scheduler = psched;
1170 pctx->status = cs_active;
1171 pctx->index = ctx_index;
1172 pctx->detach = false;
1173 pctx->saved_local_vm = false;
1174 pctx->visible = true;
1175 pctx->next_index = 0;
1176 pctx->joiner_index = 0;
1177 pte = &psched->table[ctx_index % CTX_TABLE_SIZE];
1178 pctx->table_next = *pte;
1179 *pte = pctx;
1180 *ppctx = pctx;
1181 if (gs_debug_c('\'') | gs_debug_c('"'))
1182 dlprintf2("[']create %ld at 0x%lx\n", ctx_index, (ulong) pctx);
1183 return 0;
1184 }
1185
1186 /* Check a context ID. Note that we do not check for context validity. */
1187 private int
1188 context_param(const gs_scheduler_t * psched, os_ptr op, gs_context_t ** ppctx)
1189 {
1190 gs_context_t *pctx;
1191
1192 check_type(*op, t_integer);
1193 pctx = index_context(psched, op->value.intval);
1194 if (pctx == 0)
1195 return_error(e_invalidcontext);
1196 *ppctx = pctx;
1197 return 0;
1198 }
1199
1200 /* Read the usertime as a single value, in milliseconds. */
1201 private long
1202 context_usertime(void)
1203 {
1204 long secs_ns[2];
1205
1206 gp_get_usertime(secs_ns);
1207 return secs_ns[0] * 1000 + secs_ns[1] / 1000000;
1208 }
1209
1210 /* Destroy a context. */
1211 private void
1212 context_destroy(gs_context_t * pctx)
1213 {
1214 gs_ref_memory_t *mem = pctx->state.memory.space_local;
1215 gs_scheduler_t *psched = pctx->scheduler;
1216 gs_context_t **ppctx = &psched->table[pctx->index % CTX_TABLE_SIZE];
1217
1218 while (*ppctx != pctx)
1219 ppctx = &(*ppctx)->table_next;
1220 *ppctx = (*ppctx)->table_next;
1221 if (gs_debug_c('\'') | gs_debug_c('"'))
1222 dlprintf3("[']destroy %ld at 0x%lx, status = %d\n",
1223 pctx->index, (ulong) pctx, pctx->status);
1224 if (!context_state_free(&pctx->state))
1225 gs_free_object((gs_memory_t *) mem, pctx, "context_destroy");
1226 }
1227
1228 /* Copy the top elements of one stack to another. */
1229 /* Note that this does not push the elements: */
1230 /* the destination stack must have enough space preallocated. */
1231 private void
1232 stack_copy(ref_stack_t * to, const ref_stack_t * from, uint count,
1233 uint from_index)
1234 {
1235 long i;
1236
1237 for (i = (long)count - 1; i >= 0; --i)
1238 *ref_stack_index(to, i) = *ref_stack_index(from, i + from_index);
1239 }
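/*
 * Example from do_fork above: the parent's dictionary stack is copied
 * into the child with
 *     ref_stack_push(dstack, copy);
 *     stack_copy(dstack, &d_stack, copy, count - copy);
 * i.e. space must be reserved with ref_stack_push before copying.
 */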
1240
1241 /* Acquire a lock. Return 0 if acquired, o_reschedule if not. */
1242 private int
1243 lock_acquire(os_ptr op, gs_context_t * pctx)
1244 {
1245 gs_lock_t *plock = r_ptr(op, gs_lock_t);
1246
1247 if (plock->holder_index == 0) {
1248 plock->holder_index = pctx->index;
1249 plock->scheduler = pctx->scheduler;
1250 return 0;
1251 }
1252 add_last(pctx->scheduler, &plock->waiting, pctx);
1253 return o_reschedule;
1254 }
1255
1256 /* Release a lock. Return 0 if OK, e_invalidcontext if not. */
1257 private int
1258 lock_release(ref * op)
1259 {
1260 gs_lock_t *plock = r_ptr(op, gs_lock_t);
1261 gs_scheduler_t *psched = plock->scheduler;
1262 gs_context_t *pctx = index_context(psched, plock->holder_index);
1263
1264 if (pctx != 0 && pctx == psched->current) {
1265 plock->holder_index = 0;
1266 activate_waiting(psched, &plock->waiting);
1267 return 0;
1268 }
1269 return_error(e_invalidcontext);
1270 }
1271
1272 /* ------ Initialization procedure ------ */
1273
1274 /* We need to split the table because of the 16-element limit. */
1275 const op_def zcontext1_op_defs[] = {
1276 {"0condition", zcondition},
1277 {"0currentcontext", zcurrentcontext},
1278 {"1detach", zdetach},
1279 {"2.fork", zfork},
1280 {"1join", zjoin},
1281 {"4.localfork", zlocalfork},
1282 {"0lock", zlock},
1283 {"2monitor", zmonitor},
1284 {"1notify", znotify},
1285 {"2wait", zwait},
1286 {"0yield", zyield},
1287 /* Note that the following replace prior definitions */
1288 /* in the indicated files: */
1289 {"0usertime", zusertime_context}, /* zmisc.c */
1290 op_def_end(0)
1291 };
1292 const op_def zcontext2_op_defs[] = {
1293 /* Internal operators */
1294 {"0%fork_done", fork_done},
1295 {"1%finish_join", finish_join},
1296 {"0%monitor_cleanup", monitor_cleanup},
1297 {"0%monitor_release", monitor_release},
1298 {"2%await_lock", await_lock},
1299 op_def_end(zcontext_init)
1300 };
1301