1 /* Copyright (C) 1989, 2000, 2001 Aladdin Enterprises. All rights reserved. 2 3 This file is part of AFPL Ghostscript. 4 5 AFPL Ghostscript is distributed with NO WARRANTY OF ANY KIND. No author or 6 distributor accepts any responsibility for the consequences of using it, or 7 for whether it serves any particular purpose or works at all, unless he or 8 she says so in writing. Refer to the Aladdin Free Public License (the 9 "License") for full details. 10 11 Every copy of AFPL Ghostscript must include a copy of the License, normally 12 in a plain ASCII text file named PUBLIC. The License grants you the right 13 to copy, modify and redistribute AFPL Ghostscript, but only under certain 14 conditions described in the License. Among other things, the License 15 requires that the copyright notice and this notice be preserved on all 16 copies. 17 */ 18 19 /*$Id: interp.c,v 1.8 2001/04/06 06:42:45 rayjj Exp $ */ 20 /* Ghostscript language interpreter */ 21 #include "memory_.h" 22 #include "string_.h" 23 #include "ghost.h" 24 #include "gsstruct.h" /* for iastruct.h */ 25 #include "stream.h" 26 #include "errors.h" 27 #include "estack.h" 28 #include "ialloc.h" 29 #include "iastruct.h" 30 #include "icontext.h" 31 #include "icremap.h" 32 #include "igstate.h" /* for handling e_RemapColor */ 33 #include "inamedef.h" 34 #include "iname.h" /* for the_name_table */ 35 #include "interp.h" 36 #include "ipacked.h" 37 #include "ostack.h" /* must precede iscan.h */ 38 #include "strimpl.h" /* for sfilter.h */ 39 #include "sfilter.h" /* for iscan.h */ 40 #include "iscan.h" 41 #include "iddict.h" 42 #include "isave.h" 43 #include "istack.h" 44 #include "itoken.h" 45 #include "iutil.h" /* for array_get */ 46 #include "ivmspace.h" 47 #include "dstack.h" 48 #include "files.h" /* for file_check_read */ 49 #include "oper.h" 50 #include "store.h" 51 52 /* 53 * We may or may not optimize the handling of the special fast operators 54 * in packed arrays. If we do this, they run much faster when packed, but 55 * slightly slower when not packed. 56 */ 57 #define PACKED_SPECIAL_OPS 1 58 59 /* 60 * Pseudo-operators (procedures of type t_oparray) record 61 * the operand and dictionary stack pointers, and restore them if an error 62 * occurs during the execution of the procedure and if the procedure hasn't 63 * (net) decreased the depth of the stack. While this obviously doesn't 64 * do all the work of restoring the state if a pseudo-operator gets an 65 * error, it's a big help. The only downside is that pseudo-operators run 66 * a little slower. 67 */ 68 69 /* GC descriptors for stacks */ 70 extern_st(st_ref_stack); 71 public_st_dict_stack(); 72 public_st_exec_stack(); 73 public_st_op_stack(); 74 75 /* 76 * The procedure to call if an operator requests rescheduling. 77 * This causes an error unless the context machinery has been installed. 78 */ 79 private int 80 no_reschedule(i_ctx_t **pi_ctx_p) 81 { 82 return_error(e_invalidcontext); 83 } 84 int (*gs_interp_reschedule_proc)(P1(i_ctx_t **)) = no_reschedule; 85 86 /* 87 * The procedure to call for time-slicing. 88 * This is a no-op unless the context machinery has been installed. 89 */ 90 int (*gs_interp_time_slice_proc)(P1(i_ctx_t **)) = 0; 91 92 /* 93 * The number of interpreter "ticks" between calls on the time_slice_proc. 94 * Currently, the clock ticks before each operator, and at each 95 * procedure return. 96 */ 97 int gs_interp_time_slice_ticks = 0x7fff; 98 99 /* 100 * Apply an operator. When debugging, we route all operator calls 101 * through a procedure. 102 */ 103 #ifdef DEBUG 104 private int 105 call_operator(op_proc_t op_proc, i_ctx_t *i_ctx_p) 106 { 107 int code = op_proc(i_ctx_p); 108 109 return code; 110 } 111 #else 112 # define call_operator(proc, p) ((*(proc))(p)) 113 #endif 114 115 /* Define debugging statistics. */ 116 #ifdef DEBUG 117 struct stats_interp_s { 118 long top; 119 long lit, lit_array, exec_array, exec_operator, exec_name; 120 long x_add, x_def, x_dup, x_exch, x_if, x_ifelse, 121 x_index, x_pop, x_roll, x_sub; 122 long find_name, name_lit, name_proc, name_oparray, name_operator; 123 long p_full, p_exec_operator, p_exec_oparray, p_exec_non_x_operator, 124 p_integer, p_lit_name, p_exec_name; 125 long p_find_name, p_name_lit, p_name_proc; 126 } stats_interp; 127 # define INCR(v) (++(stats_interp.v)) 128 #else 129 # define INCR(v) DO_NOTHING 130 #endif 131 132 /* Forward references */ 133 private int estack_underflow(P1(i_ctx_t *)); 134 private int interp(P3(i_ctx_t **, const ref *, ref *)); 135 private int interp_exit(P1(i_ctx_t *)); 136 private void set_gc_signal(P3(i_ctx_t *, int *, int)); 137 private int copy_stack(P3(i_ctx_t *, const ref_stack_t *, ref *)); 138 private int oparray_pop(P1(i_ctx_t *)); 139 private int oparray_cleanup(P1(i_ctx_t *)); 140 private int zsetstackprotect(P1(i_ctx_t *)); 141 private int zcurrentstackprotect(P1(i_ctx_t *)); 142 143 /* Stack sizes */ 144 145 /* The maximum stack sizes may all be set in the makefile. */ 146 147 /* 148 * Define the initial maximum size of the operand stack (MaxOpStack 149 * user parameter). 150 */ 151 #ifndef MAX_OSTACK 152 # define MAX_OSTACK 800 153 #endif 154 /* 155 * The minimum block size for extending the operand stack is the larger of: 156 * - the maximum number of parameters to an operator 157 * (currently setcolorscreen, with 12 parameters); 158 * - the maximum number of values pushed by an operator 159 * (currently setcolortransfer, which calls zcolor_remap_one 4 times 160 * and therefore pushes 16 values). 161 */ 162 #define MIN_BLOCK_OSTACK 16 163 const int gs_interp_max_op_num_args = MIN_BLOCK_OSTACK; /* for iinit.c */ 164 165 /* 166 * Define the initial maximum size of the execution stack (MaxExecStack 167 * user parameter). 168 */ 169 #ifndef MAX_ESTACK 170 # define MAX_ESTACK 5000 171 #endif 172 /* 173 * The minimum block size for extending the execution stack is the largest 174 * size of a contiguous block surrounding an e-stack mark. (At least, 175 * that's what the minimum value would be if we supported multi-block 176 * estacks, which we currently don't.) Currently, the largest such block is 177 * the one created for text processing, which is 8 (snumpush) slots. 178 */ 179 #define MIN_BLOCK_ESTACK 8 180 /* 181 * If we get an e-stack overflow, we need to cut it back far enough to 182 * have some headroom for executing the error procedure. 183 */ 184 #define ES_HEADROOM 20 185 186 /* 187 * Define the initial maximum size of the dictionary stack (MaxDictStack 188 * user parameter). Again, this is also currently the block size for 189 * extending the d-stack. 190 */ 191 #ifndef MAX_DSTACK 192 # define MAX_DSTACK 20 193 #endif 194 /* 195 * The minimum block size for extending the dictionary stack is the number 196 * of permanent entries on the dictionary stack, currently 3. 197 */ 198 #define MIN_BLOCK_DSTACK 3 199 200 /* See estack.h for a description of the execution stack. */ 201 202 /* The logic for managing icount and iref below assumes that */ 203 /* there are no control operators which pop and then push */ 204 /* information on the execution stack. */ 205 206 /* Stacks */ 207 extern_st(st_ref_stack); 208 #define OS_GUARD_UNDER 10 209 #define OS_GUARD_OVER 10 210 #define OS_REFS_SIZE(body_size)\ 211 (stack_block_refs + OS_GUARD_UNDER + (body_size) + OS_GUARD_OVER) 212 213 #define ES_GUARD_UNDER 1 214 #define ES_GUARD_OVER 10 215 #define ES_REFS_SIZE(body_size)\ 216 (stack_block_refs + ES_GUARD_UNDER + (body_size) + ES_GUARD_OVER) 217 218 #define DS_REFS_SIZE(body_size)\ 219 (stack_block_refs + (body_size)) 220 221 /* Extended types. The interpreter may replace the type of operators */ 222 /* in procedures with these, to speed up the interpretation loop. */ 223 /****** NOTE: If you add or change entries in this list, */ 224 /****** you must change the three dispatches in the interpreter loop. */ 225 /* The operator procedures are declared in opextern.h. */ 226 #define tx_op t_next_index 227 typedef enum { 228 tx_op_add = tx_op, 229 tx_op_def, 230 tx_op_dup, 231 tx_op_exch, 232 tx_op_if, 233 tx_op_ifelse, 234 tx_op_index, 235 tx_op_pop, 236 tx_op_roll, 237 tx_op_sub, 238 tx_next_op 239 } special_op_types; 240 241 #define num_special_ops ((int)tx_next_op - tx_op) 242 const int gs_interp_num_special_ops = num_special_ops; /* for iinit.c */ 243 const int tx_next_index = tx_next_op; 244 245 /* 246 * Define the interpreter operators, which include the extended-type 247 * operators defined in the list above. NOTE: if the size of this table 248 * ever exceeds 15 real entries, it will have to be split. 249 */ 250 const op_def interp_op_defs[] = { 251 /* 252 * The very first entry, which corresponds to operator index 0, 253 * must not contain an actual operator. 254 */ 255 op_def_begin_dict("systemdict"), 256 /* 257 * The next entries must be the extended-type operators, in the 258 * correct order. 259 */ 260 {"2add", zadd}, 261 {"2def", zdef}, 262 {"1dup", zdup}, 263 {"2exch", zexch}, 264 {"2if", zif}, 265 {"3ifelse", zifelse}, 266 {"1index", zindex}, 267 {"1pop", zpop}, 268 {"2roll", zroll}, 269 {"2sub", zsub}, 270 /* 271 * The remaining entries are internal operators. 272 */ 273 {"0.currentstackprotect", zcurrentstackprotect}, 274 {"1.setstackprotect", zsetstackprotect}, 275 {"0%interp_exit", interp_exit}, 276 {"0%oparray_pop", oparray_pop}, 277 op_def_end(0) 278 }; 279 280 #define make_null_proc(pref)\ 281 make_empty_const_array(pref, a_executable + a_readonly) 282 283 /* Initialize the interpreter. */ 284 int 285 gs_interp_init(i_ctx_t **pi_ctx_p, const ref *psystem_dict, 286 gs_dual_memory_t *dmem) 287 { 288 /* Create and initialize a context state. */ 289 gs_context_state_t *pcst = 0; 290 int code = context_state_alloc(&pcst, psystem_dict, dmem); 291 292 if (code >= 0) 293 code = context_state_load(pcst); 294 if (code < 0) 295 lprintf1("Fatal error %d in gs_interp_init!", code); 296 *pi_ctx_p = pcst; 297 return code; 298 } 299 /* 300 * Create initial stacks for the interpreter. 301 * We export this for creating new contexts. 302 */ 303 int 304 gs_interp_alloc_stacks(gs_ref_memory_t *mem, gs_context_state_t * pcst) 305 { 306 gs_ref_memory_t *smem = 307 (gs_ref_memory_t *)gs_memory_stable((gs_memory_t *)mem); 308 ref stk; 309 310 #define REFS_SIZE_OSTACK OS_REFS_SIZE(MAX_OSTACK) 311 #define REFS_SIZE_ESTACK ES_REFS_SIZE(MAX_ESTACK) 312 #define REFS_SIZE_DSTACK DS_REFS_SIZE(MAX_DSTACK) 313 gs_alloc_ref_array(smem, &stk, 0, 314 REFS_SIZE_OSTACK + REFS_SIZE_ESTACK + 315 REFS_SIZE_DSTACK, "gs_interp_alloc_stacks"); 316 317 { 318 ref_stack_t *pos = &pcst->op_stack.stack; 319 320 r_set_size(&stk, REFS_SIZE_OSTACK); 321 ref_stack_init(pos, &stk, OS_GUARD_UNDER, OS_GUARD_OVER, NULL, 322 smem, NULL); 323 ref_stack_set_error_codes(pos, e_stackunderflow, e_stackoverflow); 324 ref_stack_set_max_count(pos, MAX_OSTACK); 325 stk.value.refs += REFS_SIZE_OSTACK; 326 } 327 328 { 329 ref_stack_t *pes = &pcst->exec_stack.stack; 330 ref euop; 331 332 r_set_size(&stk, REFS_SIZE_ESTACK); 333 make_oper(&euop, 0, estack_underflow); 334 ref_stack_init(pes, &stk, ES_GUARD_UNDER, ES_GUARD_OVER, &euop, 335 smem, NULL); 336 ref_stack_set_error_codes(pes, e_ExecStackUnderflow, 337 e_execstackoverflow); 338 /**************** E-STACK EXPANSION IS NYI. ****************/ 339 ref_stack_allow_expansion(pes, false); 340 ref_stack_set_max_count(pes, MAX_ESTACK); 341 stk.value.refs += REFS_SIZE_ESTACK; 342 } 343 344 { 345 ref_stack_t *pds = &pcst->dict_stack.stack; 346 347 r_set_size(&stk, REFS_SIZE_DSTACK); 348 ref_stack_init(pds, &stk, 0, 0, NULL, smem, NULL); 349 ref_stack_set_error_codes(pds, e_dictstackunderflow, 350 e_dictstackoverflow); 351 ref_stack_set_max_count(pds, MAX_DSTACK); 352 } 353 354 #undef REFS_SIZE_OSTACK 355 #undef REFS_SIZE_ESTACK 356 #undef REFS_SIZE_DSTACK 357 return 0; 358 } 359 /* 360 * Free the stacks when destroying a context. This is the inverse of 361 * create_stacks. 362 */ 363 void 364 gs_interp_free_stacks(gs_ref_memory_t * smem, gs_context_state_t * pcst) 365 { 366 /* Free the stacks in inverse order of allocation. */ 367 ref_stack_release(&pcst->dict_stack.stack); 368 ref_stack_release(&pcst->exec_stack.stack); 369 ref_stack_release(&pcst->op_stack.stack); 370 } 371 void 372 gs_interp_reset(i_ctx_t *i_ctx_p) 373 { /* Reset the stacks. */ 374 ref_stack_clear(&o_stack); 375 ref_stack_clear(&e_stack); 376 esp++; 377 make_oper(esp, 0, interp_exit); 378 ref_stack_pop_to(&d_stack, min_dstack_size); 379 dict_set_top(); 380 } 381 /* Report an e-stack block underflow. The bottom guard slots of */ 382 /* e-stack blocks contain a pointer to this procedure. */ 383 private int 384 estack_underflow(i_ctx_t *i_ctx_p) 385 { 386 return e_ExecStackUnderflow; 387 } 388 389 /* 390 * Create an operator during initialization. 391 * If operator is hard-coded into the interpreter, 392 * assign it a special type and index. 393 */ 394 void 395 gs_interp_make_oper(ref * opref, op_proc_t proc, int idx) 396 { 397 int i; 398 399 for (i = num_special_ops; i > 0 && proc != interp_op_defs[i].proc; --i) 400 DO_NOTHING; 401 if (i > 0) 402 make_tasv(opref, tx_op + (i - 1), a_executable, i, opproc, proc); 403 else 404 make_tasv(opref, t_operator, a_executable, idx, opproc, proc); 405 } 406 407 /* 408 * Call the garbage collector, updating the context pointer properly. 409 */ 410 private int 411 interp_reclaim(i_ctx_t **pi_ctx_p, int space) 412 { 413 i_ctx_t *i_ctx_p = *pi_ctx_p; 414 gs_gc_root_t ctx_root; 415 int code; 416 417 gs_register_struct_root(imemory_system, &ctx_root, 418 (void **)pi_ctx_p, "interp_reclaim(pi_ctx_p)"); 419 code = (*idmemory->reclaim)(idmemory, space); 420 i_ctx_p = *pi_ctx_p; /* may have moved */ 421 gs_unregister_root(imemory_system, &ctx_root, "interp_reclaim(pi_ctx_p)"); 422 return code; 423 } 424 425 /* 426 * Invoke the interpreter. If execution completes normally, return 0. 427 * If an error occurs, the action depends on user_errors as follows: 428 * user_errors < 0: always return an error code. 429 * user_errors >= 0: let the PostScript machinery handle all errors. 430 * (This will eventually result in a fatal error if no 'stopped' 431 * is active.) 432 * In case of a quit or a fatal error, also store the exit code. 433 * Set *perror_object to null or the error object. 434 */ 435 private int gs_call_interp(P5(i_ctx_t **, ref *, int, int *, ref *)); 436 int 437 gs_interpret(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, int *pexit_code, 438 ref * perror_object) 439 { 440 i_ctx_t *i_ctx_p = *pi_ctx_p; 441 gs_gc_root_t error_root; 442 int code; 443 444 gs_register_ref_root(imemory_system, &error_root, 445 (void **)&perror_object, "gs_interpret"); 446 code = gs_call_interp(pi_ctx_p, pref, user_errors, pexit_code, 447 perror_object); 448 i_ctx_p = *pi_ctx_p; 449 gs_unregister_root(imemory_system, &error_root, "gs_interpret"); 450 /* Avoid a dangling reference to a stack-allocated GC signal. */ 451 set_gc_signal(i_ctx_p, NULL, 0); 452 return code; 453 } 454 private int 455 gs_call_interp(i_ctx_t **pi_ctx_p, ref * pref, int user_errors, 456 int *pexit_code, ref * perror_object) 457 { 458 ref *epref = pref; 459 ref doref; 460 ref *perrordict; 461 ref error_name; 462 int code, ccode; 463 ref saref; 464 int gc_signal = 0; 465 i_ctx_t *i_ctx_p = *pi_ctx_p; 466 467 *pexit_code = 0; 468 ialloc_reset_requested(idmemory); 469 again: 470 /* Avoid a dangling error object that might get traced by a future GC. */ 471 make_null(perror_object); 472 o_stack.requested = e_stack.requested = d_stack.requested = 0; 473 while (gc_signal) { /* Some routine below triggered a GC. */ 474 gs_gc_root_t epref_root; 475 476 gc_signal = 0; 477 /* Make sure that doref will get relocated properly if */ 478 /* a garbage collection happens with epref == &doref. */ 479 gs_register_ref_root(imemory_system, &epref_root, 480 (void **)&epref, "gs_call_interp(epref)"); 481 code = interp_reclaim(pi_ctx_p, -1); 482 i_ctx_p = *pi_ctx_p; 483 gs_unregister_root(imemory_system, &epref_root, 484 "gs_call_interp(epref)"); 485 if (code < 0) 486 return code; 487 } 488 code = interp(pi_ctx_p, epref, perror_object); 489 i_ctx_p = *pi_ctx_p; 490 /* Prevent a dangling reference to the GC signal in ticks_left */ 491 /* in the frame of interp, but be prepared to do a GC if */ 492 /* an allocation in this routine asks for it. */ 493 set_gc_signal(i_ctx_p, &gc_signal, 1); 494 if (esp < esbot) /* popped guard entry */ 495 esp = esbot; 496 switch (code) { 497 case e_Fatal: 498 *pexit_code = 255; 499 return code; 500 case e_Quit: 501 *perror_object = osp[-1]; 502 *pexit_code = code = osp->value.intval; 503 osp -= 2; 504 return 505 (code == 0 ? e_Quit : 506 code < 0 && code > -100 ? code : e_Fatal); 507 case e_InterpreterExit: 508 return 0; 509 case e_ExecStackUnderflow: 510 /****** WRONG -- must keep mark blocks intact ******/ 511 ref_stack_pop_block(&e_stack); 512 doref = *perror_object; 513 epref = &doref; 514 goto again; 515 case e_VMreclaim: 516 /* Do the GC and continue. */ 517 code = interp_reclaim(pi_ctx_p, 518 (osp->value.intval == 2 ? 519 avm_global : avm_local)); 520 i_ctx_p = *pi_ctx_p; 521 /****** What if code < 0? ******/ 522 make_oper(&doref, 0, zpop); 523 epref = &doref; 524 goto again; 525 case e_NeedInput: 526 case e_NeedStdin: 527 case e_NeedStdout: 528 case e_NeedStderr: 529 return code; 530 } 531 /* Adjust osp in case of operand stack underflow */ 532 if (osp < osbot - 1) 533 osp = osbot - 1; 534 /* We have to handle stack over/underflow specially, because */ 535 /* we might be able to recover by adding or removing a block. */ 536 switch (code) { 537 case e_dictstackoverflow: 538 if (ref_stack_extend(&d_stack, d_stack.requested) >= 0) { 539 dict_set_top(); 540 doref = *perror_object; 541 epref = &doref; 542 goto again; 543 } 544 if (osp >= ostop) { 545 if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) 546 return ccode; 547 } 548 ccode = copy_stack(i_ctx_p, &d_stack, &saref); 549 if (ccode < 0) 550 return ccode; 551 ref_stack_pop_to(&d_stack, min_dstack_size); 552 dict_set_top(); 553 *++osp = saref; 554 break; 555 case e_dictstackunderflow: 556 if (ref_stack_pop_block(&d_stack) >= 0) { 557 dict_set_top(); 558 doref = *perror_object; 559 epref = &doref; 560 goto again; 561 } 562 break; 563 case e_execstackoverflow: 564 /* We don't have to handle this specially: */ 565 /* The only places that could generate it */ 566 /* use check_estack, which does a ref_stack_extend, */ 567 /* so if we get this error, it's a real one. */ 568 if (osp >= ostop) { 569 if ((ccode = ref_stack_extend(&o_stack, 1)) < 0) 570 return ccode; 571 } 572 ccode = copy_stack(i_ctx_p, &e_stack, &saref); 573 if (ccode < 0) 574 return ccode; 575 { 576 uint count = ref_stack_count(&e_stack); 577 uint limit = ref_stack_max_count(&e_stack) - ES_HEADROOM; 578 579 if (count > limit) { 580 /* 581 * If there is an e-stack mark within MIN_BLOCK_ESTACK of 582 * the new top, cut the stack back to remove the mark. 583 */ 584 int skip = count - limit; 585 int i; 586 587 for (i = skip; i < skip + MIN_BLOCK_ESTACK; ++i) { 588 const ref *ep = ref_stack_index(&e_stack, i); 589 590 if (r_has_type_attrs(ep, t_null, a_executable)) { 591 skip = i + 1; 592 break; 593 } 594 } 595 pop_estack(i_ctx_p, skip); 596 } 597 } 598 *++osp = saref; 599 break; 600 case e_stackoverflow: 601 if (ref_stack_extend(&o_stack, o_stack.requested) >= 0) { /* We can't just re-execute the object, because */ 602 /* it might be a procedure being pushed as a */ 603 /* literal. We check for this case specially. */ 604 doref = *perror_object; 605 if (r_is_proc(&doref)) { 606 *++osp = doref; 607 make_null_proc(&doref); 608 } 609 epref = &doref; 610 goto again; 611 } 612 ccode = copy_stack(i_ctx_p, &o_stack, &saref); 613 if (ccode < 0) 614 return ccode; 615 ref_stack_clear(&o_stack); 616 *++osp = saref; 617 break; 618 case e_stackunderflow: 619 if (ref_stack_pop_block(&o_stack) >= 0) { 620 doref = *perror_object; 621 epref = &doref; 622 goto again; 623 } 624 break; 625 } 626 if (user_errors < 0) 627 return code; 628 if (gs_errorname(i_ctx_p, code, &error_name) < 0) 629 return code; /* out-of-range error code! */ 630 if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 || 631 dict_find(perrordict, &error_name, &epref) <= 0 632 ) 633 return code; /* error name not in errordict??? */ 634 doref = *epref; 635 epref = &doref; 636 /* Push the error object on the operand stack if appropriate. */ 637 if (!ERROR_IS_INTERRUPT(code)) 638 *++osp = *perror_object; 639 goto again; 640 } 641 private int 642 interp_exit(i_ctx_t *i_ctx_p) 643 { 644 return e_InterpreterExit; 645 } 646 647 /* Set the GC signal for all VMs. */ 648 private void 649 set_gc_signal(i_ctx_t *i_ctx_p, int *psignal, int value) 650 { 651 gs_memory_gc_status_t stat; 652 int i; 653 654 for (i = 0; i < countof(idmemory->spaces_indexed); i++) { 655 gs_ref_memory_t *mem = idmemory->spaces_indexed[i]; 656 gs_ref_memory_t *mem_stable; 657 658 if (mem == 0) 659 continue; 660 for (;; mem = mem_stable) { 661 mem_stable = (gs_ref_memory_t *) 662 gs_memory_stable((gs_memory_t *)mem); 663 gs_memory_gc_status(mem, &stat); 664 stat.psignal = psignal; 665 stat.signal_value = value; 666 gs_memory_set_gc_status(mem, &stat); 667 if (mem_stable == mem) 668 break; 669 } 670 } 671 } 672 673 /* Copy the contents of an overflowed stack into a (local) array. */ 674 private int 675 copy_stack(i_ctx_t *i_ctx_p, const ref_stack_t * pstack, ref * arr) 676 { 677 uint size = ref_stack_count(pstack); 678 uint save_space = ialloc_space(idmemory); 679 int code; 680 681 ialloc_set_space(idmemory, avm_local); 682 code = ialloc_ref_array(arr, a_all, size, "copy_stack"); 683 if (code >= 0) 684 code = ref_stack_store(pstack, arr, size, 0, 1, true, idmemory, 685 "copy_stack"); 686 ialloc_set_space(idmemory, save_space); 687 return code; 688 } 689 690 /* Get the name corresponding to an error number. */ 691 int 692 gs_errorname(i_ctx_t *i_ctx_p, int code, ref * perror_name) 693 { 694 ref *perrordict, *pErrorNames; 695 696 if (dict_find_string(systemdict, "errordict", &perrordict) <= 0 || 697 dict_find_string(systemdict, "ErrorNames", &pErrorNames) <= 0 698 ) 699 return_error(e_undefined); /* errordict or ErrorNames not found?! */ 700 return array_get(pErrorNames, (long)(-code - 1), perror_name); 701 } 702 703 /* Store an error string in $error.errorinfo. */ 704 /* This routine is here because of the proximity to the error handler. */ 705 int 706 gs_errorinfo_put_string(i_ctx_t *i_ctx_p, const char *str) 707 { 708 ref rstr; 709 ref *pderror; 710 int code = string_to_ref(str, &rstr, iimemory, "gs_errorinfo_put_string"); 711 712 if (code < 0) 713 return code; 714 if (dict_find_string(systemdict, "$error", &pderror) <= 0 || 715 !r_has_type(pderror, t_dictionary) || 716 idict_put_string(pderror, "errorinfo", &rstr) < 0 717 ) 718 return_error(e_Fatal); 719 return 0; 720 } 721 722 /* Main interpreter. */ 723 /* If execution terminates normally, return e_InterpreterExit. */ 724 /* If an error occurs, leave the current object in *perror_object */ 725 /* and return a (negative) error code. */ 726 private int 727 interp(i_ctx_t **pi_ctx_p /* context for execution, updated if resched */, 728 const ref * pref /* object to interpret */, 729 ref * perror_object) 730 { 731 i_ctx_t *i_ctx_p = *pi_ctx_p; 732 /* 733 * Note that iref may actually be either a ref * or a ref_packed *. 734 * Certain DEC compilers assume that a ref * is ref-aligned even if it 735 * is cast to a short *, and generate code on this assumption, leading 736 * to "unaligned access" errors. For this reason, we declare 737 * iref_packed, and use a macro to cast it to the more aligned type 738 * where necessary (which is almost everywhere it is used). This may 739 * lead to compiler warnings about "cast increases alignment 740 * requirements", but this is less harmful than expensive traps at run 741 * time. 742 */ 743 register const ref_packed *iref_packed = (const ref_packed *)pref; 744 /* 745 * To make matters worse, some versions of gcc/egcs have a bug that 746 * leads them to assume that if iref_packed is EVER cast to a ref *, 747 * it is ALWAYS ref-aligned. We detect this in stdpre.h and provide 748 * the following workaround: 749 */ 750 #ifdef ALIGNMENT_ALIASING_BUG 751 const ref *iref_temp; 752 # define IREF (iref_temp = (const ref *)iref_packed, iref_temp) 753 #else 754 # define IREF ((const ref *)iref_packed) 755 #endif 756 #define SET_IREF(rp) (iref_packed = (const ref_packed *)(rp)) 757 register int icount = 0; /* # of consecutive tokens at iref */ 758 register os_ptr iosp = osp; /* private copy of osp */ 759 register es_ptr iesp = esp; /* private copy of esp */ 760 int code; 761 ref token; /* token read from file or string, */ 762 /* must be declared in this scope */ 763 register const ref *pvalue; 764 os_ptr whichp; 765 766 /* 767 * We have to make the error information into a struct; 768 * otherwise, the Watcom compiler will assign it to registers 769 * strictly on the basis of textual frequency. 770 * We also have to use ref_assign_inline everywhere, and 771 * avoid direct assignments of refs, so that esi and edi 772 * will remain available on Intel processors. 773 */ 774 struct interp_error_s { 775 int code; 776 int line; 777 const ref *obj; 778 ref full; 779 } ierror; 780 781 /* 782 * Get a pointer to the name table so that we can use the 783 * inline version of name_index_ref. 784 */ 785 const name_table *const int_nt = the_name_table(); 786 787 #define set_error(ecode)\ 788 { ierror.code = ecode; ierror.line = __LINE__; } 789 #define return_with_error(ecode, objp)\ 790 { set_error(ecode); ierror.obj = objp; goto rwe; } 791 #define return_with_error_iref(ecode)\ 792 { set_error(ecode); goto rwei; } 793 #define return_with_code_iref()\ 794 { ierror.line = __LINE__; goto rweci; } 795 #define return_with_error_code_op(nargs)\ 796 return_with_code_iref() 797 #define return_with_stackoverflow(objp)\ 798 { o_stack.requested = 1; return_with_error(e_stackoverflow, objp); } 799 #define return_with_stackoverflow_iref()\ 800 { o_stack.requested = 1; return_with_error_iref(e_stackoverflow); } 801 int ticks_left = gs_interp_time_slice_ticks; 802 803 /* 804 * If we exceed the VMThreshold, set ticks_left to -1 805 * to alert the interpreter that we need to garbage collect. 806 */ 807 set_gc_signal(i_ctx_p, &ticks_left, -100); 808 809 esfile_clear_cache(); 810 /* 811 * From here on, if icount > 0, iref and icount correspond 812 * to the top entry on the execution stack: icount is the count 813 * of sequential entries remaining AFTER the current one. 814 */ 815 #define IREF_NEXT(ip)\ 816 ((const ref_packed *)((const ref *)(ip) + 1)) 817 #define IREF_NEXT_EITHER(ip)\ 818 ( r_is_packed(ip) ? (ip) + 1 : IREF_NEXT(ip) ) 819 #define store_state(ep)\ 820 ( icount > 0 ? (ep->value.const_refs = IREF + 1, r_set_size(ep, icount)) : 0 ) 821 #define store_state_short(ep)\ 822 ( icount > 0 ? (ep->value.packed = iref_packed + 1, r_set_size(ep, icount)) : 0 ) 823 #define store_state_either(ep)\ 824 ( icount > 0 ? (ep->value.packed = IREF_NEXT_EITHER(iref_packed), r_set_size(ep, icount)) : 0 ) 825 #define next()\ 826 if ( --icount > 0 ) { iref_packed = IREF_NEXT(iref_packed); goto top; } else goto out 827 #define next_short()\ 828 if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\ 829 ++iref_packed; goto top 830 #define next_either()\ 831 if ( --icount <= 0 ) { if ( icount < 0 ) goto up; iesp--; }\ 832 iref_packed = IREF_NEXT_EITHER(iref_packed); goto top 833 834 #if !PACKED_SPECIAL_OPS 835 # undef next_either 836 # define next_either() next() 837 # undef store_state_either 838 # define store_state_either(ep) store_state(ep) 839 #endif 840 841 /* We want to recognize executable arrays here, */ 842 /* so we push the argument on the estack and enter */ 843 /* the loop at the bottom. */ 844 if (iesp >= estop) 845 return_with_error(e_execstackoverflow, pref); 846 ++iesp; 847 ref_assign_inline(iesp, pref); 848 goto bot; 849 top: 850 /* 851 * This is the top of the interpreter loop. 852 * iref points to the ref being interpreted. 853 * Note that this might be an element of a packed array, 854 * not a real ref: we carefully arranged the first 16 bits of 855 * a ref and of a packed array element so they could be distinguished 856 * from each other. (See ghost.h and packed.h for more detail.) 857 */ 858 INCR(top); 859 #ifdef DEBUG 860 /* Do a little validation on the top o-stack entry. */ 861 if (iosp >= osbot && 862 (r_type(iosp) == t__invalid || r_type(iosp) >= tx_next_op) 863 ) { 864 lprintf("Invalid value on o-stack!\n"); 865 return_with_error_iref(e_Fatal); 866 } 867 if (gs_debug['I'] || 868 (gs_debug['i'] && 869 (r_is_packed(iref_packed) ? 870 r_packed_is_name(iref_packed) : 871 r_has_type(IREF, t_name))) 872 ) { 873 void debug_print_ref(P1(const ref *)); 874 os_ptr save_osp = osp; /* avoid side-effects */ 875 es_ptr save_esp = esp; 876 877 osp = iosp; 878 esp = iesp; 879 dlprintf5("d%u,e%u<%u>0x%lx(%d): ", 880 ref_stack_count(&d_stack), ref_stack_count(&e_stack), 881 ref_stack_count(&o_stack), (ulong)IREF, icount); 882 debug_print_ref(IREF); 883 if (iosp >= osbot) { 884 dputs(" // "); 885 debug_print_ref(iosp); 886 } 887 dputc('\n'); 888 osp = save_osp; 889 esp = save_esp; 890 fflush(dstderr); 891 } 892 #endif 893 /* Objects that have attributes (arrays, dictionaries, files, and strings) */ 894 /* use lit and exec; other objects use plain and plain_exec. */ 895 #define lit(t) type_xe_value(t, a_execute) 896 #define exec(t) type_xe_value(t, a_execute + a_executable) 897 #define nox(t) type_xe_value(t, 0) 898 #define nox_exec(t) type_xe_value(t, a_executable) 899 #define plain(t) type_xe_value(t, 0) 900 #define plain_exec(t) type_xe_value(t, a_executable) 901 /* 902 * We have to populate enough cases of the switch statement to force 903 * some compilers to use a dispatch rather than a testing loop. 904 * What a nuisance! 905 */ 906 switch (r_type_xe(iref_packed)) { 907 /* Access errors. */ 908 #define cases_invalid()\ 909 case plain(t__invalid): case plain_exec(t__invalid) 910 cases_invalid(): 911 return_with_error_iref(e_Fatal); 912 #define cases_nox()\ 913 case nox_exec(t_array): case nox_exec(t_dictionary):\ 914 case nox_exec(t_file): case nox_exec(t_string):\ 915 case nox_exec(t_mixedarray): case nox_exec(t_shortarray) 916 cases_nox(): 917 return_with_error_iref(e_invalidaccess); 918 /* 919 * Literal objects. We have to enumerate all the types. 920 * In fact, we have to include some extra plain_exec entries 921 * just to populate the switch. We break them up into groups 922 * to avoid overflowing some preprocessors. 923 */ 924 #define cases_lit_1()\ 925 case lit(t_array): case nox(t_array):\ 926 case plain(t_boolean): case plain_exec(t_boolean):\ 927 case lit(t_dictionary): case nox(t_dictionary) 928 #define cases_lit_2()\ 929 case lit(t_file): case nox(t_file):\ 930 case plain(t_fontID): case plain_exec(t_fontID):\ 931 case plain(t_integer): case plain_exec(t_integer):\ 932 case plain(t_mark): case plain_exec(t_mark) 933 #define cases_lit_3()\ 934 case plain(t_name):\ 935 case plain(t_null):\ 936 case plain(t_oparray):\ 937 case plain(t_operator) 938 #define cases_lit_4()\ 939 case plain(t_real): case plain_exec(t_real):\ 940 case plain(t_save): case plain_exec(t_save):\ 941 case lit(t_string): case nox(t_string) 942 #define cases_lit_5()\ 943 case lit(t_mixedarray): case nox(t_mixedarray):\ 944 case lit(t_shortarray): case nox(t_shortarray):\ 945 case plain(t_device): case plain_exec(t_device):\ 946 case plain(t_struct): case plain_exec(t_struct):\ 947 case plain(t_astruct): case plain_exec(t_astruct) 948 /* Executable arrays are treated as literals in direct execution. */ 949 #define cases_lit_array()\ 950 case exec(t_array): case exec(t_mixedarray): case exec(t_shortarray) 951 cases_lit_1(): 952 cases_lit_2(): 953 cases_lit_3(): 954 cases_lit_4(): 955 cases_lit_5(): 956 INCR(lit); 957 break; 958 cases_lit_array(): 959 INCR(lit_array); 960 break; 961 /* Special operators. */ 962 case plain_exec(tx_op_add): 963 x_add: INCR(x_add); 964 if ((code = zop_add(iosp)) < 0) 965 return_with_error_code_op(2); 966 iosp--; 967 next_either(); 968 case plain_exec(tx_op_def): 969 x_def: INCR(x_def); 970 osp = iosp; /* sync o_stack */ 971 if ((code = zop_def(i_ctx_p)) < 0) 972 return_with_error_code_op(2); 973 iosp -= 2; 974 next_either(); 975 case plain_exec(tx_op_dup): 976 x_dup: INCR(x_dup); 977 if (iosp < osbot) 978 return_with_error_iref(e_stackunderflow); 979 if (iosp >= ostop) 980 return_with_stackoverflow_iref(); 981 iosp++; 982 ref_assign_inline(iosp, iosp - 1); 983 next_either(); 984 case plain_exec(tx_op_exch): 985 x_exch: INCR(x_exch); 986 if (iosp <= osbot) 987 return_with_error_iref(e_stackunderflow); 988 ref_assign_inline(&token, iosp); 989 ref_assign_inline(iosp, iosp - 1); 990 ref_assign_inline(iosp - 1, &token); 991 next_either(); 992 case plain_exec(tx_op_if): 993 x_if: INCR(x_if); 994 if (!r_has_type(iosp - 1, t_boolean)) 995 return_with_error_iref((iosp <= osbot ? 996 e_stackunderflow : e_typecheck)); 997 if (!r_is_proc(iosp)) 998 return_with_error_iref(check_proc_failed(iosp)); 999 if (!iosp[-1].value.boolval) { 1000 iosp -= 2; 1001 next_either(); 1002 } 1003 if (iesp >= estop) 1004 return_with_error_iref(e_execstackoverflow); 1005 store_state_either(iesp); 1006 whichp = iosp; 1007 iosp -= 2; 1008 goto ifup; 1009 case plain_exec(tx_op_ifelse): 1010 x_ifelse: INCR(x_ifelse); 1011 if (!r_has_type(iosp - 2, t_boolean)) 1012 return_with_error_iref((iosp < osbot + 2 ? 1013 e_stackunderflow : e_typecheck)); 1014 if (!r_is_proc(iosp - 1)) 1015 return_with_error_iref(check_proc_failed(iosp - 1)); 1016 if (!r_is_proc(iosp)) 1017 return_with_error_iref(check_proc_failed(iosp)); 1018 if (iesp >= estop) 1019 return_with_error_iref(e_execstackoverflow); 1020 store_state_either(iesp); 1021 whichp = (iosp[-2].value.boolval ? iosp - 1 : iosp); 1022 iosp -= 3; 1023 /* Open code "up" for the array case(s) */ 1024 ifup:if ((icount = r_size(whichp) - 1) <= 0) { 1025 if (icount < 0) 1026 goto up; /* 0-element proc */ 1027 SET_IREF(whichp->value.refs); /* 1-element proc */ 1028 if (--ticks_left > 0) 1029 goto top; 1030 } 1031 ++iesp; 1032 /* Do a ref_assign, but also set iref. */ 1033 iesp->tas = whichp->tas; 1034 SET_IREF(iesp->value.refs = whichp->value.refs); 1035 if (--ticks_left > 0) 1036 goto top; 1037 goto slice; 1038 case plain_exec(tx_op_index): 1039 x_index: INCR(x_index); 1040 osp = iosp; /* zindex references o_stack */ 1041 if ((code = zindex(i_ctx_p)) < 0) 1042 return_with_error_code_op(1); 1043 next_either(); 1044 case plain_exec(tx_op_pop): 1045 x_pop: INCR(x_pop); 1046 if (iosp < osbot) 1047 return_with_error_iref(e_stackunderflow); 1048 iosp--; 1049 next_either(); 1050 case plain_exec(tx_op_roll): 1051 x_roll: INCR(x_roll); 1052 osp = iosp; /* zroll references o_stack */ 1053 if ((code = zroll(i_ctx_p)) < 0) 1054 return_with_error_code_op(2); 1055 iosp -= 2; 1056 next_either(); 1057 case plain_exec(tx_op_sub): 1058 x_sub: INCR(x_sub); 1059 if ((code = zop_sub(iosp)) < 0) 1060 return_with_error_code_op(2); 1061 iosp--; 1062 next_either(); 1063 /* Executable types. */ 1064 case plain_exec(t_null): 1065 goto bot; 1066 case plain_exec(t_oparray): 1067 /* Replace with the definition and go again. */ 1068 INCR(exec_array); 1069 pvalue = IREF->value.const_refs; 1070 opst: /* Prepare to call a t_oparray procedure in *pvalue. */ 1071 store_state(iesp); 1072 oppr: /* Record the stack depths in case of failure. */ 1073 if (iesp >= estop - 3) 1074 return_with_error_iref(e_execstackoverflow); 1075 iesp += 4; 1076 osp = iosp; /* ref_stack_count_inline needs this */ 1077 make_mark_estack(iesp - 3, es_other, oparray_cleanup); 1078 make_int(iesp - 2, ref_stack_count_inline(&o_stack)); 1079 make_int(iesp - 1, ref_stack_count_inline(&d_stack)); 1080 make_op_estack(iesp, oparray_pop); 1081 goto pr; 1082 prst: /* Prepare to call the procedure (array) in *pvalue. */ 1083 store_state(iesp); 1084 pr: /* Call the array in *pvalue. State has been stored. */ 1085 if ((icount = r_size(pvalue) - 1) <= 0) { 1086 if (icount < 0) 1087 goto up; /* 0-element proc */ 1088 SET_IREF(pvalue->value.refs); /* 1-element proc */ 1089 if (--ticks_left > 0) 1090 goto top; 1091 } 1092 if (iesp >= estop) 1093 return_with_error_iref(e_execstackoverflow); 1094 ++iesp; 1095 /* Do a ref_assign, but also set iref. */ 1096 iesp->tas = pvalue->tas; 1097 SET_IREF(iesp->value.refs = pvalue->value.refs); 1098 if (--ticks_left > 0) 1099 goto top; 1100 goto slice; 1101 case plain_exec(t_operator): 1102 INCR(exec_operator); 1103 if (--ticks_left <= 0) { /* The following doesn't work, */ 1104 /* and I can't figure out why. */ 1105 /****** goto sst; ******/ 1106 } 1107 esp = iesp; /* save for operator */ 1108 osp = iosp; /* ditto */ 1109 /* Operator routines take osp as an argument. */ 1110 /* This is just a convenience, since they adjust */ 1111 /* osp themselves to reflect the results. */ 1112 /* Operators that (net) push information on the */ 1113 /* operand stack must check for overflow: */ 1114 /* this normally happens automatically through */ 1115 /* the push macro (in oper.h). */ 1116 /* Operators that do not typecheck their operands, */ 1117 /* or take a variable number of arguments, */ 1118 /* must check explicitly for stack underflow. */ 1119 /* (See oper.h for more detail.) */ 1120 /* Note that each case must set iosp = osp: */ 1121 /* this is so we can switch on code without having to */ 1122 /* store it and reload it (for dumb compilers). */ 1123 switch (code = call_operator(real_opproc(IREF), i_ctx_p)) { 1124 case 0: /* normal case */ 1125 case 1: /* alternative success case */ 1126 iosp = osp; 1127 next(); 1128 case o_push_estack: /* store the state and go to up */ 1129 store_state(iesp); 1130 opush:iosp = osp; 1131 iesp = esp; 1132 if (--ticks_left > 0) 1133 goto up; 1134 goto slice; 1135 case o_pop_estack: /* just go to up */ 1136 opop:iosp = osp; 1137 if (esp == iesp) 1138 goto bot; 1139 iesp = esp; 1140 goto up; 1141 case o_reschedule: 1142 store_state(iesp); 1143 goto res; 1144 case e_RemapColor: 1145 oe_remap: store_state(iesp); 1146 remap: if (iesp + 2 >= estop) { 1147 esp = iesp; 1148 code = ref_stack_extend(&e_stack, 2); 1149 if (code < 0) 1150 return_with_error_iref(code); 1151 iesp = esp; 1152 } 1153 packed_get(iref_packed, iesp + 1); 1154 make_oper(iesp + 2, 0, 1155 r_ptr(&istate->remap_color_info, 1156 int_remap_color_info_t)->proc); 1157 iesp += 2; 1158 goto up; 1159 } 1160 iosp = osp; 1161 iesp = esp; 1162 return_with_code_iref(); 1163 case plain_exec(t_name): 1164 INCR(exec_name); 1165 pvalue = IREF->value.pname->pvalue; 1166 if (!pv_valid(pvalue)) { 1167 uint nidx = names_index(int_nt, IREF); 1168 uint htemp; 1169 1170 INCR(find_name); 1171 if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) 1172 return_with_error_iref(e_undefined); 1173 } 1174 /* Dispatch on the type of the value. */ 1175 /* Again, we have to over-populate the switch. */ 1176 switch (r_type_xe(pvalue)) { 1177 cases_invalid(): 1178 return_with_error_iref(e_Fatal); 1179 cases_nox(): /* access errors */ 1180 return_with_error_iref(e_invalidaccess); 1181 cases_lit_1(): 1182 cases_lit_2(): 1183 cases_lit_3(): 1184 cases_lit_4(): 1185 cases_lit_5(): 1186 INCR(name_lit); 1187 /* Just push the value */ 1188 if (iosp >= ostop) 1189 return_with_stackoverflow(pvalue); 1190 ++iosp; 1191 ref_assign_inline(iosp, pvalue); 1192 next(); 1193 case exec(t_array): 1194 case exec(t_mixedarray): 1195 case exec(t_shortarray): 1196 INCR(name_proc); 1197 /* This is an executable procedure, execute it. */ 1198 goto prst; 1199 case plain_exec(tx_op_add): 1200 goto x_add; 1201 case plain_exec(tx_op_def): 1202 goto x_def; 1203 case plain_exec(tx_op_dup): 1204 goto x_dup; 1205 case plain_exec(tx_op_exch): 1206 goto x_exch; 1207 case plain_exec(tx_op_if): 1208 goto x_if; 1209 case plain_exec(tx_op_ifelse): 1210 goto x_ifelse; 1211 case plain_exec(tx_op_index): 1212 goto x_index; 1213 case plain_exec(tx_op_pop): 1214 goto x_pop; 1215 case plain_exec(tx_op_roll): 1216 goto x_roll; 1217 case plain_exec(tx_op_sub): 1218 goto x_sub; 1219 case plain_exec(t_null): 1220 goto bot; 1221 case plain_exec(t_oparray): 1222 INCR(name_oparray); 1223 pvalue = (const ref *)pvalue->value.const_refs; 1224 goto opst; 1225 case plain_exec(t_operator): 1226 INCR(name_operator); 1227 { /* Shortcut for operators. */ 1228 /* See above for the logic. */ 1229 if (--ticks_left <= 0) { /* The following doesn't work, */ 1230 /* and I can't figure out why. */ 1231 /****** goto sst; ******/ 1232 } 1233 esp = iesp; 1234 osp = iosp; 1235 switch (code = call_operator(real_opproc(pvalue), 1236 i_ctx_p) 1237 ) { 1238 case 0: /* normal case */ 1239 case 1: /* alternative success case */ 1240 iosp = osp; 1241 next(); 1242 case o_push_estack: 1243 store_state(iesp); 1244 goto opush; 1245 case o_pop_estack: 1246 goto opop; 1247 case o_reschedule: 1248 store_state(iesp); 1249 goto res; 1250 case e_RemapColor: 1251 goto oe_remap; 1252 } 1253 iosp = osp; 1254 iesp = esp; 1255 return_with_error(code, pvalue); 1256 } 1257 case plain_exec(t_name): 1258 case exec(t_file): 1259 case exec(t_string): 1260 default: 1261 /* Not a procedure, reinterpret it. */ 1262 store_state(iesp); 1263 icount = 0; 1264 SET_IREF(pvalue); 1265 goto top; 1266 } 1267 case exec(t_file): 1268 { /* Executable file. Read the next token and interpret it. */ 1269 stream *s; 1270 scanner_state sstate; 1271 1272 check_read_known_file(s, IREF, return_with_error_iref); 1273 rt: 1274 if (iosp >= ostop) /* check early */ 1275 return_with_stackoverflow_iref(); 1276 osp = iosp; /* scan_token uses ostack */ 1277 scanner_state_init_options(&sstate, i_ctx_p->scanner_options); 1278 again: 1279 code = scan_token(i_ctx_p, s, &token, &sstate); 1280 iosp = osp; /* ditto */ 1281 switch (code) { 1282 case 0: /* read a token */ 1283 /* It's worth checking for literals, which make up */ 1284 /* the majority of input tokens, before storing the */ 1285 /* state on the e-stack. Note that because of //, */ 1286 /* the token may have *any* type and attributes. */ 1287 /* Note also that executable arrays aren't executed */ 1288 /* at the top level -- they're treated as literals. */ 1289 if (!r_has_attr(&token, a_executable) || 1290 r_is_array(&token) 1291 ) { /* If scan_token used the o-stack, */ 1292 /* we know we can do a push now; if not, */ 1293 /* the pre-check is still valid. */ 1294 iosp++; 1295 ref_assign_inline(iosp, &token); 1296 goto rt; 1297 } 1298 store_state(iesp); 1299 /* Push the file on the e-stack */ 1300 if (iesp >= estop) 1301 return_with_error_iref(e_execstackoverflow); 1302 esfile_set_cache(++iesp); 1303 ref_assign_inline(iesp, IREF); 1304 SET_IREF(&token); 1305 icount = 0; 1306 goto top; 1307 case scan_EOF: /* end of file */ 1308 esfile_clear_cache(); 1309 goto bot; 1310 case scan_BOS: 1311 /* Binary object sequences */ 1312 /* ARE executed at the top level. */ 1313 store_state(iesp); 1314 /* Push the file on the e-stack */ 1315 if (iesp >= estop) 1316 return_with_error_iref(e_execstackoverflow); 1317 esfile_set_cache(++iesp); 1318 ref_assign_inline(iesp, IREF); 1319 pvalue = &token; 1320 goto pr; 1321 case scan_Refill: 1322 store_state(iesp); 1323 /* iref may point into the exec stack; */ 1324 /* save its referent now. */ 1325 ref_assign_inline(&token, IREF); 1326 /* Push the file on the e-stack */ 1327 if (iesp >= estop) 1328 return_with_error_iref(e_execstackoverflow); 1329 ++iesp; 1330 ref_assign_inline(iesp, &token); 1331 esp = iesp; 1332 osp = iosp; 1333 code = scan_handle_refill(i_ctx_p, &token, &sstate, 1334 true, true, 1335 ztokenexec_continue); 1336 scan_cont: 1337 iosp = osp; 1338 iesp = esp; 1339 switch (code) { 1340 case 0: 1341 iesp--; /* don't push the file */ 1342 goto again; /* stacks are unchanged */ 1343 case o_push_estack: 1344 esfile_clear_cache(); 1345 if (--ticks_left > 0) 1346 goto up; 1347 goto slice; 1348 } 1349 /* must be an error */ 1350 iesp--; /* don't push the file */ 1351 return_with_code_iref(); 1352 case scan_Comment: 1353 case scan_DSC_Comment: { 1354 /* See scan_Refill above for comments. */ 1355 ref file_token; 1356 1357 store_state(iesp); 1358 ref_assign_inline(&file_token, IREF); 1359 if (iesp >= estop) 1360 return_with_error_iref(e_execstackoverflow); 1361 ++iesp; 1362 ref_assign_inline(iesp, &file_token); 1363 esp = iesp; 1364 osp = iosp; 1365 code = ztoken_handle_comment(i_ctx_p, &file_token, 1366 &sstate, &token, 1367 code, true, true, 1368 ztokenexec_continue); 1369 } 1370 goto scan_cont; 1371 default: /* error */ 1372 return_with_code_iref(); 1373 } 1374 } 1375 case exec(t_string): 1376 { /* Executable string. Read a token and interpret it. */ 1377 stream ss; 1378 scanner_state sstate; 1379 1380 scanner_state_init_options(&sstate, SCAN_FROM_STRING); 1381 sread_string(&ss, IREF->value.bytes, r_size(IREF)); 1382 osp = iosp; /* scan_token uses ostack */ 1383 code = scan_token(i_ctx_p, &ss, &token, &sstate); 1384 iosp = osp; /* ditto */ 1385 switch (code) { 1386 case 0: /* read a token */ 1387 case scan_BOS: /* binary object sequence */ 1388 store_state(iesp); 1389 /* If the updated string isn't empty, push it back */ 1390 /* on the e-stack. */ 1391 { 1392 uint size = sbufavailable(&ss); 1393 1394 if (size) { 1395 if (iesp >= estop) 1396 return_with_error_iref(e_execstackoverflow); 1397 ++iesp; 1398 iesp->tas.type_attrs = IREF->tas.type_attrs; 1399 iesp->value.const_bytes = sbufptr(&ss); 1400 r_set_size(iesp, size); 1401 } 1402 } 1403 if (code == 0) { 1404 SET_IREF(&token); 1405 icount = 0; 1406 goto top; 1407 } 1408 /* Handle BOS specially */ 1409 pvalue = &token; 1410 goto pr; 1411 case scan_EOF: /* end of string */ 1412 goto bot; 1413 case scan_Refill: /* error */ 1414 code = gs_note_error(e_syntaxerror); 1415 default: /* error */ 1416 return_with_code_iref(); 1417 } 1418 } 1419 /* Handle packed arrays here by re-dispatching. */ 1420 /* This also picks up some anomalous cases of non-packed arrays. */ 1421 default: 1422 { 1423 uint index; 1424 1425 switch (*iref_packed >> r_packed_type_shift) { 1426 case pt_full_ref: 1427 case pt_full_ref + 1: 1428 INCR(p_full); 1429 if (iosp >= ostop) 1430 return_with_stackoverflow_iref(); 1431 /* We know this can't be an executable object */ 1432 /* requiring special handling, so we just push it. */ 1433 ++iosp; 1434 /* We know that refs are properly aligned: */ 1435 /* see packed.h for details. */ 1436 ref_assign_inline(iosp, IREF); 1437 next(); 1438 case pt_executable_operator: 1439 index = *iref_packed & packed_value_mask; 1440 if (--ticks_left <= 0) { /* The following doesn't work, */ 1441 /* and I can't figure out why. */ 1442 /****** goto sst_short; ******/ 1443 } 1444 if (!op_index_is_operator(index)) { 1445 INCR(p_exec_oparray); 1446 store_state_short(iesp); 1447 /* Call the operator procedure. */ 1448 index -= op_def_count; 1449 pvalue = (const ref *) 1450 (index < r_size(&op_array_table_global.table) ? 1451 op_array_table_global.table.value.const_refs + 1452 index : 1453 op_array_table_local.table.value.const_refs + 1454 (index - r_size(&op_array_table_global.table))); 1455 goto oppr; 1456 } 1457 INCR(p_exec_operator); 1458 /* See the main plain_exec(t_operator) case */ 1459 /* for details of what happens here. */ 1460 #if PACKED_SPECIAL_OPS 1461 /* 1462 * We arranged in iinit.c that the special ops 1463 * have operator indices starting at 1. 1464 * 1465 * The (int) cast in the next line is required 1466 * because some compilers don't allow arithmetic 1467 * involving two different enumerated types. 1468 */ 1469 # define case_xop(xop) case xop - (int)tx_op + 1 1470 switch (index) { 1471 case_xop(tx_op_add):goto x_add; 1472 case_xop(tx_op_def):goto x_def; 1473 case_xop(tx_op_dup):goto x_dup; 1474 case_xop(tx_op_exch):goto x_exch; 1475 case_xop(tx_op_if):goto x_if; 1476 case_xop(tx_op_ifelse):goto x_ifelse; 1477 case_xop(tx_op_index):goto x_index; 1478 case_xop(tx_op_pop):goto x_pop; 1479 case_xop(tx_op_roll):goto x_roll; 1480 case_xop(tx_op_sub):goto x_sub; 1481 case 0: /* for dumb compilers */ 1482 default: 1483 ; 1484 } 1485 # undef case_xop 1486 #endif 1487 INCR(p_exec_non_x_operator); 1488 esp = iesp; 1489 osp = iosp; 1490 switch (code = call_operator(op_index_proc(index), i_ctx_p)) { 1491 case 0: 1492 case 1: 1493 iosp = osp; 1494 next_short(); 1495 case o_push_estack: 1496 store_state_short(iesp); 1497 goto opush; 1498 case o_pop_estack: 1499 iosp = osp; 1500 if (esp == iesp) { 1501 next_short(); 1502 } 1503 iesp = esp; 1504 goto up; 1505 case o_reschedule: 1506 store_state_short(iesp); 1507 goto res; 1508 case e_RemapColor: 1509 store_state_short(iesp); 1510 goto remap; 1511 } 1512 iosp = osp; 1513 iesp = esp; 1514 return_with_code_iref(); 1515 case pt_integer: 1516 INCR(p_integer); 1517 if (iosp >= ostop) 1518 return_with_stackoverflow_iref(); 1519 ++iosp; 1520 make_int(iosp, 1521 ((int)*iref_packed & packed_int_mask) + 1522 packed_min_intval); 1523 next_short(); 1524 case pt_literal_name: 1525 INCR(p_lit_name); 1526 { 1527 uint nidx = *iref_packed & packed_value_mask; 1528 1529 if (iosp >= ostop) 1530 return_with_stackoverflow_iref(); 1531 ++iosp; 1532 name_index_ref_inline(int_nt, nidx, iosp); 1533 next_short(); 1534 } 1535 case pt_executable_name: 1536 INCR(p_exec_name); 1537 { 1538 uint nidx = *iref_packed & packed_value_mask; 1539 1540 pvalue = name_index_ptr_inline(int_nt, nidx)->pvalue; 1541 if (!pv_valid(pvalue)) { 1542 uint htemp; 1543 1544 INCR(p_find_name); 1545 if ((pvalue = dict_find_name_by_index_inline(nidx, htemp)) == 0) { 1546 names_index_ref(int_nt, nidx, &token); 1547 return_with_error(e_undefined, &token); 1548 } 1549 } 1550 if (r_has_masked_attrs(pvalue, a_execute, a_execute + a_executable)) { /* Literal, push it. */ 1551 INCR(p_name_lit); 1552 if (iosp >= ostop) 1553 return_with_stackoverflow_iref(); 1554 ++iosp; 1555 ref_assign_inline(iosp, pvalue); 1556 next_short(); 1557 } 1558 if (r_is_proc(pvalue)) { /* This is an executable procedure, */ 1559 /* execute it. */ 1560 INCR(p_name_proc); 1561 store_state_short(iesp); 1562 goto pr; 1563 } 1564 /* Not a literal or procedure, reinterpret it. */ 1565 store_state_short(iesp); 1566 icount = 0; 1567 SET_IREF(pvalue); 1568 goto top; 1569 } 1570 /* default can't happen here */ 1571 } 1572 } 1573 } 1574 /* Literal type, just push it. */ 1575 if (iosp >= ostop) 1576 return_with_stackoverflow_iref(); 1577 ++iosp; 1578 ref_assign_inline(iosp, IREF); 1579 bot:next(); 1580 out: /* At most 1 more token in the current procedure. */ 1581 /* (We already decremented icount.) */ 1582 if (!icount) { 1583 /* Pop the execution stack for tail recursion. */ 1584 iesp--; 1585 iref_packed = IREF_NEXT(iref_packed); 1586 goto top; 1587 } 1588 up:if (--ticks_left < 0) 1589 goto slice; 1590 /* See if there is anything left on the execution stack. */ 1591 if (!r_is_proc(iesp)) { 1592 SET_IREF(iesp--); 1593 icount = 0; 1594 goto top; 1595 } 1596 SET_IREF(iesp->value.refs); /* next element of array */ 1597 icount = r_size(iesp) - 1; 1598 if (icount <= 0) { /* <= 1 more elements */ 1599 iesp--; /* pop, or tail recursion */ 1600 if (icount < 0) 1601 goto up; 1602 } 1603 goto top; 1604 res: 1605 /* Some operator has asked for context rescheduling. */ 1606 /* We've done a store_state. */ 1607 *pi_ctx_p = i_ctx_p; 1608 code = (*gs_interp_reschedule_proc)(pi_ctx_p); 1609 i_ctx_p = *pi_ctx_p; 1610 sched: /* We've just called a scheduling procedure. */ 1611 /* The interpreter state is in memory; iref is not current. */ 1612 if (code < 0) { 1613 set_error(code); 1614 /* 1615 * We need a real object to return as the error object. 1616 * (It only has to last long enough to store in 1617 * *perror_object.) 1618 */ 1619 make_null_proc(&ierror.full); 1620 SET_IREF(ierror.obj = &ierror.full); 1621 goto error_exit; 1622 } 1623 /* Reload state information from memory. */ 1624 iosp = osp; 1625 iesp = esp; 1626 goto up; 1627 #if 0 /****** ****** ***** */ 1628 sst: /* Time-slice, but push the current object first. */ 1629 store_state(iesp); 1630 if (iesp >= estop) 1631 return_with_error_iref(e_execstackoverflow); 1632 iesp++; 1633 ref_assign_inline(iesp, iref); 1634 #endif /****** ****** ***** */ 1635 slice: /* It's time to time-slice or garbage collect. */ 1636 /* iref is not live, so we don't need to do a store_state. */ 1637 osp = iosp; 1638 esp = iesp; 1639 /* If ticks_left <= -100, we need to GC now. */ 1640 if (ticks_left <= -100) { /* We need to garbage collect now. */ 1641 *pi_ctx_p = i_ctx_p; 1642 code = interp_reclaim(pi_ctx_p, -1); 1643 i_ctx_p = *pi_ctx_p; 1644 } else if (gs_interp_time_slice_proc) { 1645 *pi_ctx_p = i_ctx_p; 1646 code = (*gs_interp_time_slice_proc)(pi_ctx_p); 1647 i_ctx_p = *pi_ctx_p; 1648 } else 1649 code = 0; 1650 ticks_left = gs_interp_time_slice_ticks; 1651 goto sched; 1652 1653 /* Error exits. */ 1654 1655 rweci: 1656 ierror.code = code; 1657 rwei: 1658 ierror.obj = IREF; 1659 rwe: 1660 if (!r_is_packed(iref_packed)) 1661 store_state(iesp); 1662 else { 1663 /* 1664 * We need a real object to return as the error object. 1665 * (It only has to last long enough to store in *perror_object.) 1666 */ 1667 packed_get((const ref_packed *)ierror.obj, &ierror.full); 1668 store_state_short(iesp); 1669 if (IREF == ierror.obj) 1670 SET_IREF(&ierror.full); 1671 ierror.obj = &ierror.full; 1672 } 1673 error_exit: 1674 if (ERROR_IS_INTERRUPT(ierror.code)) { /* We must push the current object being interpreted */ 1675 /* back on the e-stack so it will be re-executed. */ 1676 /* Currently, this is always an executable operator, */ 1677 /* but it might be something else someday if we check */ 1678 /* for interrupts in the interpreter loop itself. */ 1679 if (iesp >= estop) 1680 code = e_execstackoverflow; 1681 else { 1682 iesp++; 1683 ref_assign_inline(iesp, IREF); 1684 } 1685 } 1686 esp = iesp; 1687 osp = iosp; 1688 ref_assign_inline(perror_object, ierror.obj); 1689 return gs_log_error(ierror.code, __FILE__, ierror.line); 1690 } 1691 1692 /* Pop the bookkeeping information for a normal exit from a t_oparray. */ 1693 private int 1694 oparray_pop(i_ctx_t *i_ctx_p) 1695 { 1696 esp -= 3; 1697 return o_pop_estack; 1698 } 1699 1700 /* Restore the stack pointers after an error inside a t_oparray procedure. */ 1701 /* This procedure is called only from pop_estack. */ 1702 private int 1703 oparray_cleanup(i_ctx_t *i_ctx_p) 1704 { /* esp points just below the cleanup procedure. */ 1705 es_ptr ep = esp; 1706 uint ocount_old = (uint) ep[2].value.intval; 1707 uint dcount_old = (uint) ep[3].value.intval; 1708 uint ocount = ref_stack_count(&o_stack); 1709 uint dcount = ref_stack_count(&d_stack); 1710 1711 if (ocount > ocount_old) 1712 ref_stack_pop(&o_stack, ocount - ocount_old); 1713 if (dcount > dcount_old) { 1714 ref_stack_pop(&d_stack, dcount - dcount_old); 1715 dict_set_top(); 1716 } 1717 return 0; 1718 } 1719 1720 /* Don't restore the stack pointers. */ 1721 private int 1722 oparray_no_cleanup(i_ctx_t *i_ctx_p) 1723 { 1724 return 0; 1725 } 1726 1727 /* Find the innermost oparray. */ 1728 private ref * 1729 oparray_find(i_ctx_t *i_ctx_p) 1730 { 1731 long i; 1732 ref *ep; 1733 1734 for (i = 0; (ep = ref_stack_index(&e_stack, i)) != 0; ++i) { 1735 if (r_is_estack_mark(ep) && 1736 (ep->value.opproc == oparray_cleanup || 1737 ep->value.opproc == oparray_no_cleanup) 1738 ) 1739 return ep; 1740 } 1741 return 0; 1742 } 1743 1744 /* <bool> .setstackprotect - */ 1745 /* Set whether to protect the stack for the innermost oparray. */ 1746 private int 1747 zsetstackprotect(i_ctx_t *i_ctx_p) 1748 { 1749 os_ptr op = osp; 1750 ref *ep = oparray_find(i_ctx_p); 1751 1752 check_type(*op, t_boolean); 1753 if (ep == 0) 1754 return_error(e_rangecheck); 1755 ep->value.opproc = 1756 (op->value.boolval ? oparray_cleanup : oparray_no_cleanup); 1757 pop(1); 1758 return 0; 1759 } 1760 1761 /* - .currentstackprotect <bool> */ 1762 /* Return the stack protection status. */ 1763 private int 1764 zcurrentstackprotect(i_ctx_t *i_ctx_p) 1765 { 1766 os_ptr op = osp; 1767 ref *ep = oparray_find(i_ctx_p); 1768 1769 if (ep == 0) 1770 return_error(e_rangecheck); 1771 push(1); 1772 make_bool(op, ep->value.opproc == oparray_cleanup); 1773 return 0; 1774 } 1775