/* Copyright (C) 1998, 1999 Aladdin Enterprises.  All rights reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   For more information about licensing, please refer to
   http://www.ghostscript.com/licensing/. For information on
   commercial licensing, go to http://www.artifex.com/licensing/ or
   contact Artifex Software, Inc., 101 Lucas Valley Road #110,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: gdevprna.c,v 1.6 2004/08/04 19:36:12 stefan Exp $ */
/* Generic asynchronous printer driver support */

/* Initial version 2/1/98 by John Desrosiers (soho@crl.com) */
/* Revised 8/7/98 by L. Peter Deutsch (ghost@aladdin.com) for */
/*   memory manager changes */
/* 12/1/98 soho@crl.com - Removed unnecessary flush & reopen in */
/*   gdev_prn_async_write_get_hardware_params */
#include "gdevprna.h"
#include "gsalloc.h"
#include "gsdevice.h"
#include "gsmemlok.h"
#include "gsmemret.h"
#include "gsnogc.h"
#include "gxcldev.h"
#include "gxclpath.h"
#include "gxpageq.h"
#include "gzht.h"		/* for gx_ht_cache_default_bits */

/* ----------------- Constants ----------------------- */
/*
 * Fixed overhead # bytes to run renderer in (+ driver-spec'd variable bytes):
 * empirical & still very subject to change.
 */
#define RendererAllocationOverheadBytes 503000	/* minimum is 503,000 as of 4/26/99 */

#ifdef DEBUG
/* 196000 is pretty much the minimum, given 16K phys memfile blocks */
/*# define DebugBandlistMemorySize 196000*/	/* comment out to disable fixed (debug) bandlist size */
#endif /* defined(DEBUG) */

/* ---------------- Standard device procedures ---------------- */
private dev_proc_close_device(gdev_prn_async_write_close_device);
private dev_proc_output_page(gdev_prn_async_write_output_page);
private dev_proc_put_params(gdev_prn_async_write_put_params);
private dev_proc_get_hardware_params(gdev_prn_async_write_get_hardware_params);
private dev_proc_put_params(gdev_prn_async_render_put_params);

/* ---------------- Forward Declarations ---------------------- */
private void gdev_prn_dealloc(gx_device_printer *);
private proc_free_up_bandlist_memory(gdev_prn_async_write_free_up_bandlist_memory);
private int flush_page(gx_device_printer *, bool);
private int reopen_clist_after_flush(gx_device_printer *);
private void reinit_printer_into_printera(gx_device_printer * const);
private int alloc_bandlist_memory(gs_memory_t **, gs_memory_t *);
private void free_bandlist_memory(gs_memory_t *);
private int alloc_render_memory(gs_memory_t **, gs_memory_t *, long);
private void free_render_memory(gs_memory_t *);
private gs_memory_recover_status_t
    prna_mem_recover(gs_memory_retrying_t *rmem, void *proc_data);

/* ------ Open/close ------ */

/*
 * Open this printer device in ASYNC (overlapped) mode.
 * This routine must always be called by the concrete device's xx_open routine
 * in lieu of gdev_prn_open.
 */
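/*
 * Illustrative sketch only (the XX_* names and figures below are
 * hypothetical, not part of this interface): a concrete async driver's
 * open procedure would typically just pick its raster/band limits and
 * delegate here, e.g.
 *
 *	private int
 *	xx_open(gx_device *pdev)
 *	{
 *	    return gdev_prn_async_write_open((gx_device_printer *)pdev,
 *					     XX_MAX_RASTER,
 *					     XX_MIN_BAND_HEIGHT,
 *					     XX_MAX_SRC_IMAGE_ROW);
 *	}
 */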
int
gdev_prn_async_write_open(gx_device_printer * pwdev, int max_raster,
                          int min_band_height, int max_src_image_row)
{
    gx_device *const pdev = (gx_device *) pwdev;
    int code;
    bool writer_is_open = false;
    gx_device_clist_writer *const pcwdev =
        &((gx_device_clist *) pwdev)->writer;
    gx_device_clist_reader *pcrdev = 0;
    gx_device_printer *prdev = 0;
    gs_memory_t *render_memory = 0;	/* renderer's mem allocator */

    pwdev->page_queue = 0;
    pwdev->bandlist_memory = 0;
    pwdev->async_renderer = 0;

    /* allocate & init render memory */
    /* The big memory consumers are: */
    /* - the buffer used to read images from the command list */
    /* - buffer used by gx_real_default_strip_copy_rop() */
    /* - line pointer tables for memory devices used in plane extraction */
    /* - the halftone cache */
    /* - the band rendering buffer */
    /* The * 2's in the next statement are a ****** HACK ****** to deal with */
    /* sandbars in the memory manager. */
    if ((code = alloc_render_memory(&render_memory,
            pwdev->memory->non_gc_memory, RendererAllocationOverheadBytes + max_raster
            /* the first * 2 is not a hack */
            + (max_raster + sizeof(void *) * 2) * min_band_height
            + max_src_image_row + gx_ht_cache_default_bits() * 2)) < 0)
        goto open_err;

    /* Alloc & init bandlist allocators */
    /* Bandlist mem is threadsafe & common to rdr/wtr, so it's used */
    /* for page queue & cmd list buffers. */
    if ((code = alloc_bandlist_memory
         (&pwdev->bandlist_memory, pwdev->memory->non_gc_memory)) < 0)
        goto open_err;

    /* Dictate banding parameters for both renderer & writer */
    /* Protect from user change, since user changing these won't be */
    /* detected, ergo the necessary close/reallocate/open wouldn't happen. */
    pwdev->space_params.banding_type = BandingAlways;
    pwdev->space_params.params_are_read_only = true;

    /* Make a copy of the device for use as the rendering device before opening the writer */
    code = gs_copydevice((gx_device **) & prdev, pdev, render_memory);
    pcrdev = &((gx_device_clist *) prdev)->reader;
    if (code < 0)
        goto open_err;

    /* -------------- Open cmd list WRITER instance of device ------- */
    /* --------------------------------------------------------------- */
    /* This is wrong, because it causes the same thing in the renderer */
    pwdev->OpenOutputFile = 0;	/* Don't open output file in writer */

    /* Hack: set this vector to tell gdev_prn_open to allocate for async rendering */
    pwdev->free_up_bandlist_memory = &gdev_prn_async_write_free_up_bandlist_memory;

    /* prevent clist writer from queuing path graphics & force it to split images */
    pwdev->clist_disable_mask |= clist_disable_fill_path |
        clist_disable_stroke_path | clist_disable_complex_clip |
        clist_disable_nonrect_hl_image | clist_disable_pass_thru_params;

    if ((code = gdev_prn_open(pdev)) >= 0) {
        writer_is_open = true;

        /* set up constant async-specific fields in device */
        reinit_printer_into_printera(pwdev);

        /* keep ptr to renderer device */
        pwdev->async_renderer = prdev;

        /* Allocate the page queue, then initialize it */
        /* Use bandlist memory since it's shared between rdr & wtr */
        if ((pwdev->page_queue = gx_page_queue_alloc(pwdev->bandlist_memory)) == 0)
            code = gs_note_error(gs_error_VMerror);
        else
            /* Allocate from clist allocator since it is thread-safe */
            code = gx_page_queue_init(pwdev->page_queue, pwdev->bandlist_memory);
    }
    /* ------------ Open cmd list RENDERER instance of device ------- */
    /* --------------------------------------------------------------- */
    if (code >= 0) {
        gx_semaphore_t *open_semaphore;

        /* Force writer's actual band params into reader's requested params */
        prdev->space_params.band = pcwdev->page_info.band_params;

        /* copydevice has already set up prdev->memory = render_memory */
        /* prdev->bandlist_memory = pwdev->bandlist_memory; */
        prdev->buffer_memory = prdev->memory;

        /* enable renderer to accept changes to params computed by writer */
        prdev->space_params.params_are_read_only = false;

        /* page queue is common to both devices */
        prdev->page_queue = pwdev->page_queue;

        /* Start renderer thread & wait for its successful open of device */
        if (!(open_semaphore = gx_semaphore_alloc(prdev->memory)))
            code = gs_note_error(gs_error_VMerror);
        else {
            gdev_prn_start_render_params thread_params;

            thread_params.writer_device = pwdev;
            thread_params.open_semaphore = open_semaphore;
            thread_params.open_code = 0;
            code = (*pwdev->printer_procs.start_render_thread)
                (&thread_params);
            if (code >= 0)
                gx_semaphore_wait(open_semaphore);
            code = thread_params.open_code;
            gx_semaphore_free(open_semaphore);
        }
    }
    /* ----- Set the recovery procedure for the mem allocator ----- */
    if (code >= 0) {
        gs_memory_retrying_set_recover(
            (gs_memory_retrying_t *)pwdev->memory->non_gc_memory,
            prna_mem_recover,
            (void *)pcwdev
        );
    }
    /* --------------------- Wrap up --------------------------------- */
    /* --------------------------------------------------------------- */
    if (code < 0) {
 open_err:
        /* error mop-up */
        if (render_memory && !prdev)
            free_render_memory(render_memory);

        gdev_prn_dealloc(pwdev);
        if (writer_is_open) {
            gdev_prn_close(pdev);
            pwdev->free_up_bandlist_memory = 0;
        }
    }
    return code;
}

/* This procedure is called from within the memory allocator when regular */
/* malloc's fail -- this procedure tries to free up pages from the queue */
/* and returns a status code indicating whether any more can be freed. */
private gs_memory_recover_status_t
prna_mem_recover(gs_memory_retrying_t *rmem, void *proc_data)
{
    int pages_remain = 0;
    gx_device_clist_writer *cldev = proc_data;

    if (cldev->free_up_bandlist_memory != NULL)
        pages_remain =
            (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, false );
    return (pages_remain > 0) ? RECOVER_STATUS_RETRY_OK : RECOVER_STATUS_NO_RETRY;
}

/* (Re)set printer device fields which get trampled by gdevprn_open & put_params */
private void
reinit_printer_into_printera(
    gx_device_printer * const pdev	/* printer to convert */
)
{
    /* Change some of the procedure vector to point at async procedures */
    /* Originals were already saved by gdev_prn_open */
    if (dev_proc(pdev, close_device) == gdev_prn_close)
        set_dev_proc(pdev, close_device, gdev_prn_async_write_close_device);
    set_dev_proc(pdev, output_page, gdev_prn_async_write_output_page);
    set_dev_proc(pdev, put_params, gdev_prn_async_write_put_params);
    set_dev_proc(pdev, get_xfont_procs, gx_default_get_xfont_procs);
    set_dev_proc(pdev, get_xfont_device, gx_default_get_xfont_device);
    set_dev_proc(pdev, get_hardware_params, gdev_prn_async_write_get_hardware_params);

    /* clist writer calls this if it runs out of memory & wants to retry */
    pdev->free_up_bandlist_memory = &gdev_prn_async_write_free_up_bandlist_memory;
}

/* Generic closing for the writer device. */
private int
gdev_prn_async_write_close_device(gx_device * pdev)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;

    /* Signal render thread to close & terminate when done */
    gx_page_queue_add_page(pwdev->page_queue,
                           GX_PAGE_QUEUE_ACTION_TERMINATE, 0, 0);

    /* Wait for renderer to finish all pages & terminate req */
    gx_page_queue_wait_until_empty(pwdev->page_queue);

    /* Cascade down to original close rtn */
    gdev_prn_close(pdev);
    pwdev->free_up_bandlist_memory = 0;

    /* Deallocate dynamic stuff */
    gdev_prn_dealloc(pwdev);
    return 0;
}

/* Deallocate dynamic memory attached to device.  Aware of a possibly incomplete open */
private void
gdev_prn_dealloc(gx_device_printer * pwdev)
{
    gx_device_printer *const prdev = pwdev->async_renderer;

    /* Delete renderer device & its memory allocator */
    if (prdev) {
        gs_memory_t *render_alloc = prdev->memory;

        gs_free_object(render_alloc, prdev, "gdev_prn_dealloc");
        free_render_memory(render_alloc);
    }
    /* Free page queue */
    if (pwdev->page_queue) {
        gx_page_queue_dnit(pwdev->page_queue);
        gs_free_object(pwdev->bandlist_memory, pwdev->page_queue,
                       "gdev_prn_dealloc");
        pwdev->page_queue = 0;
    }
    /* Free memory bandlist allocators */
    if (pwdev->bandlist_memory)
        free_bandlist_memory(pwdev->bandlist_memory);
}

/*
 * Open the render portion of a printer device in ASYNC (overlapped) mode.
 * This routine is always called by the concrete device's xx_open_render_device
 * routine in lieu of gdev_prn_open.
 */
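/*
 * Sketch (hypothetical xx_* names): a driver that overrides
 * printer_procs.open_render_device would normally perform its own
 * renderer-side setup and then delegate to this routine, e.g.
 *
 *	private int
 *	xx_open_render_device(gx_device_printer *prdev)
 *	{
 *	    ... driver-specific renderer setup ...
 *	    return gdev_prn_async_render_open(prdev);
 *	}
 */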
int
gdev_prn_async_render_open(gx_device_printer * prdev)
{
    gx_device *const pdev = (gx_device *) prdev;

    prdev->is_async_renderer = true;
    return gdev_prn_open(pdev);
}

/* Generic closing for the rendering device. */
int
gdev_prn_async_render_close_device(gx_device_printer * prdev)
{
    gx_device *const pdev = (gx_device *) prdev;

    return gdev_prn_close(pdev);
}

/* (Re)set renderer device fields which get trampled by gdevprn_open & put_params */
private void
reinit_printer_into_renderer(
    gx_device_printer * const pdev	/* printer to convert */
)
{
    set_dev_proc(pdev, put_params, gdev_prn_async_render_put_params);
}

/* ---------- Start rasterizer thread ------------ */
/*
 * Must be called by the async device driver implementation (see gdevprna.h
 * under "Synchronizing the Instances").  This is the rendering loop, which
 * requires its own thread for as long as the device is open.  This proc only
 * returns after the device is closed, or if the open failed.  Note that an
 * open error leaves things in a state where the writer thread will not be
 * able to close, since it expects the renderer to acknowledge its requests
 * before it can close.  Ergo, if this routine fails you'll crash unless the
 * caller fixes the problem & successfully retries this.
 */
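/*
 * Minimal sketch of the expected call pattern (thread creation is
 * platform-specific; xx_create_thread and the other xx_* names below are
 * hypothetical): the writer's start_render_thread procedure spawns a
 * thread whose entry point simply runs this rendering loop:
 *
 *	private void
 *	xx_render_thread_entry(void *arg)
 *	{
 *	    gdev_prn_async_render_thread((gdev_prn_start_render_params *)arg);
 *	}
 *
 *	private int
 *	xx_start_render_thread(gdev_prn_start_render_params *params)
 *	{
 *	    return xx_create_thread(xx_render_thread_entry, params);
 *	}
 */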
int	/* rets 0 ok, -ve error code if open failed */
gdev_prn_async_render_thread(
    gdev_prn_start_render_params * params
)
{
    gx_device_printer *const pwdev = params->writer_device;
    gx_device_printer *const prdev = pwdev->async_renderer;
    gx_page_queue_entry_t *entry;
    int code;

    /* Open the device, using the async default unless the driver overrode it */
    if (prdev->printer_procs.open_render_device ==
        gx_default_open_render_device)
        code = gdev_prn_async_render_open(prdev);
    else
        code = (*prdev->printer_procs.open_render_device) (prdev);
    reinit_printer_into_renderer(prdev);

    /* The cmd list logic assumes reader's & writer's tile caches are the same size */
    if (code >= 0 &&
        ((gx_device_clist *) pwdev)->writer.page_tile_cache_size !=
        ((gx_device_clist *) prdev)->writer.page_tile_cache_size) {
        gdev_prn_async_render_close_device(prdev);
        code = gs_note_error(gs_error_VMerror);
    }
    params->open_code = code;
    gx_semaphore_signal(params->open_semaphore);
    if (code < 0)
        return code;

    /* fake open, since not called by gs_opendevice */
    prdev->is_open = true;

    /* Successful open */
    while ((entry = gx_page_queue_start_dequeue(prdev->page_queue))
           && entry->action != GX_PAGE_QUEUE_ACTION_TERMINATE) {
        /* Force printer open again if it mysteriously closed. */
        /* This shouldn't ever happen, but... */
        if (!prdev->is_open) {
            if (prdev->printer_procs.open_render_device ==
                gx_default_open_render_device)
                code = gdev_prn_async_render_open(prdev);
            else
                code = (*prdev->printer_procs.open_render_device) (prdev);
            reinit_printer_into_renderer(prdev);

            if (code >= 0) {
                prdev->is_open = true;
                gdev_prn_output_page((gx_device *) prdev, 0, true);
            }
        }
        if (prdev->is_open) {
            /* Force retrieved entry onto render device */
            ((gx_device_clist *) prdev)->common.page_info = entry->page_info;

            /* Set up device geometry */
            if (clist_setup_params((gx_device *) prdev) >= 0)
                /* Do this again, since setup_params may have trashed it */
                ((gx_device_clist *) prdev)->common.page_info = entry->page_info;

            /* Call appropriate renderer routine to deal w/buffer */
            /* Ignore status, since we don't know how to deal w/errors! */
            switch (entry->action) {

                case GX_PAGE_QUEUE_ACTION_FULL_PAGE:
                    (*dev_proc(prdev, output_page))((gx_device *) prdev,
                                                    entry->num_copies, true);
                    break;

                case GX_PAGE_QUEUE_ACTION_PARTIAL_PAGE:
                case GX_PAGE_QUEUE_ACTION_COPY_PAGE:
                    (*dev_proc(prdev, output_page))((gx_device *) prdev,
                                                    entry->num_copies, false);
                    break;
            }
            /*
             * gx_page_queue_finish_dequeue will close and free the band
             * list files, so we don't need to call clist_close_output_file.
             */
        }
        /* Finalize dequeue & free retrieved queue entry */
        gx_page_queue_finish_dequeue(entry);
    }

    /* Close the device, using the async default unless the driver overrode it. */
    /* Ignore the close status: a bad return from this routine would be taken as an open failure */
    if (prdev->printer_procs.close_render_device ==
        gx_default_close_render_device)
        gdev_prn_async_render_close_device(prdev);
    else
        (*prdev->printer_procs.close_render_device)(prdev);

    /* undo fake open, since not called by gs_closedevice */
    prdev->is_open = false;

    /* Now that device is closed, acknowledge gx_page_queue_terminate */
    gx_page_queue_finish_dequeue(entry);

    return 0;
}

/* ------ Get/put parameters ------ */

/* Put parameters. */
private int
gdev_prn_async_write_put_params(gx_device * pdev, gs_param_list * plist)
{
    gx_device_clist_writer *const pclwdev =
        &((gx_device_clist *) pdev)->writer;
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;
    gdev_prn_space_params save_sp = pwdev->space_params;
    int save_height = pwdev->height;
    int save_width = pwdev->width;
    int code, ecode;

    if (!pwdev->is_open)
        return (*pwdev->orig_procs.put_params) (pdev, plist);

    /*
     * First, cascade to the real device's put_params.
     * If that put_params made any changes that require re-opening
     * the device, just flush the page; the parameter block at the
     * head of the next page will reflect the changes just made.
     * If the put_params requires no re-open, just slip it into the
     * stream in the command buffer.  This way, the writer device
     * should parallel the renderer's status at the same point in
     * their respective executions.
     *
     * NB that all this works only because we take the position that
     * put_params can make no change that actually affects the hardware's
     * state before the final output_page on the RASTERIZER.
     */
    /* Call original procedure, but "closed" to prevent closing device */
    pwdev->is_open = false;	/* prevent put_params from closing device */
    code = (*pwdev->orig_procs.put_params) (pdev, plist);
    pwdev->is_open = true;
    pwdev->OpenOutputFile = 0;	/* This is wrong, because it causes the same thing in the renderer */

    /* Flush the device or emit to the command list, depending on whether the device changed geometry */
    if (memcmp(&pwdev->space_params, &save_sp, sizeof(save_sp)) != 0 ||
        pwdev->width != save_width || pwdev->height != save_height
        ) {
        int pageq_remaining;
        int new_width = pwdev->width;
        int new_height = pwdev->height;
        gdev_prn_space_params new_sp = pwdev->space_params;

        /* Need to start a new page, reallocate clist memory */
        pwdev->width = save_width;
        pwdev->height = save_height;
        pwdev->space_params = save_sp;

        /* First, get rid of any pending partial pages */
        code = flush_page(pwdev, false);

        /* Free and reallocate the printer memory. */
        pageq_remaining = 1;	/* assume there are pages left in queue */
        do {
            ecode =
                gdev_prn_reallocate_memory(pdev,
                                           &new_sp, new_width, new_height);
            if (ecode >= 0)
                break;		/* managed to recover enough memory */
            if (!pdev->is_open) {
                /* Disaster! Device was forced closed, which async drivers */
                /* aren't supposed to do. */
                gdev_prn_async_write_close_device(pdev);
                return ecode;	/* caller is expected to know the device may be closed now */
            }
            pclwdev->error_is_retryable = (ecode == gs_error_VMerror);
        }
        while (pageq_remaining >= 1 &&
               (pageq_remaining = ecode =
                clist_VMerror_recover(pclwdev, ecode)) >= 0);
        if (ecode < 0) {
            gdev_prn_free_memory(pdev);
            pclwdev->is_open = false;
            code = ecode;
        }
    } else if (code >= 0) {
        do
            if ((ecode = cmd_put_params(pclwdev, plist)) >= 0)
                break;
        while ((ecode = clist_VMerror_recover(pclwdev, ecode)) >= 0);
        if (ecode < 0 && pclwdev->error_is_retryable &&
            pclwdev->driver_call_nesting == 0
            )
            ecode = clist_VMerror_recover_flush(pclwdev, ecode);
        if (ecode < 0)
            code = ecode;
    }
    /* Reset fields that got trashed by gdev_prn_put_params and/or gdev_prn_open */
    reinit_printer_into_printera(pwdev);

    return code;
}

/* Get hardware-detected params.  Drain page queue, then call renderer version */
private int
gdev_prn_async_write_get_hardware_params(gx_device * pdev, gs_param_list * plist)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;
    gx_device_printer *const prdev = pwdev->async_renderer;

    if (!pwdev->is_open || !prdev)
        /* if not open, just use device's get hw params */
        return (dev_proc(pwdev, get_hardware_params))(pdev, plist);
    else {
        /* wait for empty pipeline */
        gx_page_queue_wait_until_empty(pwdev->page_queue);

        /* get reader's h/w params, now that writer & reader are sync'ed */
        return (dev_proc(prdev, get_hardware_params))
            ((gx_device *) prdev, plist);
    }
}

/* Put parameters on RENDERER. */
private int	/* returns -ve err code only if FATAL error (can't keep rendering) */
gdev_prn_async_render_put_params(gx_device * pdev, gs_param_list * plist)
{
    gx_device_printer *const prdev = (gx_device_printer *) pdev;
    bool save_is_open = prdev->is_open;

    /* put_params from clist are guaranteed to never re-init device */
    /* They're also pretty much guaranteed to always succeed */
    (*prdev->orig_procs.put_params) (pdev, plist);

    /* If device closed itself, try to open & clear it */
    if (!prdev->is_open && save_is_open) {
        int code;

        if (prdev->printer_procs.open_render_device ==
            gx_default_open_render_device)
            code = gdev_prn_async_render_open(prdev);
        else
            code = (*prdev->printer_procs.open_render_device) (prdev);
        reinit_printer_into_renderer(prdev);
        if (code >= 0)
            /****** CLEAR PAGE SOMEHOW ******/;
        else
            return code;	/* this'll cause clist to stop processing this band! */
    }
    return 0;		/* return this unless FATAL status */
}


/* ------ Others ------ */

/* Output page causes file to get added to page queue for later rasterizing */
private int
gdev_prn_async_write_output_page(gx_device * pdev, int num_copies, int flush)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;
    gx_device_clist_writer *const pcwdev =
        &((gx_device_clist *) pdev)->writer;
    int flush_code;
    int add_code;
    int open_code;
    int one_last_time = 1;

    /* do NOT close files before sending to page queue */
    flush_code = clist_end_page(pcwdev);
    add_code = gx_page_queue_add_page(pwdev->page_queue,
                                      (flush ? GX_PAGE_QUEUE_ACTION_FULL_PAGE :
                                       GX_PAGE_QUEUE_ACTION_COPY_PAGE),
                                      &pcwdev->page_info, num_copies);
    if (flush && (flush_code >= 0) && (add_code >= 0)) {
        /* This page is finished */
        gx_finish_output_page(pdev, num_copies, flush);
    }

    /* Open new band files to take the place of ones added to page queue */
    while ((open_code = (*gs_clist_device_procs.open_device)
            ((gx_device *) pdev)) == gs_error_VMerror) {
        /* Open failed, try after a page gets rendered */
        if (!gx_page_queue_wait_one_page(pwdev->page_queue)
            && one_last_time-- <= 0)
            break;
    }

    return
        (flush_code < 0 ? flush_code : open_code < 0 ? open_code :
         add_code < 0 ? add_code : 0);
}

/* Free bandlist memory: waits until the rasterizer runs enough to free some mem */
private int	/* -ve err, 0 if no pages remain to rasterize, 1 if more pages to go */
gdev_prn_async_write_free_up_bandlist_memory(gx_device * pdev, bool flush_current)
{
    gx_device_printer *const pwdev = (gx_device_printer *) pdev;

    if (flush_current) {
        int code = flush_page(pwdev, true);

        if (code < 0)
            return code;
    }
    return gx_page_queue_wait_one_page(pwdev->page_queue);
}

/* -------- Utility Routines --------- */

/* Flush out any partial pages accumulated in device */
/* LEAVE DEVICE in a state where it must be re-opened/re-init'd */
private int	/* ret 0 ok no flush, -ve error code */
flush_page(
    gx_device_printer * pwdev,	/* async writer device to flush */
    bool partial		/* true if only partial page */
)
{
    gx_device_clist *const pcldev = (gx_device_clist *) pwdev;
    gx_device_clist_writer *const pcwdev = &pcldev->writer;
    int flush_code = 0;
    int add_code = 0;

    /* do NOT close files before sending to page queue */
    flush_code = clist_end_page(pcwdev);
    add_code = gx_page_queue_add_page(pwdev->page_queue,
                                      (partial ? GX_PAGE_QUEUE_ACTION_PARTIAL_PAGE :
                                       GX_PAGE_QUEUE_ACTION_FULL_PAGE),
                                      &pcwdev->page_info, 0);

    /* Device no longer has BANDFILES, so it must be re-init'd by caller */
    pcwdev->page_info.bfile = pcwdev->page_info.cfile = 0;

    /* return the worst of the status. */
    if (flush_code < 0)
        return flush_code;
    return add_code;
}

/* Flush any pending partial pages, re-open device */
private int
reopen_clist_after_flush(
    gx_device_printer * pwdev	/* async writer device to flush */
)
{
    int open_code;
    int one_last_time = 1;

    /* Open new band files to take the place of ones added to page queue */
    while ((open_code = (*gs_clist_device_procs.open_device)
            ((gx_device *) pwdev)) == gs_error_VMerror) {
        /* Open failed, try after a page gets rendered */
        if (!gx_page_queue_wait_one_page(pwdev->page_queue)
            && one_last_time-- <= 0)
            break;
    }
    return open_code;
}

/*
 * The bandlist does allocations on the writer's thread & deallocations on
 * the reader's thread, so it needs to have mutual exclusion from itself, as
 * well as from other memory allocators since the reader can run at the same
 * time as the interpreter.  The bandlist allocator therefore consists of
 * a monitor-locking wrapper around either a direct heap allocator or (for
 * testing) a fixed-limit allocator.
 */
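/*
 * Resulting layering (sketch, per the code below):
 *
 *	writer thread (interpreter)     reader thread (renderer)
 *	              \                    /
 *	               v                  v
 *	          gs_memory_locked_t  (monitor-protected wrapper)
 *	                       |
 *	                       v
 *	      heap allocator from gs_malloc_memory_init(), or, when
 *	      DebugBandlistMemorySize is defined under DEBUG, a
 *	      fixed-limit allocator obtained via alloc_render_memory()
 */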

/* Create a bandlist allocator. */
private int
alloc_bandlist_memory(gs_memory_t ** final_allocator,
                      gs_memory_t * base_allocator)
{
    gs_memory_t *data_allocator = 0;
    gs_memory_locked_t *locked_allocator = 0;
    int code = 0;

#if defined(DEBUG) && defined(DebugBandlistMemorySize)
    code = alloc_render_memory(&data_allocator, base_allocator,
                               DebugBandlistMemorySize);
    if (code < 0)
        return code;
#else
    data_allocator = (gs_memory_t *)gs_malloc_memory_init();
    if (!data_allocator)
        return_error(gs_error_VMerror);
#endif
    locked_allocator = (gs_memory_locked_t *)
        gs_alloc_bytes_immovable(data_allocator, sizeof(gs_memory_locked_t),
                                 "alloc_bandlist_memory(locked allocator)");
    if (!locked_allocator)
        goto alloc_err;
    code = gs_memory_locked_init(locked_allocator, data_allocator);
    if (code < 0)
        goto alloc_err;
    *final_allocator = (gs_memory_t *)locked_allocator;
    return 0;
alloc_err:
    if (locked_allocator)
        free_bandlist_memory((gs_memory_t *)locked_allocator);
    else if (data_allocator)
        gs_memory_free_all(data_allocator, FREE_ALL_EVERYTHING,
                           "alloc_bandlist_memory(data allocator)");
    return (code < 0 ? code : gs_note_error(gs_error_VMerror));
}

/* Free a bandlist allocator. */
private void
free_bandlist_memory(gs_memory_t *bandlist_allocator)
{
    gs_memory_locked_t *const lmem = (gs_memory_locked_t *)bandlist_allocator;
    gs_memory_t *data_mem = gs_memory_locked_target(lmem);

    /* Free the locked wrapper's structures (including the wrapper itself), */
    /* then tear down the underlying data allocator it wrapped. */
    gs_memory_free_all(bandlist_allocator,
                       FREE_ALL_STRUCTURES | FREE_ALL_ALLOCATOR,
                       "free_bandlist_memory(locked allocator)");
    if (data_mem)
        gs_memory_free_all(data_mem, FREE_ALL_EVERYTHING,
                           "free_bandlist_memory(data allocator)");
}

/* Create an allocator with a fixed memory limit. */
private int
alloc_render_memory(gs_memory_t **final_allocator,
                    gs_memory_t *base_allocator, long space)
{
    gs_ref_memory_t *rmem =
        ialloc_alloc_state((gs_memory_t *)base_allocator, space);
    vm_spaces spaces;
    int i, code;

    if (rmem == 0)
        return_error(gs_error_VMerror);
    code = ialloc_add_chunk(rmem, space, "alloc_render_memory");
    if (code < 0) {
        gs_memory_free_all((gs_memory_t *)rmem, FREE_ALL_EVERYTHING,
                           "alloc_render_memory");
        return code;
    }
    *final_allocator = (gs_memory_t *)rmem;

    /* Call the reclaim procedure to delete the string marking tables */
    /* Only need this once since no other chunks will ever exist */

    for ( i = 0; i < countof(spaces_indexed); ++i )
        spaces_indexed[i] = 0;
    space_local = space_global = (gs_ref_memory_t *)rmem;
    spaces.vm_reclaim = gs_nogc_reclaim;	/* no real GC on this chunk */
    GS_RECLAIM(&spaces, false);

    return 0;
}

/* Free an allocator with a fixed memory limit. */
private void
free_render_memory(gs_memory_t *render_allocator)
{
    if (render_allocator)
        gs_memory_free_all(render_allocator, FREE_ALL_EVERYTHING,
                           "free_render_memory");
}