xref: /plan9/sys/src/cmd/gs/src/gxclist.c (revision 593dc095aefb2a85c828727bbfa9da139a49bdf4)
1 /* Copyright (C) 1991, 2000 Aladdin Enterprises.  All rights reserved.
2 
3   This software is provided AS-IS with no warranty, either express or
4   implied.
5 
6   This software is distributed under license and may not be copied,
7   modified or distributed except as expressly authorized under the terms
8   of the license contained in the file LICENSE in this distribution.
9 
10   For more information about licensing, please refer to
11   http://www.ghostscript.com/licensing/. For information on
12   commercial licensing, go to http://www.artifex.com/licensing/ or
13   contact Artifex Software, Inc., 101 Lucas Valley Road #110,
14   San Rafael, CA  94903, U.S.A., +1(415)492-9861.
15 */
16 
17 /*$Id: gxclist.c,v 1.15 2005/03/14 18:08:36 dan Exp $ */
18 /* Command list document- and page-level code. */
19 #include "memory_.h"
20 #include "string_.h"
21 #include "gx.h"
22 #include "gp.h"
23 #include "gpcheck.h"
24 #include "gserrors.h"
25 #include "gxdevice.h"
26 #include "gxdevmem.h"		/* must precede gxcldev.h */
27 #include "gxcldev.h"
28 #include "gxclpath.h"
29 #include "gsparams.h"
30 #include "gxdcolor.h"
31 
32 /* GC information */
33 #define CLIST_IS_WRITER(cdev) ((cdev)->common.ymin < 0)
34 extern_st(st_imager_state);
35 private
36 ENUM_PTRS_WITH(device_clist_enum_ptrs, gx_device_clist *cdev)
37     if (index < st_device_forward_max_ptrs) {
38 	gs_ptr_type_t ret = ENUM_USING_PREFIX(st_device_forward, 0);
39 
40 	return (ret ? ret : ENUM_OBJ(0));
41     }
42     if (!CLIST_IS_WRITER(cdev))
43 	return 0;
44     index -= st_device_forward_max_ptrs;
45     switch (index) {
46     case 0: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ?
47 			     cdev->writer.clip_path : 0));
48     case 1: return ENUM_OBJ((cdev->writer.image_enum_id != gs_no_id ?
49 			     cdev->writer.color_space.space : 0));
50     default:
51 	return ENUM_USING(st_imager_state, &cdev->writer.imager_state,
52 			  sizeof(gs_imager_state), index - 2);
53     }
54 ENUM_PTRS_END
55 private
56 RELOC_PTRS_WITH(device_clist_reloc_ptrs, gx_device_clist *cdev)
57 {
58     RELOC_PREFIX(st_device_forward);
59     if (!CLIST_IS_WRITER(cdev))
60 	return;
61     if (cdev->writer.image_enum_id != gs_no_id) {
62 	RELOC_VAR(cdev->writer.clip_path);
63 	RELOC_VAR(cdev->writer.color_space.space);
64     }
65     RELOC_USING(st_imager_state, &cdev->writer.imager_state,
66 		sizeof(gs_imager_state));
67 } RELOC_PTRS_END
68 public_st_device_clist();
69 
70 /* Forward declarations of driver procedures */
71 private dev_proc_open_device(clist_open);
72 private dev_proc_output_page(clist_output_page);
73 private dev_proc_close_device(clist_close);
74 private dev_proc_get_band(clist_get_band);
75 /* Driver procedures defined in other files are declared in gxcldev.h. */
76 
77 /* Other forward declarations */
78 private int clist_put_current_params(gx_device_clist_writer *cldev);
79 
80 /* The device procedures */
81 const gx_device_procs gs_clist_device_procs = {
82     clist_open,
83     gx_forward_get_initial_matrix,
84     gx_default_sync_output,
85     clist_output_page,
86     clist_close,
87     gx_forward_map_rgb_color,
88     gx_forward_map_color_rgb,
89     clist_fill_rectangle,
90     gx_default_tile_rectangle,
91     clist_copy_mono,
92     clist_copy_color,
93     gx_default_draw_line,
94     gx_default_get_bits,
95     gx_forward_get_params,
96     gx_forward_put_params,
97     gx_forward_map_cmyk_color,
98     gx_forward_get_xfont_procs,
99     gx_forward_get_xfont_device,
100     gx_forward_map_rgb_alpha_color,
101     gx_forward_get_page_device,
102     gx_forward_get_alpha_bits,
103     clist_copy_alpha,
104     clist_get_band,
105     gx_default_copy_rop,
106     clist_fill_path,
107     clist_stroke_path,
108     clist_fill_mask,
109     gx_default_fill_trapezoid,
110     clist_fill_parallelogram,
111     clist_fill_triangle,
112     gx_default_draw_thin_line,
113     gx_default_begin_image,
114     gx_default_image_data,
115     gx_default_end_image,
116     clist_strip_tile_rectangle,
117     clist_strip_copy_rop,
118     gx_forward_get_clipping_box,
119     clist_begin_typed_image,
120     clist_get_bits_rectangle,
121     gx_forward_map_color_rgb_alpha,
122     clist_create_compositor,
123     gx_forward_get_hardware_params,
124     gx_default_text_begin,
125     gx_default_finish_copydevice,
126     NULL,			/* begin_transparency_group */
127     NULL,			/* end_transparency_group */
128     NULL,			/* begin_transparency_mask */
129     NULL,			/* end_transparency_mask */
130     NULL,			/* discard_transparency_layer */
131     gx_forward_get_color_mapping_procs,
132     gx_forward_get_color_comp_index,
133     gx_forward_encode_color,
134     gx_forward_decode_color,
135     gx_default_pattern_manage,
136     gx_default_fill_rectangle_hl_color,
137     gx_default_include_color_space,
138     gx_default_fill_linear_color_scanline,
139     gx_default_fill_linear_color_trapezoid, /* fixme : write to clist. */
140     gx_default_fill_linear_color_triangle,
141     gx_forward_update_spot_equivalent_colors
142 };
143 
144 /* ------ Define the command set and syntax ------ */
145 
146 /* Initialization for imager state. */
147 /* The initial scale is arbitrary. */
148 const gs_imager_state clist_imager_state_initial =
149 {gs_imager_state_initial(300.0 / 72.0)};
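/* (300.0 / 72.0 corresponds to a nominal 300 dpi device; since the scale
   is arbitrary, any nonzero value would serve equally well here.) */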
150 
151 /*
152  * The buffer area (data, data_size) holds a bitmap cache when both writing
153  * and reading.  The rest of the space is used for the command buffer and
154  * band state bookkeeping when writing, and for the rendering buffer (image
155  * device) when reading.  For the moment, we divide the space up
156  * arbitrarily, except that we allocate less space for the bitmap cache if
157  * the device doesn't need halftoning.
158  *
159  * All the routines for allocating tables in the buffer are idempotent, so
160  * they can be used to check whether a given-size buffer is large enough.
161  */
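
/*
 * As a rough sketch of the writer-time layout (the exact split is
 * computed by clist_init_data below, and the align_* values are
 * platform-dependent):
 *
 *	data --> +----------------------+
 *	         | tile (bitmap) cache  |   clist_init_tile_cache
 *	         +----------------------+
 *	         | per-band states      |   clist_init_states
 *	         +----------------------+
 *	         | command buffer       |   cbuf .. cend
 *	         +----------------------+ <-- data + data_size
 */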
162 
163 /*
164  * Calculate the desired size for the tile cache.
165  */
166 private uint
167 clist_tile_cache_size(const gx_device * target, uint data_size)
168 {
169     uint bits_size =
170     (data_size / 5) & -align_cached_bits_mod;	/* arbitrary */
171 
172     if (!gx_device_must_halftone(target)) {	/* No halftones -- cache holds only Patterns & characters. */
173 	bits_size -= bits_size >> 2;
174     }
175 #define min_bits_size 1024
176     if (bits_size < min_bits_size)
177 	bits_size = min_bits_size;
178 #undef min_bits_size
179     return bits_size;
180 }
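
/*
 * For a sense of scale (figures illustrative only, since
 * align_cached_bits_mod is platform-dependent): with data_size = 1000000,
 * data_size / 5 = 200000 is rounded down to an alignment boundary, and if
 * the target needs no halftoning the cache is reduced by a quarter, to
 * about 150000 bytes for Patterns and characters.
 */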
181 
182 /*
183  * Initialize the allocation for the tile cache.  Sets: tile_hash_mask,
184  * tile_max_count, tile_table, chunk (structure), bits (structure).
185  */
186 private int
187 clist_init_tile_cache(gx_device * dev, byte * init_data, ulong data_size)
188 {
189     gx_device_clist_writer * const cdev =
190 	&((gx_device_clist *)dev)->writer;
191     byte *data = init_data;
192     uint bits_size = data_size;
193     /*
194      * Partition the bits area between the hash table and the actual
195      * bitmaps.  The per-bitmap overhead is about 24 bytes; if the
196      * average character size is 10 points, its bitmap takes about 24 +
197      * 0.5 * 10/72 * xdpi * 10/72 * ydpi / 8 bytes (the 0.5 being a
198      * fudge factor to account for characters being narrower than they
199      * are tall), which gives us a guideline for the size of the hash
200      * table.
201      */
202     uint avg_char_size =
203 	(uint)(dev->HWResolution[0] * dev->HWResolution[1] *
204 	       (0.5 * 10 / 72 * 10 / 72 / 8)) + 24;
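    /*
     * Illustrative arithmetic: at 300x300 dpi a 10-point character is
     * 10/72 * 300 ~= 42 pixels on a side, so the estimate comes to
     * about 0.5 * 42 * 42 / 8 + 24 ~= 130 bytes per cached bitmap.
     */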
205     uint hc = bits_size / avg_char_size;
206     uint hsize;
207 
208     while ((hc + 1) & hc)
209 	hc |= hc >> 1;		/* make mask (power of 2 - 1) */
210     if (hc < 0xff)
211 	hc = 0xff;		/* make allowance for halftone tiles */
212     else if (hc > 0xfff)
213 	hc = 0xfff;		/* cmd_op_set_tile_index has 12-bit operand */
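    /*
     * Example of the mask construction: starting from hc = 1000 (0x3e8),
     * or-ing in hc >> 1 fills the low bits until hc = 0x3ff = 1023; the
     * loop test ((hc + 1) & hc) is nonzero exactly while hc + 1 is not
     * yet a power of 2.  The clamps then keep the mask within
     * [0xff, 0xfff].
     */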
214     /* Make sure the tables will fit. */
215     while (hc >= 3 && (hsize = (hc + 1) * sizeof(tile_hash)) >= bits_size)
216 	hc >>= 1;
217     if (hc < 3)
218 	return_error(gs_error_rangecheck);
219     cdev->tile_hash_mask = hc;
220     cdev->tile_max_count = hc - (hc >> 2);
221     cdev->tile_table = (tile_hash *) data;
222     data += hsize;
223     bits_size -= hsize;
224     gx_bits_cache_chunk_init(&cdev->chunk, data, bits_size);
225     gx_bits_cache_init(&cdev->bits, &cdev->chunk);
226     return 0;
227 }
228 
229 /*
230  * Initialize the allocation for the bands.  Requires: target.  Sets:
231  * page_band_height (=page_info.band_params.BandHeight), nbands.
232  */
233 private int
234 clist_init_bands(gx_device * dev, gx_device_memory *bdev, uint data_size,
235 		 int band_width, int band_height)
236 {
237     gx_device_clist_writer * const cdev =
238 	&((gx_device_clist *)dev)->writer;
239     int nbands;
240 
241     if (gdev_mem_data_size(bdev, band_width, band_height) > data_size)
242 	return_error(gs_error_rangecheck);
243     cdev->page_band_height = band_height;
244     nbands = (cdev->target->height + band_height - 1) / band_height;
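    /* nbands is ceil(target height / band_height): e.g. a 3300-line page
       rendered in 256-line bands needs (3300 + 255) / 256 = 13 bands. */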
245     cdev->nbands = nbands;
246 #ifdef DEBUG
247     if (gs_debug_c('l') | gs_debug_c(':'))
248 	dlprintf4("[:]width=%d, band_width=%d, band_height=%d, nbands=%d\n",
249 		  bdev->width, band_width, band_height, nbands);
250 #endif
251     return 0;
252 }
253 
254 /*
255  * Initialize the allocation for the band states, which are used only
256  * when writing.  Requires: nbands.  Sets: states, cbuf, cend.
257  */
258 private int
259 clist_init_states(gx_device * dev, byte * init_data, uint data_size)
260 {
261     gx_device_clist_writer * const cdev =
262 	&((gx_device_clist *)dev)->writer;
263     ulong state_size = cdev->nbands * (ulong) sizeof(gx_clist_state);
264 
265     /*
266      * The +100 in the next line is bogus, but we don't know what the
267      * real check should be. We're effectively ensuring that at least 100
268      * bytes will be available to buffer command operands.
269      */
270     if (state_size + sizeof(cmd_prefix) + cmd_largest_size + 100 > data_size)
271 	return_error(gs_error_rangecheck);
272     cdev->states = (gx_clist_state *) init_data;
273     cdev->cbuf = init_data + state_size;
274     cdev->cend = init_data + data_size;
275     return 0;
276 }
277 
278 /*
279  * Initialize all the data allocations.  Requires: target.  Sets:
280  * page_tile_cache_size, page_info.band_params.BandWidth,
281  * page_info.band_params.BandBufferSpace, + see above.
282  */
283 private int
284 clist_init_data(gx_device * dev, byte * init_data, uint data_size)
285 {
286     gx_device_clist_writer * const cdev =
287 	&((gx_device_clist *)dev)->writer;
288     gx_device *target = cdev->target;
289     const int band_width =
290 	cdev->page_info.band_params.BandWidth =
291 	(cdev->band_params.BandWidth ? cdev->band_params.BandWidth :
292 	 target->width);
293     int band_height = cdev->band_params.BandHeight;
294     bool page_uses_transparency = cdev->page_uses_transparency;
295     const uint band_space =
296     cdev->page_info.band_params.BandBufferSpace =
297 	(cdev->band_params.BandBufferSpace ?
298 	 cdev->band_params.BandBufferSpace : data_size);
299     byte *data = init_data;
300     uint size = band_space;
301     uint bits_size;
302     gx_device_memory bdev;
303     gx_device *pbdev = (gx_device *)&bdev;
304     int code;
305 
306     /* Call create_buf_device to get the memory planarity set up. */
307     cdev->buf_procs.create_buf_device(&pbdev, target, NULL, NULL, true);
308     /* HACK - if the buffer device can't do copy_alpha, disallow */
309     /* copy_alpha in the command list device as well. */
310     if (dev_proc(pbdev, copy_alpha) == gx_no_copy_alpha)
311 	cdev->disable_mask |= clist_disable_copy_alpha;
312     if (band_height) {
313 	/*
314 	 * The band height is fixed, so the band buffer requirement
315 	 * is completely determined.
316 	 */
317 	uint band_data_size =
318 	    gdev_mem_data_size(&bdev, band_width, band_height);
319 
320 	if (band_data_size >= band_space)
321 	    return_error(gs_error_rangecheck);
322 	bits_size = min(band_space - band_data_size, data_size >> 1);
323     } else {
324 	/*
325 	 * Choose the largest band height that will fit in the
326 	 * rendering-time buffer.
327 	 */
328 	bits_size = clist_tile_cache_size(target, band_space);
329 	bits_size = min(bits_size, data_size >> 1);
330 	band_height = gdev_mem_max_height(&bdev, band_width,
331 			  band_space - bits_size, page_uses_transparency);
332 	if (band_height == 0)
333 	    return_error(gs_error_rangecheck);
334     }
335     code = clist_init_tile_cache(dev, data, bits_size);
336     if (code < 0)
337 	return code;
338     cdev->page_tile_cache_size = bits_size;
339     data += bits_size;
340     size -= bits_size;
341     code = clist_init_bands(dev, &bdev, size, band_width, band_height);
342     if (code < 0)
343 	return code;
344     return clist_init_states(dev, data, data_size - bits_size);
345 }
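/*
 * To make the two sizing paths above concrete (numbers illustrative):
 * with a fixed BandHeight, the band raster requirement is computed
 * directly and the tile cache gets the leftover space, capped at half of
 * data_size; with BandHeight = 0, the cache is sized first (the
 * band_space / 5 heuristic, again capped at half of data_size), and
 * gdev_mem_max_height then picks the tallest band whose raster fits in
 * the space that remains.
 */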
346 /*
347  * Reset the device state (for writing).  This routine requires only
348  * data, data_size, and target to be set, and is idempotent.
349  */
350 private int
351 clist_reset(gx_device * dev)
352 {
353     gx_device_clist_writer * const cdev =
354 	&((gx_device_clist *)dev)->writer;
355     int code = clist_init_data(dev, cdev->data, cdev->data_size);
356     int nbands;
357 
358     if (code < 0)
359 	return (cdev->permanent_error = code);
360     /* Now initialize the rest of the state. */
361     cdev->permanent_error = 0;
362     nbands = cdev->nbands;
363     cdev->ymin = cdev->ymax = -1;	/* render_init not done yet */
364     memset(cdev->tile_table, 0, (cdev->tile_hash_mask + 1) *
365 	   sizeof(*cdev->tile_table));
366     cdev->cnext = cdev->cbuf;
367     cdev->ccl = 0;
368     cdev->band_range_list.head = cdev->band_range_list.tail = 0;
369     cdev->band_range_min = 0;
370     cdev->band_range_max = nbands - 1;
371     {
372 	int band;
373 	gx_clist_state *states = cdev->states;
374 
375 	for (band = 0; band < nbands; band++, states++) {
376 	    static const gx_clist_state cls_initial =
377 	    {cls_initial_values};
378 
379 	    *states = cls_initial;
380 	}
381     }
382     /*
383      * Round up the size of the per-tile band mask so that the bits,
384      * which follow it, stay aligned.
385      */
386     cdev->tile_band_mask_size =
387 	((nbands + (align_bitmap_mod * 8 - 1)) >> 3) &
388 	~(align_bitmap_mod - 1);
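    /*
     * Example: if align_bitmap_mod is 4 (it is platform-dependent) and
     * nbands = 100, this gives ((100 + 31) >> 3) & ~3 = 16 bytes, i.e.
     * 128 mask bits -- the smallest aligned size that holds 100 bits.
     */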
389     /*
390      * Initialize the all-band parameters to impossible values,
391      * to force them to be written the first time they are used.
392      */
393     memset(&cdev->tile_params, 0, sizeof(cdev->tile_params));
394     cdev->tile_depth = 0;
395     cdev->tile_known_min = nbands;
396     cdev->tile_known_max = -1;
397     cdev->imager_state = clist_imager_state_initial;
398     cdev->clip_path = NULL;
399     cdev->clip_path_id = gs_no_id;
400     cdev->color_space.byte1 = 0;
401     cdev->color_space.id = gs_no_id;
402     cdev->color_space.space = 0;
403     {
404 	int i;
405 
406 	for (i = 0; i < countof(cdev->transfer_ids); ++i)
407 	    cdev->transfer_ids[i] = gs_no_id;
408     }
409     cdev->black_generation_id = gs_no_id;
410     cdev->undercolor_removal_id = gs_no_id;
411     cdev->device_halftone_id = gs_no_id;
412     cdev->image_enum_id = gs_no_id;
413     return 0;
414 }
415 /*
416  * Initialize the device state (for writing).  This routine requires only
417  * data, data_size, and target to be set, and is idempotent.
418  */
419 private int
420 clist_init(gx_device * dev)
421 {
422     gx_device_clist_writer * const cdev =
423 	&((gx_device_clist *)dev)->writer;
424     int code = clist_reset(dev);
425 
426     if (code >= 0) {
427 	cdev->image_enum_id = gs_no_id;
428 	cdev->error_is_retryable = 0;
429 	cdev->driver_call_nesting = 0;
430 	cdev->ignore_lo_mem_warnings = 0;
431     }
432     return code;
433 }
434 
435 /* (Re)init open band files for output (set block size, etc). */
436 private int	/* ret 0 ok, -ve error code */
437 clist_reinit_output_file(gx_device *dev)
438 {    gx_device_clist_writer * const cdev =
439 	&((gx_device_clist *)dev)->writer;
440     int code = 0;
441 
442     /* bfile needs to guarantee cmd_blocks for: 1 band range, nbands */
443     /*  & terminating entry */
444     int b_block = sizeof(cmd_block) * (cdev->nbands + 2);
445 
446     /* cfile needs to guarantee one writer buffer */
447     /*  + one end_clip cmd (if during image's clip path setup) */
448     /*  + an end_image cmd for each band (if during image) */
449     /*  + end_cmds for each band and one band range */
450     int c_block =
451 	cdev->cend - cdev->cbuf + 2 + cdev->nbands * 2 + (cdev->nbands + 1);
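    /* i.e. the full writer buffer (cend - cbuf), plus room for one
       end_clip command (2), an end_image per band (2 * nbands), and an
       end_cmd for each band plus one for the band range (nbands + 1). */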
452 
453     /* All this is for partial page rendering's benefit; do it only */
454     /*  if partial page rendering is available. */
455     if ( clist_test_VMerror_recoverable(cdev) )
456 	{ if (cdev->page_bfile != 0)
457 	    code = clist_set_memory_warning(cdev->page_bfile, b_block);
458 	if (code >= 0 && cdev->page_cfile != 0)
459 	    code = clist_set_memory_warning(cdev->page_cfile, c_block);
460 	}
461     return code;
462 }
463 
464 /* Write out the current parameters that must be at the head of each page */
465 /* if async rendering is in effect */
466 private int
467 clist_emit_page_header(gx_device *dev)
468 {
469     gx_device_clist_writer * const cdev =
470 	&((gx_device_clist *)dev)->writer;
471     int code = 0;
472 
473     if ((cdev->disable_mask & clist_disable_pass_thru_params)) {
474 	do
475 	    if ((code = clist_put_current_params(cdev)) >= 0)
476 	        break;
477 	while ((code = clist_VMerror_recover(cdev, code)) >= 0);
478 	cdev->permanent_error = (code < 0 ? code : 0);
479 	if (cdev->permanent_error < 0)
480 	    cdev->error_is_retryable = 0;
481     }
482     return code;
483 }
484 
485 /* Reset parameters for the beginning of a page. */
486 private void
487 clist_reset_page(gx_device_clist_writer *cwdev)
488 {
489     cwdev->page_bfile_end_pos = 0;
490     /* Indicate that the colors_used information hasn't been computed. */
491     cwdev->page_info.scan_lines_per_colors_used = 0;
492     memset(cwdev->page_info.band_colors_used, 0,
493 	   sizeof(cwdev->page_info.band_colors_used));
494 }
495 
496 /* Open the device's bandfiles */
497 private int
498 clist_open_output_file(gx_device *dev)
499 {
500     gx_device_clist_writer * const cdev =
501 	&((gx_device_clist *)dev)->writer;
502     char fmode[4];
503     int code;
504 
505     if (cdev->do_not_open_or_close_bandfiles)
506 	return 0; /* external bandfile open/close managed externally */
507     cdev->page_cfile = 0;	/* in case of failure */
508     cdev->page_bfile = 0;	/* ditto */
509     code = clist_init(dev);
510     if (code < 0)
511 	return code;
512     strcpy(fmode, "w+");
513     strcat(fmode, gp_fmode_binary_suffix);
514     cdev->page_cfname[0] = 0;	/* create a new file */
515     cdev->page_bfname[0] = 0;	/* ditto */
516     clist_reset_page(cdev);
517     if ((code = clist_fopen(cdev->page_cfname, fmode, &cdev->page_cfile,
518 			    cdev->bandlist_memory, cdev->bandlist_memory,
519 			    true)) < 0 ||
520 	(code = clist_fopen(cdev->page_bfname, fmode, &cdev->page_bfile,
521 			    cdev->bandlist_memory, cdev->bandlist_memory,
522 			    true)) < 0 ||
523 	(code = clist_reinit_output_file(dev)) < 0
524 	) {
525 	clist_close_output_file(dev);
526 	cdev->permanent_error = code;
527 	cdev->error_is_retryable = 0;
528     }
529     return code;
530 }
531 
532 /* Close, and free the contents of, the temporary files of a page. */
533 /* Note that this does not deallocate the buffer. */
534 int
535 clist_close_page_info(gx_band_page_info_t *ppi)
536 {
537     if (ppi->cfile != NULL) {
538 	clist_fclose(ppi->cfile, ppi->cfname, true);
539 	ppi->cfile = NULL;
540     }
541     if (ppi->bfile != NULL) {
542 	clist_fclose(ppi->bfile, ppi->bfname, true);
543 	ppi->bfile = NULL;
544     }
545     return 0;
546 }
547 
548 /* Close the device by freeing the temporary files. */
549 /* Note that this does not deallocate the buffer. */
550 int
551 clist_close_output_file(gx_device *dev)
552 {
553     gx_device_clist_writer * const cdev =
554 	&((gx_device_clist *)dev)->writer;
555 
556     return clist_close_page_info(&cdev->page_info);
557 }
558 
559 /* Open the device by initializing the device state and opening the */
560 /* scratch files. */
561 private int
562 clist_open(gx_device *dev)
563 {
564     gx_device_clist_writer * const cdev =
565 	&((gx_device_clist *)dev)->writer;
566     int code;
567 
568     cdev->permanent_error = 0;
569     code = clist_init(dev);
570     if (code < 0)
571 	return code;
572     code = clist_open_output_file(dev);
573     if ( code >= 0)
574 	code = clist_emit_page_header(dev);
575     return code;
576 }
577 
578 private int
579 clist_close(gx_device *dev)
580 {
581     gx_device_clist_writer * const cdev =
582 	&((gx_device_clist *)dev)->writer;
583 
584     if (cdev->do_not_open_or_close_bandfiles)
585 	return 0;
586     return clist_close_output_file(dev);
587 }
588 
589 /* The output_page procedure should never be called! */
590 private int
591 clist_output_page(gx_device * dev, int num_copies, int flush)
592 {
593     return_error(gs_error_Fatal);
594 }
595 
596 /* Reset (or prepare to append to) the command list after printing a page. */
597 int
598 clist_finish_page(gx_device *dev, bool flush)
599 {
600     gx_device_clist_writer * const cdev =
601 	&((gx_device_clist *)dev)->writer;
602     int code;
603 
604     if (flush) {
605 	if (cdev->page_cfile != 0)
606 	    clist_rewind(cdev->page_cfile, true, cdev->page_cfname);
607 	if (cdev->page_bfile != 0)
608 	    clist_rewind(cdev->page_bfile, true, cdev->page_bfname);
609 	clist_reset_page(cdev);
610     } else {
611 	if (cdev->page_cfile != 0)
612 	    clist_fseek(cdev->page_cfile, 0L, SEEK_END, cdev->page_cfname);
613 	if (cdev->page_bfile != 0)
614 	    clist_fseek(cdev->page_bfile, 0L, SEEK_END, cdev->page_bfname);
615     }
616     code = clist_init(dev);		/* reinitialize */
617     if (code >= 0)
618 	code = clist_reinit_output_file(dev);
619     if (code >= 0)
620 	code = clist_emit_page_header(dev);
621 
622     return code;
623 }
624 
625 /* ------ Writing ------ */
626 
627 /* End a page by flushing the buffer and terminating the command list. */
628 int	/* ret 0 all-ok, -ve error code, or +1 ok w/low-mem warning */
629 clist_end_page(gx_device_clist_writer * cldev)
630 {
631     int code = cmd_write_buffer(cldev, cmd_opv_end_page);
632     cmd_block cb;
633     int ecode = 0;
634 
635     if (code >= 0) {
636 	/*
637 	 * Write the terminating entry in the block file.
638 	 * Note that because of copypage, there may be many such entries.
639 	 */
640 	cb.band_min = cb.band_max = cmd_band_end;
641 	cb.pos = (cldev->page_cfile == 0 ? 0 : clist_ftell(cldev->page_cfile));
642 	code = clist_fwrite_chars(&cb, sizeof(cb), cldev->page_bfile);
643 	if (code > 0)
644 	    code = 0;
645     }
646     if (code >= 0) {
647 	clist_compute_colors_used(cldev);
648 	ecode |= code;
649 	cldev->page_bfile_end_pos = clist_ftell(cldev->page_bfile);
650     }
651     if (code < 0)
652 	ecode = code;
653 
654     /* Reset warning margin to 0 to release reserve memory if mem files */
655     if (cldev->page_bfile != 0)
656 	clist_set_memory_warning(cldev->page_bfile, 0);
657     if (cldev->page_cfile != 0)
658 	clist_set_memory_warning(cldev->page_cfile, 0);
659 
660 #ifdef DEBUG
661     if (gs_debug_c('l') | gs_debug_c(':'))
662 	dlprintf2("[:]clist_end_page at cfile=%ld, bfile=%ld\n",
663 		  cb.pos, cldev->page_bfile_end_pos);
664 #endif
665     return 0;
666 }
667 
668 /* Compute the set of used colors in the page_info structure. */
669 void
670 clist_compute_colors_used(gx_device_clist_writer *cldev)
671 {
672     int nbands = cldev->nbands;
673     int bands_per_colors_used =
674 	(nbands + PAGE_INFO_NUM_COLORS_USED - 1) /
675 	PAGE_INFO_NUM_COLORS_USED;
676     int band;
677 
678     cldev->page_info.scan_lines_per_colors_used =
679 	cldev->page_band_height * bands_per_colors_used;
680     memset(cldev->page_info.band_colors_used, 0,
681 	   sizeof(cldev->page_info.band_colors_used));
682     for (band = 0; band < nbands; ++band) {
683 	int entry = band / bands_per_colors_used;
684 
685 	cldev->page_info.band_colors_used[entry].or |=
686 	    cldev->states[band].colors_used.or;
687 	cldev->page_info.band_colors_used[entry].slow_rop |=
688 	    cldev->states[band].colors_used.slow_rop;
689     }
690 }
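
/*
 * Example of the folding above: if PAGE_INFO_NUM_COLORS_USED were 8 and
 * nbands were 50, bands_per_colors_used = (50 + 7) / 8 = 7, so bands 0-6
 * OR into entry 0, bands 7-13 into entry 1, and so on; each entry then
 * summarizes 7 * page_band_height scan lines of the page.
 */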
691 
692 /* Recover recoverable VM error if possible without flushing */
693 int	/* ret -ve err, >= 0 if recovered w/# = cnt pages left in page queue */
694 clist_VMerror_recover(gx_device_clist_writer *cldev,
695 		      int old_error_code)
696 {
697     int code = old_error_code;
698     int pages_remain;
699 
700     if (!clist_test_VMerror_recoverable(cldev) ||
701 	!cldev->error_is_retryable ||
702 	old_error_code != gs_error_VMerror
703 	)
704 	return old_error_code;
705 
706     /* Do some rendering, return if enough memory is now free */
707     do {
708 	pages_remain =
709 	    (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, false );
710 	if (pages_remain < 0) {
711 	    code = pages_remain;	/* abort, error or interrupt req */
712 	    break;
713 	}
714 	if (clist_reinit_output_file( (gx_device *)cldev ) == 0) {
715 	    code = pages_remain;	/* got enough memory to continue */
716 	    break;
717 	}
718     } while (pages_remain);
719 
720     if_debug1('L', "[L]soft flush of command list, status: %d\n", code);
721     return code;
722 }
723 
724 /* If recoverable VM error, flush & try to recover it */
725 int	/* ret 0 ok, else -ve error */
726 clist_VMerror_recover_flush(gx_device_clist_writer *cldev,
727 			    int old_error_code)
728 {
729     int free_code = 0;
730     int reset_code = 0;
731     int code;
732 
733     /* If the device has the ability to render partial pages, flush
734      * out the bandlist, and reset the writing state. Then, get the
735      * device to render this band. When done, see if there's now enough
736      * memory to satisfy the minimum low-memory guarantees. If not,
737      * get the device to render some more. If there's nothing left to
738      * render & still insufficient memory, declare an error condition.
739      */
740     if (!clist_test_VMerror_recoverable(cldev) ||
741 	old_error_code != gs_error_VMerror
742 	)
743 	return old_error_code;	/* sorry, don't have any means to recover this error */
744     free_code = (*cldev->free_up_bandlist_memory)( (gx_device *)cldev, true );
745 
746     /* Reset the state of bands to "don't know anything" */
747     reset_code = clist_reset( (gx_device *)cldev );
748     if (reset_code >= 0)
749 	reset_code = clist_open_output_file( (gx_device *)cldev );
750     if ( reset_code >= 0 &&
751 	 (cldev->disable_mask & clist_disable_pass_thru_params)
752 	 )
753 	reset_code = clist_put_current_params(cldev);
754     if (reset_code < 0) {
755 	cldev->permanent_error = reset_code;
756 	cldev->error_is_retryable = 0;
757     }
758 
759     code = (reset_code < 0 ? reset_code : free_code < 0 ? old_error_code : 0);
760     if_debug1('L', "[L]hard flush of command list, status: %d\n", code);
761     return code;
762 }
763 
764 /* Write the target device's current parameter list */
765 private int	/* ret 0 all ok, -ve error */
766 clist_put_current_params(gx_device_clist_writer *cldev)
767 {
768     gx_device *target = cldev->target;
769     gs_c_param_list param_list;
770     int code;
771 
772     /*
773      * If a put_params call fails, the device will be left in a closed
774      * state, but higher-level code won't notice this fact.  We flag this by
775      * setting permanent_error, which prevents writing to the command list.
776      */
777 
778     if (cldev->permanent_error)
779 	return cldev->permanent_error;
780     gs_c_param_list_write(&param_list, cldev->memory);
781     code = (*dev_proc(target, get_params))
782 	(target, (gs_param_list *)&param_list);
783     if (code >= 0) {
784 	gs_c_param_list_read(&param_list);
785 	code = cmd_put_params( cldev, (gs_param_list *)&param_list );
786     }
787     gs_c_param_list_release(&param_list);
788 
789     return code;
790 }
791 
792 /* ---------------- Driver interface ---------------- */
793 
794 private int
795 clist_get_band(gx_device * dev, int y, int *band_start)
796 {
797     gx_device_clist_writer * const cdev =
798 	&((gx_device_clist *)dev)->writer;
799     int band_height = cdev->page_band_height;
800     int start;
801 
802     if (y < 0)
803 	y = 0;
804     else if (y >= dev->height)
805 	y = dev->height;
806     *band_start = start = y - y % band_height;
807     return min(dev->height - start, band_height);
808 }
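
/*
 * Example: with page_band_height = 100 on a 950-line page, y = 250 gives
 * *band_start = 200 and a return value of 100; y = 920 gives
 * *band_start = 900 and a return value of 50, the clipped height of the
 * last band.
 */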
809