/* Copyright (C) 1989, 1996, 1997, 1998, 1999 Aladdin Enterprises.  All rights reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   For more information about licensing, please refer to
   http://www.ghostscript.com/licensing/. For information on
   commercial licensing, go to http://www.artifex.com/licensing/ or
   contact Artifex Software, Inc., 101 Lucas Valley Road #110,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: gspaint.c,v 1.10 2005/10/12 17:59:55 leonardo Exp $ */
/* Painting procedures for Ghostscript library */
#include "math_.h"              /* for fabs */
#include "gx.h"
#include "gpcheck.h"
#include "gserrors.h"
#include "gsropt.h"             /* for gxpaint.h */
#include "gxfixed.h"
#include "gxmatrix.h"           /* for gs_state */
#include "gspaint.h"
#include "gspath.h"
#include "gzpath.h"
#include "gxpaint.h"
#include "gzstate.h"
#include "gxdevice.h"
#include "gxdevmem.h"
#include "gzcpath.h"
#include "gxhldevc.h"

/* Define the nominal size for alpha buffers. */
#define abuf_nominal_SMALL 500
#define abuf_nominal_LARGE 2000
#if arch_small_memory
#  define abuf_nominal abuf_nominal_SMALL
#else
#  define abuf_nominal\
     (gs_debug_c('.') ? abuf_nominal_SMALL : abuf_nominal_LARGE)
#endif
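
/*
 * Note: abuf_nominal bounds the number of bytes devoted to one band of
 * the alpha buffer; alpha_buffer_init below derives the band height
 * from it.  The '.' debug switch selects the small size, presumably so
 * that the multi-band code paths get exercised on large-memory builds.
 */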

/* Erase the page */
int
gs_erasepage(gs_state * pgs)
{
    /*
     * We can't just fill with device white; we must take the
     * transfer function into account.
     */
    int code;

    if ((code = gs_gsave(pgs)) < 0)
        return code;
    if ((code = gs_setgray(pgs, 1.0)) >= 0) {
        /* Fill the page directly, ignoring clipping. */
        code = gs_fillpage(pgs);
    }
    gs_grestore(pgs);
    return code;
}

/* Fill the page with the current color. */
int
gs_fillpage(gs_state * pgs)
{
    gx_device *dev;
    int code = 0;
    gs_logical_operation_t save_lop;
    bool hl_color_available =
        gx_hld_is_hl_color_available((gs_imager_state *)pgs, pgs->dev_color);

    gx_set_dev_color(pgs);
    dev = gs_currentdevice(pgs);
    /* Fill the page directly, ignoring clipping. */
    /* Use the default RasterOp. */
    save_lop = pgs->log_op;
    gs_init_rop(pgs);
    if (hl_color_available) {
        gs_fixed_rect rect;

        rect.p.x = rect.p.y = 0;
        rect.q.x = int2fixed(dev->width);
        rect.q.y = int2fixed(dev->height);
        code = dev_proc(pgs->device, fill_rectangle_hl_color)(pgs->device,
                &rect, (const gs_imager_state *)pgs, pgs->dev_color, NULL);
    }
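    /*
     * Fall back to the low-level rectangle fill if the device offers no
     * usable high-level color, or if its fill_rectangle_hl_color method
     * declined the request with a rangecheck error.
     */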
    if (!hl_color_available || code == gs_error_rangecheck)
        code = gx_fill_rectangle(0, 0, dev->width, dev->height,
                                 pgs->dev_color, pgs);
    pgs->log_op = save_lop;
    if (code < 0)
        return code;
    return (*dev_proc(dev, sync_output)) (dev);
}

/*
 * Determine the number of bits of alpha buffer for a stroke or fill.
 * We should do alpha buffering iff this value is >1.
 */
private int
alpha_buffer_bits(gs_state * pgs)
{
    gx_device *dev;

    if (!color_is_pure(pgs->dev_color))
        return 0;
    dev = gs_currentdevice_inline(pgs);
    if (gs_device_is_abuf(dev)) {
        /* We're already writing into an alpha buffer. */
        return 0;
    }
    return (*dev_proc(dev, get_alpha_bits))
        (dev, (pgs->in_cachedevice ? go_text : go_graphics));
}
/*
 * Set up an alpha buffer for a stroke or fill operation.  Return 0
 * if no buffer could be allocated, 1 if a buffer was installed,
 * or the usual negative error code.
 *
 * The fill/stroke code sets up a clipping device if needed; however,
 * since we scale up all the path coordinates, we either need to scale up
 * the clipping region, or do clipping after, rather than before,
 * alpha buffering.  Either of these is a little inconvenient, but
 * the former is less inconvenient.
 */
private int
scale_paths(gs_state * pgs, int log2_scale_x, int log2_scale_y, bool do_path)
{
    /*
     * Because of clip and clippath, any of path, clip_path, and view_clip
     * may be aliases for each other.  The only reliable way to detect
     * this is by comparing the segments pointers.  Note that we must
     * scale the non-segment parts of the paths even if the segments are
     * aliased.
     */
    const gx_path_segments *seg_clip =
        (pgs->clip_path->path_valid ? pgs->clip_path->path.segments : 0);
    const gx_clip_rect_list *list_clip = pgs->clip_path->rect_list;
    const gx_path_segments *seg_view_clip;
    const gx_clip_rect_list *list_view_clip;
    const gx_path_segments *seg_effective_clip =
        (pgs->effective_clip_path->path_valid ?
         pgs->effective_clip_path->path.segments : 0);
    const gx_clip_rect_list *list_effective_clip =
        pgs->effective_clip_path->rect_list;

    gx_cpath_scale_exp2_shared(pgs->clip_path, log2_scale_x, log2_scale_y,
                               false, false);
    if (pgs->view_clip != 0 && pgs->view_clip != pgs->clip_path) {
        seg_view_clip =
            (pgs->view_clip->path_valid ? pgs->view_clip->path.segments : 0);
        list_view_clip = pgs->view_clip->rect_list;
        gx_cpath_scale_exp2_shared(pgs->view_clip, log2_scale_x, log2_scale_y,
                                   list_view_clip == list_clip,
                                   seg_view_clip && seg_view_clip == seg_clip);
    } else
        seg_view_clip = 0, list_view_clip = 0;
    if (pgs->effective_clip_path != pgs->clip_path &&
        pgs->effective_clip_path != pgs->view_clip
        )
        gx_cpath_scale_exp2_shared(pgs->effective_clip_path, log2_scale_x,
                                   log2_scale_y,
                                   list_effective_clip == list_clip ||
                                   list_effective_clip == list_view_clip,
                                   seg_effective_clip &&
                                   (seg_effective_clip == seg_clip ||
                                    seg_effective_clip == seg_view_clip));
    if (do_path) {
        const gx_path_segments *seg_path = pgs->path->segments;

        gx_path_scale_exp2_shared(pgs->path, log2_scale_x, log2_scale_y,
                                  seg_path == seg_clip ||
                                  seg_path == seg_view_clip ||
                                  seg_path == seg_effective_clip);
    }
    return 0;
}
private void
scale_dash_pattern(gs_state * pgs, floatp scale)
{
    int i;

    for (i = 0; i < pgs->line_params.dash.pattern_size; ++i)
        pgs->line_params.dash.pattern[i] *= scale;
    pgs->line_params.dash.offset *= scale;
    pgs->line_params.dash.pattern_length *= scale;
    pgs->line_params.dash.init_dist_left *= scale;
    if (pgs->line_params.dot_length_absolute)
        pgs->line_params.dot_length *= scale;
}
private int
alpha_buffer_init(gs_state * pgs, fixed extra_x, fixed extra_y, int alpha_bits)
{
    gx_device *dev = gs_currentdevice_inline(pgs);
    int log2_alpha_bits = ilog2(alpha_bits);
    gs_fixed_rect bbox;
    gs_int_rect ibox;
    uint width, raster, band_space;
    uint height;
    gs_log2_scale_point log2_scale;
    gs_memory_t *mem;
    gx_device_memory *mdev;

    log2_scale.x = log2_scale.y = log2_alpha_bits;
    gx_path_bbox(pgs->path, &bbox);
    ibox.p.x = fixed2int(bbox.p.x - extra_x) - 1;
    ibox.p.y = fixed2int(bbox.p.y - extra_y) - 1;
    ibox.q.x = fixed2int_ceiling(bbox.q.x + extra_x) + 1;
    ibox.q.y = fixed2int_ceiling(bbox.q.y + extra_y) + 1;
    width = (ibox.q.x - ibox.p.x) << log2_scale.x;
    raster = bitmap_raster(width);
    band_space = raster << log2_scale.y;
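    /*
     * Size the band so that it occupies roughly abuf_nominal bytes,
     * rounded down to a whole number of 2^log2_scale.y scaled rows;
     * if even one such row group is too wide for that budget, fall
     * back to a single group.
     */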
    height = (abuf_nominal / band_space) << log2_scale.y;
    if (height == 0)
        height = 1 << log2_scale.y;
    mem = pgs->memory;
    mdev = gs_alloc_struct(mem, gx_device_memory, &st_device_memory,
                           "alpha_buffer_init");
    if (mdev == 0)
        return 0;               /* if no room, don't buffer */
    gs_make_mem_abuf_device(mdev, mem, dev, &log2_scale,
                            alpha_bits, ibox.p.x << log2_scale.x);
    mdev->width = width;
    mdev->height = height;
    mdev->bitmap_memory = mem;
    if ((*dev_proc(mdev, open_device)) ((gx_device *) mdev) < 0) {
        /* No room for bits, punt. */
        gs_free_object(mem, mdev, "alpha_buffer_init");
        return 0;
    }
    gx_set_device_only(pgs, (gx_device *) mdev);
    scale_paths(pgs, log2_scale.x, log2_scale.y, true);
    return 1;
}

/* Release an alpha buffer. */
private void
alpha_buffer_release(gs_state * pgs, bool newpath)
{
    gx_device_memory *mdev =
        (gx_device_memory *) gs_currentdevice_inline(pgs);

    (*dev_proc(mdev, close_device)) ((gx_device *) mdev);
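    /*
     * Scale the paths back down.  The current path is skipped (do_path
     * is false) only when the caller will discard it via newpath and
     * no one else shares its segments; otherwise it must be restored too.
     */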
    scale_paths(pgs, -mdev->log2_scale.x, -mdev->log2_scale.y,
                !(newpath && !gx_path_is_shared(pgs->path)));
    /* Reference counting will free mdev. */
    gx_set_device_only(pgs, mdev->target);
}

/* Fill the current path using a specified rule. */
private int
fill_with_rule(gs_state * pgs, int rule)
{
    int code;

    /* If we're inside a charpath, just merge the current path */
    /* into the parent's path. */
    if (pgs->in_charpath)
        code = gx_path_add_char_path(pgs->show_gstate->path, pgs->path,
                                     pgs->in_charpath);
    else if (gs_is_null_device(pgs->device)) {
        /* Handle separately to prevent gs_state_color_load - bug 688308. */
        gs_newpath(pgs);
        code = 0;
    } else {
        int abits, acode;

        gx_set_dev_color(pgs);
        code = gs_state_color_load(pgs);
        if (code < 0)
            return code;
        abits = alpha_buffer_bits(pgs);
        if (abits > 1) {
            acode = alpha_buffer_init(pgs, pgs->fill_adjust.x,
                                      pgs->fill_adjust.y, abits);
            if (acode < 0)
                return acode;
        } else
            acode = 0;
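        /* acode > 0 means an alpha buffer was installed and must be released. */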
        code = gx_fill_path(pgs->path, pgs->dev_color, pgs, rule,
                            pgs->fill_adjust.x, pgs->fill_adjust.y);
        if (acode > 0)
            alpha_buffer_release(pgs, code >= 0);
        if (code >= 0)
            gs_newpath(pgs);
    }
    return code;
}
/* Fill using the winding number rule */
int
gs_fill(gs_state * pgs)
{
    return fill_with_rule(pgs, gx_rule_winding_number);
}
/* Fill using the even/odd rule */
int
gs_eofill(gs_state * pgs)
{
    return fill_with_rule(pgs, gx_rule_even_odd);
}

/* Stroke the current path */
int
gs_stroke(gs_state * pgs)
{
    int code;

    /*
     * If we're inside a charpath, just merge the current path
     * into the parent's path.
     */
    if (pgs->in_charpath) {
        if (pgs->in_charpath == cpm_true_charpath) {
            /*
             * A stroke inside a true charpath should do the
             * equivalent of strokepath.
             */
            code = gs_strokepath(pgs);
            if (code < 0)
                return code;
        }
        code = gx_path_add_char_path(pgs->show_gstate->path, pgs->path,
                                     pgs->in_charpath);
    } else if (gs_is_null_device(pgs->device)) {
        /* Handle separately to prevent gs_state_color_load. */
        gs_newpath(pgs);
        code = 0;
    } else {
        int abits, acode;

        gx_set_dev_color(pgs);
        code = gs_state_color_load(pgs);
        if (code < 0)
            return code;
        abits = alpha_buffer_bits(pgs);
        if (abits > 1) {
            /*
             * Expand the bounding box by the line width.
             * This is expensive to compute, so we only do it
             * if we know we're going to buffer.
             */
            float xxyy = fabs(pgs->ctm.xx) + fabs(pgs->ctm.yy);
            float xyyx = fabs(pgs->ctm.xy) + fabs(pgs->ctm.yx);
            float scale = (float)(1 << (abits / 2));
            float orig_width = gs_currentlinewidth(pgs);
            float new_width = orig_width * scale;
            fixed extra_adjust =
                float2fixed(max(xxyy, xyyx) * new_width / 2);
            float orig_flatness = gs_currentflat(pgs);
            gx_path spath;

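            /*
             * extra_adjust is a conservative bound on how far the
             * stroke can extend beyond the path's bounding box, based
             * on the CTM-transformed line width in the scaled space.
             */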
            /* Scale up the line width, dash pattern, and flatness. */
            if (extra_adjust < fixed_1)
                extra_adjust = fixed_1;
            acode = alpha_buffer_init(pgs,
                                      pgs->fill_adjust.x + extra_adjust,
                                      pgs->fill_adjust.y + extra_adjust,
                                      abits);
            if (acode < 0)
                return acode;
            gs_setlinewidth(pgs, new_width);
            scale_dash_pattern(pgs, scale);
            gs_setflat(pgs, orig_flatness * scale);
            /*
             * The alpha-buffer device requires that we fill the
             * entire path as a single unit.
             */
            gx_path_init_local(&spath, pgs->memory);
            code = gx_stroke_add(pgs->path, &spath, pgs);
            gs_setlinewidth(pgs, orig_width);
            scale_dash_pattern(pgs, 1.0 / scale);
            if (code >= 0)
                code = gx_fill_path(&spath, pgs->dev_color, pgs,
                                    gx_rule_winding_number,
                                    pgs->fill_adjust.x,
                                    pgs->fill_adjust.y);
            gs_setflat(pgs, orig_flatness);
            gx_path_free(&spath, "gs_stroke");
            if (acode > 0)
                alpha_buffer_release(pgs, code >= 0);
        } else
            code = gx_stroke_fill(pgs->path, pgs);
        if (code >= 0)
            gs_newpath(pgs);
    }
    return code;
}

/* Compute the stroked outline of the current path */
int
gs_strokepath(gs_state * pgs)
{
    gx_path spath;
    int code;

    gx_path_init_local(&spath, pgs->path->memory);
    code = gx_stroke_add(pgs->path, &spath, pgs);
    if (code < 0) {
        gx_path_free(&spath, "gs_strokepath");
        return code;
    }
    code = gx_path_assign_free(pgs->path, &spath);
    if (code < 0)
        return code;
    gx_setcurrentpoint(pgs, fixed2float(spath.position.x),
                       fixed2float(spath.position.y));
    return 0;
}