1 /* $NetBSD: r128_state.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $ */
2
3 /* r128_state.c -- State support for r128 -*- linux-c -*-
4 * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
5 */
6 /*
7 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * All Rights Reserved.
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a
11 * copy of this software and associated documentation files (the "Software"),
12 * to deal in the Software without restriction, including without limitation
13 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
14 * and/or sell copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice (including the next
18 * paragraph) shall be included in all copies or substantial portions of the
19 * Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
24 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
25 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
26 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
27 * DEALINGS IN THE SOFTWARE.
28 *
29 * Authors:
30 * Gareth Hughes <gareth@valinux.com>
31 */
32
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: r128_state.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $");
35
36 #include <linux/pci.h>
37 #include <linux/slab.h>
38 #include <linux/uaccess.h>
39
40 #include <drm/drm_device.h>
41 #include <drm/drm_file.h>
42 #include <drm/drm_print.h>
43 #include <drm/r128_drm.h>
44
45 #include "r128_drv.h"
46
47 /* ================================================================
48 * CCE hardware state programming functions
49 */
50
51 static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
52 struct drm_clip_rect *boxes, int count)
53 {
54 u32 aux_sc_cntl = 0x00000000;
55 RING_LOCALS;
56 DRM_DEBUG("\n");
57
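/* Each cliprect costs five ring dwords (a register-write packet header plus
 * the four AUX scissor bounds), and the trailing AUX_SC_CNTL update costs two
 * more; only three auxiliary scissors exist, so at most three boxes are used.
 */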
58 BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
59
60 if (count >= 1) {
61 OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
62 OUT_RING(boxes[0].x1);
63 OUT_RING(boxes[0].x2 - 1);
64 OUT_RING(boxes[0].y1);
65 OUT_RING(boxes[0].y2 - 1);
66
67 aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
68 }
69 if (count >= 2) {
70 OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
71 OUT_RING(boxes[1].x1);
72 OUT_RING(boxes[1].x2 - 1);
73 OUT_RING(boxes[1].y1);
74 OUT_RING(boxes[1].y2 - 1);
75
76 aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
77 }
78 if (count >= 3) {
79 OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
80 OUT_RING(boxes[2].x1);
81 OUT_RING(boxes[2].x2 - 1);
82 OUT_RING(boxes[2].y1);
83 OUT_RING(boxes[2].y2 - 1);
84
85 aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
86 }
87
88 OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
89 OUT_RING(aux_sc_cntl);
90
91 ADVANCE_RING();
92 }
93
94 static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
95 {
96 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
97 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
98 RING_LOCALS;
99 DRM_DEBUG("\n");
100
101 BEGIN_RING(2);
102
103 OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
104 OUT_RING(ctx->scale_3d_cntl);
105
106 ADVANCE_RING();
107 }
108
109 static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
110 {
111 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
112 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
113 RING_LOCALS;
114 DRM_DEBUG("\n");
115
116 BEGIN_RING(13);
117
118 OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
119 OUT_RING(ctx->dst_pitch_offset_c);
120 OUT_RING(ctx->dp_gui_master_cntl_c);
121 OUT_RING(ctx->sc_top_left_c);
122 OUT_RING(ctx->sc_bottom_right_c);
123 OUT_RING(ctx->z_offset_c);
124 OUT_RING(ctx->z_pitch_c);
125 OUT_RING(ctx->z_sten_cntl_c);
126 OUT_RING(ctx->tex_cntl_c);
127 OUT_RING(ctx->misc_3d_state_cntl_reg);
128 OUT_RING(ctx->texture_clr_cmp_clr_c);
129 OUT_RING(ctx->texture_clr_cmp_msk_c);
130 OUT_RING(ctx->fog_color_c);
131
132 ADVANCE_RING();
133 }
134
135 static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
136 {
137 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
138 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
139 RING_LOCALS;
140 DRM_DEBUG("\n");
141
142 BEGIN_RING(3);
143
144 OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
145 OUT_RING(ctx->setup_cntl);
146 OUT_RING(ctx->pm4_vc_fpu_setup);
147
148 ADVANCE_RING();
149 }
150
151 static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
152 {
153 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
154 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
155 RING_LOCALS;
156 DRM_DEBUG("\n");
157
158 BEGIN_RING(5);
159
160 OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
161 OUT_RING(ctx->dp_write_mask);
162
163 OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
164 OUT_RING(ctx->sten_ref_mask_c);
165 OUT_RING(ctx->plane_3d_mask_c);
166
167 ADVANCE_RING();
168 }
169
170 static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
171 {
172 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
173 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
174 RING_LOCALS;
175 DRM_DEBUG("\n");
176
177 BEGIN_RING(2);
178
179 OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
180 OUT_RING(ctx->window_xy_offset);
181
182 ADVANCE_RING();
183 }
184
185 static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
186 {
187 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
188 drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
189 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
190 int i;
191 RING_LOCALS;
192 DRM_DEBUG("\n");
193
194 BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
195
196 OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
197 2 + R128_MAX_TEXTURE_LEVELS));
198 OUT_RING(tex->tex_cntl);
199 OUT_RING(tex->tex_combine_cntl);
200 OUT_RING(ctx->tex_size_pitch_c);
201 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
202 OUT_RING(tex->tex_offset[i]);
203
204 OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
205 OUT_RING(ctx->constant_color_c);
206 OUT_RING(tex->tex_border_color);
207
208 ADVANCE_RING();
209 }
210
211 static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
212 {
213 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
214 drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
215 int i;
216 RING_LOCALS;
217 DRM_DEBUG("\n");
218
219 BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
220
221 OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
222 OUT_RING(tex->tex_cntl);
223 OUT_RING(tex->tex_combine_cntl);
224 for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
225 OUT_RING(tex->tex_offset[i]);
226
227 OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
228 OUT_RING(tex->tex_border_color);
229
230 ADVANCE_RING();
231 }
232
233 static void r128_emit_state(drm_r128_private_t *dev_priv)
234 {
235 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
236 unsigned int dirty = sarea_priv->dirty;
237
238 DRM_DEBUG("dirty=0x%08x\n", dirty);
239
240 if (dirty & R128_UPLOAD_CORE) {
241 r128_emit_core(dev_priv);
242 sarea_priv->dirty &= ~R128_UPLOAD_CORE;
243 }
244
245 if (dirty & R128_UPLOAD_CONTEXT) {
246 r128_emit_context(dev_priv);
247 sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
248 }
249
250 if (dirty & R128_UPLOAD_SETUP) {
251 r128_emit_setup(dev_priv);
252 sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
253 }
254
255 if (dirty & R128_UPLOAD_MASKS) {
256 r128_emit_masks(dev_priv);
257 sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
258 }
259
260 if (dirty & R128_UPLOAD_WINDOW) {
261 r128_emit_window(dev_priv);
262 sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
263 }
264
265 if (dirty & R128_UPLOAD_TEX0) {
266 r128_emit_tex0(dev_priv);
267 sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
268 }
269
270 if (dirty & R128_UPLOAD_TEX1) {
271 r128_emit_tex1(dev_priv);
272 sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
273 }
274
275 /* Turn off the texture cache flushing */
276 sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
277
278 sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
279 }
280
281 #if R128_PERFORMANCE_BOXES
282 /* ================================================================
283 * Performance monitoring functions
284 */
285
286 static void r128_clear_box(drm_r128_private_t *dev_priv,
287 int x, int y, int w, int h, int r, int g, int b)
288 {
289 u32 pitch, offset;
290 u32 fb_bpp, color;
291 RING_LOCALS;
292
293 switch (dev_priv->fb_bpp) {
294 case 16:
295 fb_bpp = R128_GMC_DST_16BPP;
296 color = (((r & 0xf8) << 8) |
297 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
298 break;
299 case 24:
300 fb_bpp = R128_GMC_DST_24BPP;
301 color = ((r << 16) | (g << 8) | b);
302 break;
303 case 32:
304 fb_bpp = R128_GMC_DST_32BPP;
305 color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
306 break;
307 default:
308 return;
309 }
310
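/* Encode the destination as the hardware expects: pitch in 8-pixel units in
 * the high bits, offset in 32-byte units in the low bits (see the shifts
 * below).
 */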
311 offset = dev_priv->back_offset;
312 pitch = dev_priv->back_pitch >> 3;
313
314 BEGIN_RING(6);
315
316 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
317 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
318 R128_GMC_BRUSH_SOLID_COLOR |
319 fb_bpp |
320 R128_GMC_SRC_DATATYPE_COLOR |
321 R128_ROP3_P |
322 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
323
324 OUT_RING((pitch << 21) | (offset >> 5));
325 OUT_RING(color);
326
327 OUT_RING((x << 16) | y);
328 OUT_RING((w << 16) | h);
329
330 ADVANCE_RING();
331 }
332
333 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
334 {
335 if (atomic_read(&dev_priv->idle_count) == 0)
336 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
337 else
338 atomic_set(&dev_priv->idle_count, 0);
339 }
340
341 #endif
342
343 /* ================================================================
344 * CCE command dispatch functions
345 */
346
347 static void r128_print_dirty(const char *msg, unsigned int flags)
348 {
349 DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
350 msg,
351 flags,
352 (flags & R128_UPLOAD_CORE) ? "core, " : "",
353 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
354 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
355 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
356 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
357 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
358 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
359 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
360 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
361 }
362
363 static void r128_cce_dispatch_clear(struct drm_device *dev,
364 drm_r128_clear_t *clear)
365 {
366 drm_r128_private_t *dev_priv = dev->dev_private;
367 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
368 int nbox = sarea_priv->nbox;
369 struct drm_clip_rect *pbox = sarea_priv->boxes;
370 unsigned int flags = clear->flags;
371 int i;
372 RING_LOCALS;
373 DRM_DEBUG("\n");
374
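/* While page flipping with the back page displayed, front and back have
 * traded places, so swap the FRONT/BACK flags before clearing.
 */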
375 if (dev_priv->page_flipping && dev_priv->current_page == 1) {
376 unsigned int tmp = flags;
377
378 flags &= ~(R128_FRONT | R128_BACK);
379 if (tmp & R128_FRONT)
380 flags |= R128_BACK;
381 if (tmp & R128_BACK)
382 flags |= R128_FRONT;
383 }
384
385 for (i = 0; i < nbox; i++) {
386 int x = pbox[i].x1;
387 int y = pbox[i].y1;
388 int w = pbox[i].x2 - x;
389 int h = pbox[i].y2 - y;
390
391 DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
392 pbox[i].x1, pbox[i].y1, pbox[i].x2,
393 pbox[i].y2, flags);
394
395 if (flags & (R128_FRONT | R128_BACK)) {
396 BEGIN_RING(2);
397
398 OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
399 OUT_RING(clear->color_mask);
400
401 ADVANCE_RING();
402 }
403
404 if (flags & R128_FRONT) {
405 BEGIN_RING(6);
406
407 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
408 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
409 R128_GMC_BRUSH_SOLID_COLOR |
410 (dev_priv->color_fmt << 8) |
411 R128_GMC_SRC_DATATYPE_COLOR |
412 R128_ROP3_P |
413 R128_GMC_CLR_CMP_CNTL_DIS |
414 R128_GMC_AUX_CLIP_DIS);
415
416 OUT_RING(dev_priv->front_pitch_offset_c);
417 OUT_RING(clear->clear_color);
418
419 OUT_RING((x << 16) | y);
420 OUT_RING((w << 16) | h);
421
422 ADVANCE_RING();
423 }
424
425 if (flags & R128_BACK) {
426 BEGIN_RING(6);
427
428 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
429 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
430 R128_GMC_BRUSH_SOLID_COLOR |
431 (dev_priv->color_fmt << 8) |
432 R128_GMC_SRC_DATATYPE_COLOR |
433 R128_ROP3_P |
434 R128_GMC_CLR_CMP_CNTL_DIS |
435 R128_GMC_AUX_CLIP_DIS);
436
437 OUT_RING(dev_priv->back_pitch_offset_c);
438 OUT_RING(clear->clear_color);
439
440 OUT_RING((x << 16) | y);
441 OUT_RING((w << 16) | h);
442
443 ADVANCE_RING();
444 }
445
446 if (flags & R128_DEPTH) {
447 BEGIN_RING(6);
448
449 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
450 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
451 R128_GMC_BRUSH_SOLID_COLOR |
452 (dev_priv->depth_fmt << 8) |
453 R128_GMC_SRC_DATATYPE_COLOR |
454 R128_ROP3_P |
455 R128_GMC_CLR_CMP_CNTL_DIS |
456 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
457
458 OUT_RING(dev_priv->depth_pitch_offset_c);
459 OUT_RING(clear->clear_depth);
460
461 OUT_RING((x << 16) | y);
462 OUT_RING((w << 16) | h);
463
464 ADVANCE_RING();
465 }
466 }
467 }
468
469 static void r128_cce_dispatch_swap(struct drm_device *dev)
470 {
471 drm_r128_private_t *dev_priv = dev->dev_private;
472 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
473 int nbox = sarea_priv->nbox;
474 struct drm_clip_rect *pbox = sarea_priv->boxes;
475 int i;
476 RING_LOCALS;
477 DRM_DEBUG("\n");
478
479 #if R128_PERFORMANCE_BOXES
480 /* Do some trivial performance monitoring...
481 */
482 r128_cce_performance_boxes(dev_priv);
483 #endif
484
485 for (i = 0; i < nbox; i++) {
486 int x = pbox[i].x1;
487 int y = pbox[i].y1;
488 int w = pbox[i].x2 - x;
489 int h = pbox[i].y2 - y;
490
491 BEGIN_RING(7);
492
493 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
494 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
495 R128_GMC_DST_PITCH_OFFSET_CNTL |
496 R128_GMC_BRUSH_NONE |
497 (dev_priv->color_fmt << 8) |
498 R128_GMC_SRC_DATATYPE_COLOR |
499 R128_ROP3_S |
500 R128_DP_SRC_SOURCE_MEMORY |
501 R128_GMC_CLR_CMP_CNTL_DIS |
502 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
503
504 /* Make this work even if front & back are flipped:
505 */
506 if (dev_priv->current_page == 0) {
507 OUT_RING(dev_priv->back_pitch_offset_c);
508 OUT_RING(dev_priv->front_pitch_offset_c);
509 } else {
510 OUT_RING(dev_priv->front_pitch_offset_c);
511 OUT_RING(dev_priv->back_pitch_offset_c);
512 }
513
514 OUT_RING((x << 16) | y);
515 OUT_RING((x << 16) | y);
516 OUT_RING((w << 16) | h);
517
518 ADVANCE_RING();
519 }
520
521 /* Increment the frame counter. The client-side 3D driver must
522 * throttle the framerate by waiting for this value before
523 * performing the swapbuffer ioctl.
524 */
525 dev_priv->sarea_priv->last_frame++;
526
527 BEGIN_RING(2);
528
529 OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
530 OUT_RING(dev_priv->sarea_priv->last_frame);
531
532 ADVANCE_RING();
533 }
534
535 static void r128_cce_dispatch_flip(struct drm_device *dev)
536 {
537 drm_r128_private_t *dev_priv = dev->dev_private;
538 RING_LOCALS;
539 DRM_DEBUG("page=%d pfCurrentPage=%d\n",
540 dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
541
542 #if R128_PERFORMANCE_BOXES
543 /* Do some trivial performance monitoring...
544 */
545 r128_cce_performance_boxes(dev_priv);
546 #endif
547
548 BEGIN_RING(4);
549
550 R128_WAIT_UNTIL_PAGE_FLIPPED();
551 OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
552
553 if (dev_priv->current_page == 0)
554 OUT_RING(dev_priv->back_offset);
555 else
556 OUT_RING(dev_priv->front_offset);
557
558 ADVANCE_RING();
559
560 /* Increment the frame counter. The client-side 3D driver must
561 * throttle the framerate by waiting for this value before
562 * performing the swapbuffer ioctl.
563 */
564 dev_priv->sarea_priv->last_frame++;
565 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
566 1 - dev_priv->current_page;
567
568 BEGIN_RING(2);
569
570 OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
571 OUT_RING(dev_priv->sarea_priv->last_frame);
572
573 ADVANCE_RING();
574 }
575
576 static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
577 {
578 drm_r128_private_t *dev_priv = dev->dev_private;
579 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
580 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
581 int format = sarea_priv->vc_format;
582 int offset = buf->bus_address;
583 int size = buf->used;
584 int prim = buf_priv->prim;
585 int i = 0;
586 RING_LOCALS;
587 DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
588
589 if (0)
590 r128_print_dirty("dispatch_vertex", sarea_priv->dirty);
591
592 if (buf->used) {
593 buf_priv->dispatched = 1;
594
595 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
596 r128_emit_state(dev_priv);
597
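/* The scissor hardware only holds three cliprects at a time, so re-emit the
 * vertex buffer once per group of up to three boxes.
 */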
598 do {
599 /* Emit the next set of up to three cliprects */
600 if (i < sarea_priv->nbox) {
601 r128_emit_clip_rects(dev_priv,
602 &sarea_priv->boxes[i],
603 sarea_priv->nbox - i);
604 }
605
606 /* Emit the vertex buffer rendering commands */
607 BEGIN_RING(5);
608
609 OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
610 OUT_RING(offset);
611 OUT_RING(size);
612 OUT_RING(format);
613 OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
614 (size << R128_CCE_VC_CNTL_NUM_SHIFT));
615
616 ADVANCE_RING();
617
618 i += 3;
619 } while (i < sarea_priv->nbox);
620 }
621
622 if (buf_priv->discard) {
623 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
624
625 /* Emit the vertex buffer age */
626 BEGIN_RING(2);
627
628 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
629 OUT_RING(buf_priv->age);
630
631 ADVANCE_RING();
632
633 buf->pending = 1;
634 buf->used = 0;
635 /* FIXME: Check dispatched field */
636 buf_priv->dispatched = 0;
637 }
638
639 dev_priv->sarea_priv->last_dispatch++;
640
641 sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
642 sarea_priv->nbox = 0;
643 }
644
645 static void r128_cce_dispatch_indirect(struct drm_device *dev,
646 struct drm_buf *buf, int start, int end)
647 {
648 drm_r128_private_t *dev_priv = dev->dev_private;
649 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
650 RING_LOCALS;
651 DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
652
653 if (start != end) {
654 int offset = buf->bus_address + start;
655 int dwords = (end - start + 3) / sizeof(u32);
656
657 /* Indirect buffer data must be an even number of
658 * dwords, so if we've been given an odd number we must
659 * pad the data with a Type-2 CCE packet.
660 */
661 if (dwords & 1) {
662 u32 *data = (u32 *)
663 ((char *)dev->agp_buffer_map->handle
664 + buf->offset + start);
665 data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
666 }
667
668 buf_priv->dispatched = 1;
669
670 /* Fire off the indirect buffer */
671 BEGIN_RING(3);
672
673 OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
674 OUT_RING(offset);
675 OUT_RING(dwords);
676
677 ADVANCE_RING();
678 }
679
680 if (buf_priv->discard) {
681 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
682
683 /* Emit the indirect buffer age */
684 BEGIN_RING(2);
685
686 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
687 OUT_RING(buf_priv->age);
688
689 ADVANCE_RING();
690
691 buf->pending = 1;
692 buf->used = 0;
693 /* FIXME: Check dispatched field */
694 buf_priv->dispatched = 0;
695 }
696
697 dev_priv->sarea_priv->last_dispatch++;
698 }
699
700 static void r128_cce_dispatch_indices(struct drm_device *dev,
701 struct drm_buf *buf,
702 int start, int end, int count)
703 {
704 drm_r128_private_t *dev_priv = dev->dev_private;
705 drm_r128_buf_priv_t *buf_priv = buf->dev_private;
706 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
707 int format = sarea_priv->vc_format;
708 int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
709 int prim = buf_priv->prim;
710 u32 *data;
711 int dwords;
712 int i = 0;
713 RING_LOCALS;
714 DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
715
716 if (0)
717 r128_print_dirty("dispatch_indices", sarea_priv->dirty);
718
719 if (start != end) {
720 buf_priv->dispatched = 1;
721
722 if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
723 r128_emit_state(dev_priv);
724
725 dwords = (end - start + 3) / sizeof(u32);
726
727 data = (u32 *) ((char *)dev->agp_buffer_map->handle
728 + buf->offset + start);
729
730 data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
731 dwords - 2));
732
733 data[1] = cpu_to_le32(offset);
734 data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
735 data[3] = cpu_to_le32(format);
736 data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
737 (count << 16)));
738
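/* An odd index count leaves half of the last dword unused; clear the unused
 * 16-bit half (which half depends on byte order).
 */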
739 if (count & 0x1) {
740 #ifdef __LITTLE_ENDIAN
741 data[dwords - 1] &= 0x0000ffff;
742 #else
743 data[dwords - 1] &= 0xffff0000;
744 #endif
745 }
746
747 do {
748 /* Emit the next set of up to three cliprects */
749 if (i < sarea_priv->nbox) {
750 r128_emit_clip_rects(dev_priv,
751 &sarea_priv->boxes[i],
752 sarea_priv->nbox - i);
753 }
754
755 r128_cce_dispatch_indirect(dev, buf, start, end);
756
757 i += 3;
758 } while (i < sarea_priv->nbox);
759 }
760
761 if (buf_priv->discard) {
762 buf_priv->age = dev_priv->sarea_priv->last_dispatch;
763
764 /* Emit the vertex buffer age */
765 BEGIN_RING(2);
766
767 OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
768 OUT_RING(buf_priv->age);
769
770 ADVANCE_RING();
771
772 buf->pending = 1;
773 /* FIXME: Check dispatched field */
774 buf_priv->dispatched = 0;
775 }
776
777 dev_priv->sarea_priv->last_dispatch++;
778
779 sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
780 sarea_priv->nbox = 0;
781 }
782
783 static int r128_cce_dispatch_blit(struct drm_device *dev,
784 struct drm_file *file_priv,
785 drm_r128_blit_t *blit)
786 {
787 drm_r128_private_t *dev_priv = dev->dev_private;
788 struct drm_device_dma *dma = dev->dma;
789 struct drm_buf *buf;
790 drm_r128_buf_priv_t *buf_priv;
791 u32 *data;
792 int dword_shift, dwords;
793 RING_LOCALS;
794 DRM_DEBUG("\n");
795
796 /* The compiler won't optimize away a division by a variable,
797 * even if the only legal values are powers of two. Thus, we'll
798 * use a shift instead.
799 */
800 switch (blit->format) {
801 case R128_DATATYPE_ARGB8888:
802 dword_shift = 0;
803 break;
804 case R128_DATATYPE_ARGB1555:
805 case R128_DATATYPE_RGB565:
806 case R128_DATATYPE_ARGB4444:
807 case R128_DATATYPE_YVYU422:
808 case R128_DATATYPE_VYUY422:
809 dword_shift = 1;
810 break;
811 case R128_DATATYPE_CI8:
812 case R128_DATATYPE_RGB8:
813 dword_shift = 2;
814 break;
815 default:
816 DRM_ERROR("invalid blit format %d\n", blit->format);
817 return -EINVAL;
818 }
819
820 /* Flush the pixel cache, and mark the contents as Read Invalid.
821 * This ensures no pixel data gets mixed up with the texture
822 * data from the host data blit, otherwise part of the texture
823 * image may be corrupted.
824 */
825 BEGIN_RING(2);
826
827 OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
828 OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
829
830 ADVANCE_RING();
831
832 /* Dispatch the indirect buffer.
833 */
834 buf = dma->buflist[blit->idx];
835 buf_priv = buf->dev_private;
836
837 if (buf->file_priv != file_priv) {
838 DRM_ERROR("process %d using buffer owned by %p\n",
839 task_pid_nr(current), buf->file_priv);
840 return -EINVAL;
841 }
842 if (buf->pending) {
843 DRM_ERROR("sending pending buffer %d\n", blit->idx);
844 return -EINVAL;
845 }
846
847 buf_priv->discard = 1;
848
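/* Size of the host-data payload: width * height pixels converted to dwords
 * via the per-format shift chosen above.
 */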
849 dwords = (blit->width * blit->height) >> dword_shift;
850
851 data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
852
853 data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
854 data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
855 R128_GMC_BRUSH_NONE |
856 (blit->format << 8) |
857 R128_GMC_SRC_DATATYPE_COLOR |
858 R128_ROP3_S |
859 R128_DP_SRC_SOURCE_HOST_DATA |
860 R128_GMC_CLR_CMP_CNTL_DIS |
861 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
862
863 data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
864 data[3] = cpu_to_le32(0xffffffff);
865 data[4] = cpu_to_le32(0xffffffff);
866 data[5] = cpu_to_le32((blit->y << 16) | blit->x);
867 data[6] = cpu_to_le32((blit->height << 16) | blit->width);
868 data[7] = cpu_to_le32(dwords);
869
870 buf->used = (dwords + 8) * sizeof(u32);
871
872 r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
873
874 /* Flush the pixel cache after the blit completes. This ensures
875 * the texture data is written out to memory before rendering
876 * continues.
877 */
878 BEGIN_RING(2);
879
880 OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
881 OUT_RING(R128_PC_FLUSH_GUI);
882
883 ADVANCE_RING();
884
885 return 0;
886 }
887
888 /* ================================================================
889 * Tiled depth buffer management
890 *
891 * FIXME: These should all set the destination write mask for when we
892 * have hardware stencil support.
893 */
894
895 static int r128_cce_dispatch_write_span(struct drm_device *dev,
896 drm_r128_depth_t *depth)
897 {
898 drm_r128_private_t *dev_priv = dev->dev_private;
899 int count, x, y;
900 u32 *buffer;
901 u8 *mask;
902 int i, buffer_size, mask_size;
903 RING_LOCALS;
904 DRM_DEBUG("\n");
905
906 count = depth->n;
907 if (count > 4096 || count <= 0)
908 return -EMSGSIZE;
909
910 if (copy_from_user(&x, depth->x, sizeof(x)))
911 return -EFAULT;
912 if (copy_from_user(&y, depth->y, sizeof(y)))
913 return -EFAULT;
914
915 buffer_size = depth->n * sizeof(u32);
916 buffer = memdup_user(depth->buffer, buffer_size);
917 if (IS_ERR(buffer))
918 return PTR_ERR(buffer);
919
920 mask_size = depth->n;
921 if (depth->mask) {
922 mask = memdup_user(depth->mask, mask_size);
923 if (IS_ERR(mask)) {
924 kfree(buffer);
925 return PTR_ERR(mask);
926 }
927
928 for (i = 0; i < count; i++, x++) {
929 if (mask[i]) {
930 BEGIN_RING(6);
931
932 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
933 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
934 R128_GMC_BRUSH_SOLID_COLOR |
935 (dev_priv->depth_fmt << 8) |
936 R128_GMC_SRC_DATATYPE_COLOR |
937 R128_ROP3_P |
938 R128_GMC_CLR_CMP_CNTL_DIS |
939 R128_GMC_WR_MSK_DIS);
940
941 OUT_RING(dev_priv->depth_pitch_offset_c);
942 OUT_RING(buffer[i]);
943
944 OUT_RING((x << 16) | y);
945 OUT_RING((1 << 16) | 1);
946
947 ADVANCE_RING();
948 }
949 }
950
951 kfree(mask);
952 } else {
953 for (i = 0; i < count; i++, x++) {
954 BEGIN_RING(6);
955
956 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
957 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
958 R128_GMC_BRUSH_SOLID_COLOR |
959 (dev_priv->depth_fmt << 8) |
960 R128_GMC_SRC_DATATYPE_COLOR |
961 R128_ROP3_P |
962 R128_GMC_CLR_CMP_CNTL_DIS |
963 R128_GMC_WR_MSK_DIS);
964
965 OUT_RING(dev_priv->depth_pitch_offset_c);
966 OUT_RING(buffer[i]);
967
968 OUT_RING((x << 16) | y);
969 OUT_RING((1 << 16) | 1);
970
971 ADVANCE_RING();
972 }
973 }
974
975 kfree(buffer);
976
977 return 0;
978 }
979
980 static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
981 drm_r128_depth_t *depth)
982 {
983 drm_r128_private_t *dev_priv = dev->dev_private;
984 int count, *x, *y;
985 u32 *buffer;
986 u8 *mask;
987 int i, xbuf_size, ybuf_size, buffer_size, mask_size;
988 RING_LOCALS;
989 DRM_DEBUG("\n");
990
991 count = depth->n;
992 if (count > 4096 || count <= 0)
993 return -EMSGSIZE;
994
995 xbuf_size = count * sizeof(*x);
996 ybuf_size = count * sizeof(*y);
997 x = memdup_user(depth->x, xbuf_size);
998 if (IS_ERR(x))
999 return PTR_ERR(x);
1000 y = memdup_user(depth->y, ybuf_size);
1001 if (IS_ERR(y)) {
1002 kfree(x);
1003 return PTR_ERR(y);
1004 }
1005 buffer_size = depth->n * sizeof(u32);
1006 buffer = memdup_user(depth->buffer, buffer_size);
1007 if (IS_ERR(buffer)) {
1008 kfree(x);
1009 kfree(y);
1010 return PTR_ERR(buffer);
1011 }
1012
1013 if (depth->mask) {
1014 mask_size = depth->n;
1015 mask = memdup_user(depth->mask, mask_size);
1016 if (IS_ERR(mask)) {
1017 kfree(x);
1018 kfree(y);
1019 kfree(buffer);
1020 return PTR_ERR(mask);
1021 }
1022
1023 for (i = 0; i < count; i++) {
1024 if (mask[i]) {
1025 BEGIN_RING(6);
1026
1027 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1028 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1029 R128_GMC_BRUSH_SOLID_COLOR |
1030 (dev_priv->depth_fmt << 8) |
1031 R128_GMC_SRC_DATATYPE_COLOR |
1032 R128_ROP3_P |
1033 R128_GMC_CLR_CMP_CNTL_DIS |
1034 R128_GMC_WR_MSK_DIS);
1035
1036 OUT_RING(dev_priv->depth_pitch_offset_c);
1037 OUT_RING(buffer[i]);
1038
1039 OUT_RING((x[i] << 16) | y[i]);
1040 OUT_RING((1 << 16) | 1);
1041
1042 ADVANCE_RING();
1043 }
1044 }
1045
1046 kfree(mask);
1047 } else {
1048 for (i = 0; i < count; i++) {
1049 BEGIN_RING(6);
1050
1051 OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
1052 OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
1053 R128_GMC_BRUSH_SOLID_COLOR |
1054 (dev_priv->depth_fmt << 8) |
1055 R128_GMC_SRC_DATATYPE_COLOR |
1056 R128_ROP3_P |
1057 R128_GMC_CLR_CMP_CNTL_DIS |
1058 R128_GMC_WR_MSK_DIS);
1059
1060 OUT_RING(dev_priv->depth_pitch_offset_c);
1061 OUT_RING(buffer[i]);
1062
1063 OUT_RING((x[i] << 16) | y[i]);
1064 OUT_RING((1 << 16) | 1);
1065
1066 ADVANCE_RING();
1067 }
1068 }
1069
1070 kfree(x);
1071 kfree(y);
1072 kfree(buffer);
1073
1074 return 0;
1075 }
1076
1077 static int r128_cce_dispatch_read_span(struct drm_device *dev,
1078 drm_r128_depth_t *depth)
1079 {
1080 drm_r128_private_t *dev_priv = dev->dev_private;
1081 int count, x, y;
1082 RING_LOCALS;
1083 DRM_DEBUG("\n");
1084
1085 count = depth->n;
1086 if (count > 4096 || count <= 0)
1087 return -EMSGSIZE;
1088
1089 if (copy_from_user(&x, depth->x, sizeof(x)))
1090 return -EFAULT;
1091 if (copy_from_user(&y, depth->y, sizeof(y)))
1092 return -EFAULT;
1093
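/* Blit a count x 1 strip from the depth buffer into the span buffer so the
 * values can be read back.
 */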
1094 BEGIN_RING(7);
1095
1096 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
1097 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
1098 R128_GMC_DST_PITCH_OFFSET_CNTL |
1099 R128_GMC_BRUSH_NONE |
1100 (dev_priv->depth_fmt << 8) |
1101 R128_GMC_SRC_DATATYPE_COLOR |
1102 R128_ROP3_S |
1103 R128_DP_SRC_SOURCE_MEMORY |
1104 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
1105
1106 OUT_RING(dev_priv->depth_pitch_offset_c);
1107 OUT_RING(dev_priv->span_pitch_offset_c);
1108
1109 OUT_RING((x << 16) | y);
1110 OUT_RING((0 << 16) | 0);
1111 OUT_RING((count << 16) | 1);
1112
1113 ADVANCE_RING();
1114
1115 return 0;
1116 }
1117
1118 static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
1119 drm_r128_depth_t *depth)
1120 {
1121 drm_r128_private_t *dev_priv = dev->dev_private;
1122 int count, *x, *y;
1123 int i, xbuf_size, ybuf_size;
1124 RING_LOCALS;
1125 DRM_DEBUG("\n");
1126
1127 count = depth->n;
1128 if (count > 4096 || count <= 0)
1129 return -EMSGSIZE;
1130
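/* Never read back more pixels than one row of the depth buffer holds. */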
1131 if (count > dev_priv->depth_pitch)
1132 count = dev_priv->depth_pitch;
1133
1134 xbuf_size = count * sizeof(*x);
1135 ybuf_size = count * sizeof(*y);
1136 x = kmalloc(xbuf_size, GFP_KERNEL);
1137 if (x == NULL)
1138 return -ENOMEM;
1139 y = kmalloc(ybuf_size, GFP_KERNEL);
1140 if (y == NULL) {
1141 kfree(x);
1142 return -ENOMEM;
1143 }
1144 if (copy_from_user(x, depth->x, xbuf_size)) {
1145 kfree(x);
1146 kfree(y);
1147 return -EFAULT;
1148 }
1149 if (copy_from_user(y, depth->y, ybuf_size)) {
1150 kfree(x);
1151 kfree(y);
1152 return -EFAULT;
1153 }
1154
1155 for (i = 0; i < count; i++) {
1156 BEGIN_RING(7);
1157
1158 OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
1159 OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
1160 R128_GMC_DST_PITCH_OFFSET_CNTL |
1161 R128_GMC_BRUSH_NONE |
1162 (dev_priv->depth_fmt << 8) |
1163 R128_GMC_SRC_DATATYPE_COLOR |
1164 R128_ROP3_S |
1165 R128_DP_SRC_SOURCE_MEMORY |
1166 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
1167
1168 OUT_RING(dev_priv->depth_pitch_offset_c);
1169 OUT_RING(dev_priv->span_pitch_offset_c);
1170
1171 OUT_RING((x[i] << 16) | y[i]);
1172 OUT_RING((i << 16) | 0);
1173 OUT_RING((1 << 16) | 1);
1174
1175 ADVANCE_RING();
1176 }
1177
1178 kfree(x);
1179 kfree(y);
1180
1181 return 0;
1182 }
1183
1184 /* ================================================================
1185 * Polygon stipple
1186 */
1187
1188 static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
1189 {
1190 drm_r128_private_t *dev_priv = dev->dev_private;
1191 int i;
1192 RING_LOCALS;
1193 DRM_DEBUG("\n");
1194
1195 BEGIN_RING(33);
1196
1197 OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
1198 for (i = 0; i < 32; i++)
1199 OUT_RING(stipple[i]);
1200
1201 ADVANCE_RING();
1202 }
1203
1204 /* ================================================================
1205 * IOCTL functions
1206 */
1207
1208 static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
1209 {
1210 drm_r128_private_t *dev_priv = dev->dev_private;
1211 drm_r128_sarea_t *sarea_priv;
1212 drm_r128_clear_t *clear = data;
1213 DRM_DEBUG("\n");
1214
1215 LOCK_TEST_WITH_RETURN(dev, file_priv);
1216
1217 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1218
1219 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1220
1221 sarea_priv = dev_priv->sarea_priv;
1222
1223 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1224 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1225
1226 r128_cce_dispatch_clear(dev, clear);
1227 COMMIT_RING();
1228
1229 /* Make sure we restore the 3D state next time.
1230 */
1231 dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
1232
1233 return 0;
1234 }
1235
1236 static int r128_do_init_pageflip(struct drm_device *dev)
1237 {
1238 drm_r128_private_t *dev_priv = dev->dev_private;
1239 DRM_DEBUG("\n");
1240
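/* Save the current CRTC offset registers so r128_do_cleanup_pageflip() can
 * restore them when page flipping is disabled.
 */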
1241 dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
1242 dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
1243
1244 R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
1245 R128_WRITE(R128_CRTC_OFFSET_CNTL,
1246 dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
1247
1248 dev_priv->page_flipping = 1;
1249 dev_priv->current_page = 0;
1250 dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
1251
1252 return 0;
1253 }
1254
1255 static int r128_do_cleanup_pageflip(struct drm_device *dev)
1256 {
1257 drm_r128_private_t *dev_priv = dev->dev_private;
1258 DRM_DEBUG("\n");
1259
1260 R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
1261 R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
1262
1263 if (dev_priv->current_page != 0) {
1264 r128_cce_dispatch_flip(dev);
1265 COMMIT_RING();
1266 }
1267
1268 dev_priv->page_flipping = 0;
1269 return 0;
1270 }
1271
1272 /* Swapping and flipping are different operations, need different ioctls.
1273 * They can & should be intermixed to support multiple 3d windows.
1274 */
1275
1276 static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
1277 {
1278 drm_r128_private_t *dev_priv = dev->dev_private;
1279 DRM_DEBUG("\n");
1280
1281 LOCK_TEST_WITH_RETURN(dev, file_priv);
1282
1283 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1284
1285 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1286
1287 if (!dev_priv->page_flipping)
1288 r128_do_init_pageflip(dev);
1289
1290 r128_cce_dispatch_flip(dev);
1291
1292 COMMIT_RING();
1293 return 0;
1294 }
1295
1296 static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
1297 {
1298 drm_r128_private_t *dev_priv = dev->dev_private;
1299 drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
1300 DRM_DEBUG("\n");
1301
1302 LOCK_TEST_WITH_RETURN(dev, file_priv);
1303
1304 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1305
1306 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1307
1308 if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
1309 sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
1310
1311 r128_cce_dispatch_swap(dev);
1312 dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
1313 R128_UPLOAD_MASKS);
1314
1315 COMMIT_RING();
1316 return 0;
1317 }
1318
1319 static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
1320 {
1321 drm_r128_private_t *dev_priv = dev->dev_private;
1322 struct drm_device_dma *dma = dev->dma;
1323 struct drm_buf *buf;
1324 drm_r128_buf_priv_t *buf_priv;
1325 drm_r128_vertex_t *vertex = data;
1326
1327 LOCK_TEST_WITH_RETURN(dev, file_priv);
1328
1329 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1330
1331 DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
1332 task_pid_nr(current), vertex->idx, vertex->count, vertex->discard);
1333
1334 if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
1335 DRM_ERROR("buffer index %d (of %d max)\n",
1336 vertex->idx, dma->buf_count - 1);
1337 return -EINVAL;
1338 }
1339 if (vertex->prim < 0 ||
1340 vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1341 DRM_ERROR("buffer prim %d\n", vertex->prim);
1342 return -EINVAL;
1343 }
1344
1345 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1346 VB_AGE_TEST_WITH_RETURN(dev_priv);
1347
1348 buf = dma->buflist[vertex->idx];
1349 buf_priv = buf->dev_private;
1350
1351 if (buf->file_priv != file_priv) {
1352 DRM_ERROR("process %d using buffer owned by %p\n",
1353 task_pid_nr(current), buf->file_priv);
1354 return -EINVAL;
1355 }
1356 if (buf->pending) {
1357 DRM_ERROR("sending pending buffer %d\n", vertex->idx);
1358 return -EINVAL;
1359 }
1360
1361 buf->used = vertex->count;
1362 buf_priv->prim = vertex->prim;
1363 buf_priv->discard = vertex->discard;
1364
1365 r128_cce_dispatch_vertex(dev, buf);
1366
1367 COMMIT_RING();
1368 return 0;
1369 }
1370
1371 static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
1372 {
1373 drm_r128_private_t *dev_priv = dev->dev_private;
1374 struct drm_device_dma *dma = dev->dma;
1375 struct drm_buf *buf;
1376 drm_r128_buf_priv_t *buf_priv;
1377 drm_r128_indices_t *elts = data;
1378 int count;
1379
1380 LOCK_TEST_WITH_RETURN(dev, file_priv);
1381
1382 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1383
1384 DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", task_pid_nr(current),
1385 elts->idx, elts->start, elts->end, elts->discard);
1386
1387 if (elts->idx < 0 || elts->idx >= dma->buf_count) {
1388 DRM_ERROR("buffer index %d (of %d max)\n",
1389 elts->idx, dma->buf_count - 1);
1390 return -EINVAL;
1391 }
1392 if (elts->prim < 0 ||
1393 elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
1394 DRM_ERROR("buffer prim %d\n", elts->prim);
1395 return -EINVAL;
1396 }
1397
1398 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1399 VB_AGE_TEST_WITH_RETURN(dev_priv);
1400
1401 buf = dma->buflist[elts->idx];
1402 buf_priv = buf->dev_private;
1403
1404 if (buf->file_priv != file_priv) {
1405 DRM_ERROR("process %d using buffer owned by %p\n",
1406 task_pid_nr(current), buf->file_priv);
1407 return -EINVAL;
1408 }
1409 if (buf->pending) {
1410 DRM_ERROR("sending pending buffer %d\n", elts->idx);
1411 return -EINVAL;
1412 }
1413
1414 count = (elts->end - elts->start) / sizeof(u16);
1415 elts->start -= R128_INDEX_PRIM_OFFSET;
1416
1417 if (elts->start & 0x7) {
1418 DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
1419 return -EINVAL;
1420 }
1421 if (elts->start < buf->used) {
1422 DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
1423 return -EINVAL;
1424 }
1425
1426 buf->used = elts->end;
1427 buf_priv->prim = elts->prim;
1428 buf_priv->discard = elts->discard;
1429
1430 r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
1431
1432 COMMIT_RING();
1433 return 0;
1434 }
1435
1436 static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
1437 {
1438 struct drm_device_dma *dma = dev->dma;
1439 drm_r128_private_t *dev_priv = dev->dev_private;
1440 drm_r128_blit_t *blit = data;
1441 int ret;
1442
1443 LOCK_TEST_WITH_RETURN(dev, file_priv);
1444
1445 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1446
1447 DRM_DEBUG("pid=%d index=%d\n", task_pid_nr(current), blit->idx);
1448
1449 if (blit->idx < 0 || blit->idx >= dma->buf_count) {
1450 DRM_ERROR("buffer index %d (of %d max)\n",
1451 blit->idx, dma->buf_count - 1);
1452 return -EINVAL;
1453 }
1454
1455 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1456 VB_AGE_TEST_WITH_RETURN(dev_priv);
1457
1458 ret = r128_cce_dispatch_blit(dev, file_priv, blit);
1459
1460 COMMIT_RING();
1461 return ret;
1462 }
1463
1464 int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
1465 {
1466 drm_r128_private_t *dev_priv = dev->dev_private;
1467 drm_r128_depth_t *depth = data;
1468 int ret;
1469
1470 LOCK_TEST_WITH_RETURN(dev, file_priv);
1471
1472 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1473
1474 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1475
1476 ret = -EINVAL;
1477 switch (depth->func) {
1478 case R128_WRITE_SPAN:
1479 ret = r128_cce_dispatch_write_span(dev, depth);
1480 break;
1481 case R128_WRITE_PIXELS:
1482 ret = r128_cce_dispatch_write_pixels(dev, depth);
1483 break;
1484 case R128_READ_SPAN:
1485 ret = r128_cce_dispatch_read_span(dev, depth);
1486 break;
1487 case R128_READ_PIXELS:
1488 ret = r128_cce_dispatch_read_pixels(dev, depth);
1489 break;
1490 }
1491
1492 COMMIT_RING();
1493 return ret;
1494 }
1495
1496 int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
1497 {
1498 drm_r128_private_t *dev_priv = dev->dev_private;
1499 drm_r128_stipple_t *stipple = data;
1500 u32 mask[32];
1501
1502 LOCK_TEST_WITH_RETURN(dev, file_priv);
1503
1504 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1505
1506 if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
1507 return -EFAULT;
1508
1509 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1510
1511 r128_cce_dispatch_stipple(dev, mask);
1512
1513 COMMIT_RING();
1514 return 0;
1515 }
1516
1517 static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
1518 {
1519 drm_r128_private_t *dev_priv = dev->dev_private;
1520 struct drm_device_dma *dma = dev->dma;
1521 struct drm_buf *buf;
1522 drm_r128_buf_priv_t *buf_priv;
1523 drm_r128_indirect_t *indirect = data;
1524 #if 0
1525 RING_LOCALS;
1526 #endif
1527
1528 LOCK_TEST_WITH_RETURN(dev, file_priv);
1529
1530 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1531
1532 DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
1533 indirect->idx, indirect->start, indirect->end,
1534 indirect->discard);
1535
1536 if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
1537 DRM_ERROR("buffer index %d (of %d max)\n",
1538 indirect->idx, dma->buf_count - 1);
1539 return -EINVAL;
1540 }
1541
1542 buf = dma->buflist[indirect->idx];
1543 buf_priv = buf->dev_private;
1544
1545 if (buf->file_priv != file_priv) {
1546 DRM_ERROR("process %d using buffer owned by %p\n",
1547 task_pid_nr(current), buf->file_priv);
1548 return -EINVAL;
1549 }
1550 if (buf->pending) {
1551 DRM_ERROR("sending pending buffer %d\n", indirect->idx);
1552 return -EINVAL;
1553 }
1554
1555 if (indirect->start < buf->used) {
1556 DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
1557 indirect->start, buf->used);
1558 return -EINVAL;
1559 }
1560
1561 RING_SPACE_TEST_WITH_RETURN(dev_priv);
1562 VB_AGE_TEST_WITH_RETURN(dev_priv);
1563
1564 buf->used = indirect->end;
1565 buf_priv->discard = indirect->discard;
1566
1567 #if 0
1568 /* Wait for the 3D stream to idle before the indirect buffer
1569 * containing 2D acceleration commands is processed.
1570 */
1571 BEGIN_RING(2);
1572 RADEON_WAIT_UNTIL_3D_IDLE();
1573 ADVANCE_RING();
1574 #endif
1575
1576 /* Dispatch the indirect buffer full of commands from the
1577 * X server. This is insecure and is thus only available to
1578 * privileged clients.
1579 */
1580 r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
1581
1582 COMMIT_RING();
1583 return 0;
1584 }
1585
1586 int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
1587 {
1588 drm_r128_private_t *dev_priv = dev->dev_private;
1589 drm_r128_getparam_t *param = data;
1590 int value;
1591
1592 DEV_INIT_TEST_WITH_RETURN(dev_priv);
1593
1594 DRM_DEBUG("pid=%d\n", task_pid_nr(current));
1595
1596 switch (param->param) {
1597 case R128_PARAM_IRQ_NR:
1598 value = dev->pdev->irq;
1599 break;
1600 default:
1601 return -EINVAL;
1602 }
1603
1604 if (copy_to_user(param->value, &value, sizeof(int))) {
1605 DRM_ERROR("copy_to_user\n");
1606 return -EFAULT;
1607 }
1608
1609 return 0;
1610 }
1611
1612 void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
1613 {
1614 if (dev->dev_private) {
1615 drm_r128_private_t *dev_priv = dev->dev_private;
1616 if (dev_priv->page_flipping)
1617 r128_do_cleanup_pageflip(dev);
1618 }
1619 }
1620 void r128_driver_lastclose(struct drm_device *dev)
1621 {
1622 r128_do_cleanup_cce(dev);
1623 }
1624
1625 const struct drm_ioctl_desc r128_ioctls[] = {
1626 DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1627 DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1628 DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1629 DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1630 DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
1631 DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
1632 DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
1633 DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
1634 DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
1635 DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
1636 DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
1637 DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
1638 DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
1639 DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
1640 DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
1641 DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1642 DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
1643 };
1644
1645 int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);
1646