1 /* savage_state.c -- State and drawing support for Savage
2 *
3 * Copyright 2004 Felix Kuehling
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
22 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25 #include "drmP.h"
26 #include "savage_drm.h"
27 #include "savage_drv.h"
28
/* Fold a clip rectangle into the Savage3D scissor registers and emit
 * them via command DMA, but only when the effective scissor actually
 * changes.  The non-scissor bits come from the shadowed new_scstart/
 * new_scend values; a WAIT_3D is emitted before the register write and
 * dev_priv->waiting is set accordingly.
 */
void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
			       const struct drm_clip_rect *pbox)
{
	uint32_t new_start, new_end;

	new_start = (dev_priv->state.s3d.new_scstart &
		     ~SAVAGE_SCISSOR_MASK_S3D) |
		((uint32_t)pbox->x1 & 0x000007ff) |
		(((uint32_t)pbox->y1 << 16) & 0x07ff0000);
	new_end = (dev_priv->state.s3d.new_scend &
		   ~SAVAGE_SCISSOR_MASK_S3D) |
		(((uint32_t)pbox->x2 - 1) & 0x000007ff) |
		((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000);

	/* Skip the emission entirely when the hardware scissor is already
	 * what we want. */
	if (new_start == dev_priv->state.s3d.scstart &&
	    new_end == dev_priv->state.s3d.scend)
		return;

	{
		DMA_LOCALS;
		BEGIN_DMA(4);
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
		DMA_WRITE(new_start);
		DMA_WRITE(new_end);
		dev_priv->state.s3d.scstart = new_start;
		dev_priv->state.s3d.scend = new_end;
		dev_priv->waiting = 1;
		DMA_COMMIT();
	}
}
54
/* Fold a clip rectangle into the Savage4 scissor bits of DRAWCTRL0/1
 * and emit the two registers via command DMA, but only when the
 * effective values actually change.  A WAIT_3D precedes the register
 * write and dev_priv->waiting is set accordingly.
 */
void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
			      const struct drm_clip_rect *pbox)
{
	uint32_t ctrl0, ctrl1;

	ctrl0 = (dev_priv->state.s4.new_drawctrl0 &
		 ~SAVAGE_SCISSOR_MASK_S4) |
		((uint32_t)pbox->x1 & 0x000007ff) |
		(((uint32_t)pbox->y1 << 12) & 0x00fff000);
	ctrl1 = (dev_priv->state.s4.new_drawctrl1 &
		 ~SAVAGE_SCISSOR_MASK_S4) |
		(((uint32_t)pbox->x2 - 1) & 0x000007ff) |
		((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000);

	/* Nothing to do if the hardware already holds these values. */
	if (ctrl0 == dev_priv->state.s4.drawctrl0 &&
	    ctrl1 == dev_priv->state.s4.drawctrl1)
		return;

	{
		DMA_LOCALS;
		BEGIN_DMA(4);
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
		DMA_WRITE(ctrl0);
		DMA_WRITE(ctrl1);
		dev_priv->state.s4.drawctrl0 = ctrl0;
		dev_priv->state.s4.drawctrl1 = ctrl1;
		dev_priv->waiting = 1;
		DMA_COMMIT();
	}
}
80
/* Validate a user-supplied texture address register value.
 *
 * Bit 0 selects the heap (1 = AGP, 0 = local video memory), bits 1-2
 * are reserved and must read 0b01, and the remaining bits form the
 * address, which must lie inside the selected texture heap.
 * Returns 0 if valid, -EINVAL otherwise.
 */
static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
				 uint32_t addr)
{
	if ((addr & 6) != 2) {	/* reserved bits */
		DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
		return -EINVAL;
	}
	if (addr & 1) {		/* AGP heap */
		if (!dev_priv->agp_textures) {
			DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
				  unit, addr);
			return -EINVAL;
		}
		addr &= ~7;
		if (addr < dev_priv->agp_textures->offset ||
		    addr >= (dev_priv->agp_textures->offset +
			     dev_priv->agp_textures->size)) {
			DRM_ERROR
			    ("bad texAddr%d %08x (AGP addr out of range)\n",
			     unit, addr);
			return -EINVAL;
		}
	} else {		/* local video memory heap */
		addr &= ~7;
		if (addr < dev_priv->texture_offset ||
		    addr >= dev_priv->texture_offset + dev_priv->texture_size) {
			DRM_ERROR
			    ("bad texAddr%d %08x (local addr out of range)\n",
			     unit, addr);
			return -EINVAL;
		}
	}
	return 0;
}
115
/* SAVE_STATE: if register 'reg' lies inside the uploaded range
 * [start, start+count), shadow its new value in dev_priv->state.
 * NOTE: relies on 'start', 'count', 'regs' and 'dev_priv' being in
 * scope at the expansion site (used by the verify_state functions). */
#define SAVE_STATE(reg,where) \
if(start <= reg && start + count > reg) \
	dev_priv->state.where = regs[reg - start]
/* SAVE_STATE_MASK: like SAVE_STATE, but only updates the bits selected
 * by 'mask', preserving the other bits of the shadowed value. */
#define SAVE_STATE_MASK(reg,where,mask) do { \
	if(start <= reg && start + count > reg) { \
		uint32_t tmp; \
		tmp = regs[reg - start]; \
		dev_priv->state.where = (tmp & (mask)) | \
			(dev_priv->state.where & ~(mask)); \
	} \
} while (0)
/* Validate and shadow an S3D (Savage3D-class) register upload.
 *
 * Rejects ranges outside TEXPALADDR..DESTTEXRWWATERMARK, shadows the
 * non-scissor bits of SCSTART/SCEND (the scissor itself is emitted per
 * clip rect from savage_dispatch_draw), and verifies the texture address
 * when any texture register was touched and texturing is enabled.
 * Returns 0 on success, -EINVAL on a bad range or texture address.
 */
static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
				   unsigned int start, unsigned int count,
				   const uint32_t *regs)
{
	if (start < SAVAGE_TEXPALADDR_S3D ||
	    start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
			  start, start + count - 1);
		return -EINVAL;
	}

	/* User-supplied scissor bits are masked out here and replaced per
	 * clip rect later. */
	SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
			~SAVAGE_SCISSOR_MASK_S3D);
	SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
			~SAVAGE_SCISSOR_MASK_S3D);

	/* if any texture regs were changed ... */
	if (start <= SAVAGE_TEXCTRL_S3D &&
	    start + count > SAVAGE_TEXPALADDR_S3D) {
		/* ... check texture state */
		SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
		SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
		if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
			return savage_verify_texaddr(dev_priv, 0,
					dev_priv->state.s3d.texaddr);
	}

	return 0;
}
156
/* Validate and shadow an S4 (Savage4-class) register upload.
 *
 * Rejects ranges outside DRAWLOCALCTRL..TEXBLENDCOLOR, shadows the
 * non-scissor bits of DRAWCTRL0/1 (scissor is emitted per clip rect
 * from savage_dispatch_draw), and verifies both texture unit addresses
 * when any texture register was touched and the unit is enabled.
 * Returns 0 on success or -EINVAL.
 */
static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
				  unsigned int start, unsigned int count,
				  const uint32_t *regs)
{
	int ret = 0;

	if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
	    start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
			  start, start + count - 1);
		return -EINVAL;
	}

	/* User-supplied scissor bits are masked out here and replaced per
	 * clip rect later. */
	SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
			~SAVAGE_SCISSOR_MASK_S4);
	SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
			~SAVAGE_SCISSOR_MASK_S4);

	/* if any texture regs were changed ... */
	if (start <= SAVAGE_TEXDESCR_S4 &&
	    start + count > SAVAGE_TEXPALADDR_S4) {
		/* ... check texture state */
		SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
		SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
		SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
			ret |= savage_verify_texaddr(dev_priv, 0,
					dev_priv->state.s4.texaddr0);
		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
			ret |= savage_verify_texaddr(dev_priv, 1,
					dev_priv->state.s4.texaddr1);
	}

	return ret;
}
192 #undef SAVE_STATE
193 #undef SAVE_STATE_MASK
194
/* Upload a block of state registers via command DMA.
 *
 * Verifies the range for the current chip family, carves the scissor
 * registers out of it (those are emitted per clip rect from
 * savage_dispatch_draw), and emits the remaining register(s).  If the
 * scissor registers fall in the middle of the range it is split in two:
 * 'count' registers starting at 'start', then 'count2' registers after
 * the scissor pair.  A "global" state command is preceded by a 3D-idle
 * wait.  Returns 0 or a negative errno from verification.
 */
static int savage_dispatch_state(drm_savage_private_t *dev_priv,
				 const drm_savage_cmd_header_t *cmd_header,
				 const uint32_t *regs)
{
	unsigned int count = cmd_header->state.count;
	unsigned int start = cmd_header->state.start;
	unsigned int count2 = 0;	/* registers above the scissor pair */
	unsigned int bci_size;
	int ret;
	DMA_LOCALS;

	if (!count)
		return 0;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		ret = savage_verify_state_s3d(dev_priv, start, count, regs);
		if (ret != 0)
			return ret;
		/* scissor regs are emitted in savage_dispatch_draw */
		if (start < SAVAGE_SCSTART_S3D) {
			if (start + count > SAVAGE_SCEND_S3D + 1)
				count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
			if (start + count > SAVAGE_SCSTART_S3D)
				count = SAVAGE_SCSTART_S3D - start;
		} else if (start <= SAVAGE_SCEND_S3D) {
			/* Range begins inside the scissor pair: drop the
			 * scissor part, keep anything above it. */
			if (start + count > SAVAGE_SCEND_S3D + 1) {
				count -= SAVAGE_SCEND_S3D + 1 - start;
				start = SAVAGE_SCEND_S3D + 1;
			} else
				return 0;
		}
	} else {
		ret = savage_verify_state_s4(dev_priv, start, count, regs);
		if (ret != 0)
			return ret;
		/* scissor regs are emitted in savage_dispatch_draw */
		if (start < SAVAGE_DRAWCTRL0_S4) {
			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
				count2 = count -
					(SAVAGE_DRAWCTRL1_S4 + 1 - start);
			if (start + count > SAVAGE_DRAWCTRL0_S4)
				count = SAVAGE_DRAWCTRL0_S4 - start;
		} else if (start <= SAVAGE_DRAWCTRL1_S4) {
			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
				count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
				start = SAVAGE_DRAWCTRL1_S4 + 1;
			} else
				return 0;
		}
	}

	/* One SET_REGISTERS header is needed per run of up to 255 regs. */
	bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;

	if (cmd_header->state.global) {
		BEGIN_DMA(bci_size + 1);
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		dev_priv->waiting = 1;
	} else {
		BEGIN_DMA(bci_size);
	}

	do {
		while (count > 0) {
			unsigned int n = count < 255 ? count : 255;
			DMA_SET_REGISTERS(start, n);
			DMA_COPY(regs, n);
			count -= n;
			start += n;
			regs += n;
		}
		/* Skip the two scissor registers and continue with the
		 * second half of a split range (count2 == 0 ends the loop). */
		start += 2;
		regs += 2;
		count = count2;
		count2 = 0;
	} while (count);

	DMA_COMMIT();

	return 0;
}
275
/* Dispatch a non-indexed primitive that sources vertices from a DMA
 * buffer, generating sequential indices start..start+count-1.
 *
 * Validates primitive type, skip flags and the vertex range, then emits
 * BCI_DRAW_INDICES commands in batches of at most 255 indices.  Vertex
 * DMA cannot run concurrently with command DMA, so buffered (faked) DMA
 * is flushed and everything is written directly through the BCI.
 * Returns 0 or -EINVAL on invalid parameters.
 */
static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
				    const drm_savage_cmd_header_t *cmd_header,
				    const struct drm_buf *dmabuf)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->prim.prim;
	unsigned int skip = cmd_header->prim.skip;
	unsigned int n = cmd_header->prim.count;
	unsigned int start = cmd_header->prim.start;
	unsigned int i;
	BCI_LOCALS;

	if (!dmabuf) {
		DRM_ERROR("called without dma buffers!\n");
		return -EINVAL;
	}

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		/* FALLTHROUGH */
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
				  n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
			     n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip != 0) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
	} else {
		/* size = 10 dwords minus one per set bit in the low 8 skip
		 * flags; Savage4 DMA vertices must end up 8 dwords long. */
		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
			(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
			(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
		if (reorder) {
			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
			return -EINVAL;
		}
	}

	/* DMA buffers hold dmabuf->total / 32 vertex slots (32 bytes each). */
	if (start + n > dmabuf->total / 32) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, dmabuf->total / 32);
		return -EINVAL;
	}

	/* Vertex DMA doesn't work with command DMA at the same time,
	 * so we use BCI_... to submit commands here. Flush buffered
	 * faked DMA first. */
	DMA_FLUSH();

	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
		BEGIN_BCI(2);
		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
		dev_priv->state.common.vbaddr = dmabuf->bus_address;
	}
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
		/* Workaround for what looks like a hardware bug. If a
		 * WAIT_3D_IDLE was emitted some time before the
		 * indexed drawing command then the engine will lock
		 * up. There are two known workarounds:
		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
		BEGIN_BCI(63);
		for (i = 0; i < 63; ++i)
			BCI_WRITE(BCI_CMD_WAIT);
		dev_priv->waiting = 0;
	}

	prim <<= 25;	/* move primitive type into its command-word field */
	while (n != 0) {
		/* Can emit up to 255 indices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;
		if (reorder) {
			/* Need to reorder indices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorderarr[3] = { -1, -1, -1 };
			reorderarr[start % 3] = 2;

			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, start + 2);

			/* Indices are packed two per dword, low half first. */
			for (i = start + 1; i + 1 < start + count; i += 2)
				BCI_WRITE((i + reorderarr[i % 3]) |
					  ((i + 1 +
					    reorderarr[(i + 1) % 3]) << 16));
			if (i < start + count)
				BCI_WRITE(i + reorderarr[i % 3]);
		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, start);

			for (i = start + 1; i + 1 < start + count; i += 2)
				BCI_WRITE(i | ((i + 1) << 16));
			if (i < start + count)
				BCI_WRITE(i);
		} else {
			BEGIN_BCI((count + 2 + 1) / 2);
			BCI_DRAW_INDICES_S4(count, prim, skip);

			for (i = start; i + 1 < start + count; i += 2)
				BCI_WRITE(i | ((i + 1) << 16));
			if (i < start + count)
				BCI_WRITE(i);
		}

		start += count;
		n -= count;

		/* Subsequent batches continue the same primitive. */
		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}
416
/* Dispatch a non-indexed primitive whose vertex data is copied inline
 * from a client-supplied vertex buffer (vtxbuf, vb_size in dwords,
 * vb_stride in dwords per vertex) into the command DMA stream.
 *
 * Validates primitive type, skip flags and the vertex range, then emits
 * DMA_DRAW_PRIMITIVE batches of at most 255 vertices, copying vtx_size
 * dwords per vertex.  Returns 0 or -EINVAL.
 */
static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
				   const drm_savage_cmd_header_t *cmd_header,
				   const uint32_t *vtxbuf, unsigned int vb_size,
				   unsigned int vb_stride)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->prim.prim;
	unsigned int skip = cmd_header->prim.skip;
	unsigned int n = cmd_header->prim.count;
	unsigned int start = cmd_header->prim.start;
	unsigned int vtx_size;
	unsigned int i;
	DMA_LOCALS;

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		/* FALLTHROUGH */
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
				  n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
			     n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip > SAVAGE_SKIP_ALL_S3D) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 8;	/* full vertex */
	} else {
		if (skip > SAVAGE_SKIP_ALL_S4) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 10;	/* full vertex */
	}

	/* Each set skip flag drops one dword from the vertex. */
	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
		(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
		(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);

	if (vtx_size > vb_stride) {
		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
			  vtx_size, vb_stride);
		return -EINVAL;
	}

	if (start + n > vb_size / (vb_stride * 4)) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, vb_size / (vb_stride * 4));
		return -EINVAL;
	}

	prim <<= 25;	/* move primitive type into its command-word field */
	while (n != 0) {
		/* Can emit up to 255 vertices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;
		if (reorder) {
			/* Need to reorder vertices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorderarr[3] = { -1, -1, -1 };
			reorderarr[start % 3] = 2;

			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = start; i < start + count; ++i) {
				unsigned int j = i + reorderarr[i % 3];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}

			DMA_COMMIT();
		} else {
			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			/* Contiguous vertices can be copied in one go. */
			if (vb_stride == vtx_size) {
				DMA_COPY(&vtxbuf[vb_stride * start],
					 vtx_size * count);
			} else {
				for (i = start; i < start + count; ++i) {
					DMA_COPY(&vtxbuf[vb_stride * i],
						 vtx_size);
				}
			}

			DMA_COMMIT();
		}

		start += count;
		n -= count;

		/* Subsequent batches continue the same primitive. */
		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}
535
/* Dispatch an indexed primitive sourcing vertices from a DMA buffer,
 * with explicit 16-bit indices supplied in 'idx'.
 *
 * Validates primitive type, skip flags and every index, then emits
 * BCI_DRAW_INDICES batches of at most 255 indices.  As with the
 * non-indexed DMA path, buffered (faked) DMA is flushed first and
 * commands go directly through the BCI.  Returns 0 or -EINVAL.
 */
static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
				   const drm_savage_cmd_header_t *cmd_header,
				   const uint16_t *idx,
				   const struct drm_buf *dmabuf)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->idx.prim;
	unsigned int skip = cmd_header->idx.skip;
	unsigned int n = cmd_header->idx.count;
	unsigned int i;
	BCI_LOCALS;

	if (!dmabuf) {
		DRM_ERROR("called without dma buffers!\n");
		return -EINVAL;
	}

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		/* FALLTHROUGH */
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip != 0) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
	} else {
		/* size = 10 dwords minus one per set bit in the low 8 skip
		 * flags; Savage4 DMA vertices must end up 8 dwords long. */
		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
			(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
			(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			return -EINVAL;
		}
		if (reorder) {
			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
			return -EINVAL;
		}
	}

	/* Vertex DMA doesn't work with command DMA at the same time,
	 * so we use BCI_... to submit commands here. Flush buffered
	 * faked DMA first. */
	DMA_FLUSH();

	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
		BEGIN_BCI(2);
		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
		dev_priv->state.common.vbaddr = dmabuf->bus_address;
	}
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
		/* Workaround for what looks like a hardware bug. If a
		 * WAIT_3D_IDLE was emitted some time before the
		 * indexed drawing command then the engine will lock
		 * up. There are two known workarounds:
		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
		BEGIN_BCI(63);
		for (i = 0; i < 63; ++i)
			BCI_WRITE(BCI_CMD_WAIT);
		dev_priv->waiting = 0;
	}

	prim <<= 25;	/* move primitive type into its command-word field */
	while (n != 0) {
		/* Can emit up to 255 indices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		/* check indices
		 * NOTE(review): '>' accepts idx[i] == total/32, one past the
		 * last vertex slot; savage_dispatch_dma_prim rejects that
		 * value via 'start + n > total / 32'.  Looks like an
		 * off-by-one -- confirm before changing. */
		for (i = 0; i < count; ++i) {
			if (idx[i] > dmabuf->total / 32) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], dmabuf->total / 32);
				return -EINVAL;
			}
		}

		if (reorder) {
			/* Need to reorder indices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorderarr[3] = { 2, -1, -1 };

			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, idx[2]);

			/* Indices are packed two per dword, low half first. */
			for (i = 1; i + 1 < count; i += 2)
				BCI_WRITE(idx[i + reorderarr[i % 3]] |
					  (idx[i + 1 +
					       reorderarr[(i + 1) % 3]] << 16));
			if (i < count)
				BCI_WRITE(idx[i + reorderarr[i % 3]]);
		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, idx[0]);

			for (i = 1; i + 1 < count; i += 2)
				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
			if (i < count)
				BCI_WRITE(idx[i]);
		} else {
			BEGIN_BCI((count + 2 + 1) / 2);
			BCI_DRAW_INDICES_S4(count, prim, skip);

			for (i = 0; i + 1 < count; i += 2)
				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
			if (i < count)
				BCI_WRITE(idx[i]);
		}

		idx += count;
		n -= count;

		/* Subsequent batches continue the same primitive. */
		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}
677
/* Dispatch an indexed primitive whose vertex data is copied inline from
 * a client-supplied vertex buffer, with explicit 16-bit indices in 'idx'.
 *
 * Validates primitive type, skip flags and every index, then emits
 * DMA_DRAW_PRIMITIVE batches of at most 255 vertices, copying vtx_size
 * dwords for each indexed vertex.  Returns 0 or -EINVAL.
 */
static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
				  const drm_savage_cmd_header_t *cmd_header,
				  const uint16_t *idx,
				  const uint32_t *vtxbuf,
				  unsigned int vb_size, unsigned int vb_stride)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->idx.prim;
	unsigned int skip = cmd_header->idx.skip;
	unsigned int n = cmd_header->idx.count;
	unsigned int vtx_size;
	unsigned int i;
	DMA_LOCALS;

	if (!n)
		return 0;

	switch (prim) {
	case SAVAGE_PRIM_TRILIST_201:
		reorder = 1;
		prim = SAVAGE_PRIM_TRILIST;
		/* FALLTHROUGH */
	case SAVAGE_PRIM_TRILIST:
		if (n % 3 != 0) {
			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
			return -EINVAL;
		}
		break;
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
		if (n < 3) {
			DRM_ERROR
			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("invalid primitive type %u\n", prim);
		return -EINVAL;
	}

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip > SAVAGE_SKIP_ALL_S3D) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 8;	/* full vertex */
	} else {
		if (skip > SAVAGE_SKIP_ALL_S4) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
			return -EINVAL;
		}
		vtx_size = 10;	/* full vertex */
	}

	/* Each set skip flag drops one dword from the vertex. */
	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
		(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
		(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);

	if (vtx_size > vb_stride) {
		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
			  vtx_size, vb_stride);
		return -EINVAL;
	}

	prim <<= 25;	/* move primitive type into its command-word field */
	while (n != 0) {
		/* Can emit up to 255 vertices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		/* Check indices
		 * NOTE(review): '>' accepts idx[i] == vb_size/(vb_stride*4),
		 * one past the last vertex; savage_dispatch_vb_prim rejects
		 * that value.  Looks like an off-by-one -- confirm before
		 * changing. */
		for (i = 0; i < count; ++i) {
			if (idx[i] > vb_size / (vb_stride * 4)) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], vb_size / (vb_stride * 4));
				return -EINVAL;
			}
		}

		if (reorder) {
			/* Need to reorder vertices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorderarr[3] = { 2, -1, -1 };

			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = 0; i < count; ++i) {
				unsigned int j = idx[i + reorderarr[i % 3]];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}

			DMA_COMMIT();
		} else {
			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = 0; i < count; ++i) {
				unsigned int j = idx[i];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}

			DMA_COMMIT();
		}

		idx += count;
		n -= count;

		/* Subsequent batches continue the same primitive. */
		prim |= BCI_CMD_DRAW_CONT;
	}

	return 0;
}
792
/* Clear the front, back and/or depth buffers (selected by
 * cmd_header->clear0.flags) within the given clip rects.
 *
 * 'data' is the command header immediately following the clear command;
 * its clear1 fields hold the bit-plane write mask and the clear value.
 * A non-full mask is programmed into BITPLANEWTMASK around the clears
 * and restored to all-ones afterwards.  Emits one solid-fill rectangle
 * (ROP 0xCC = copy) per buffer per box.  Returns 0.
 */
static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
				 const drm_savage_cmd_header_t *cmd_header,
				 const drm_savage_cmd_header_t *data,
				 unsigned int nbox,
				 const struct drm_clip_rect *boxes)
{
	unsigned int flags = cmd_header->clear0.flags;
	unsigned int clear_cmd;
	unsigned int i, nbufs;
	DMA_LOCALS;

	if (nbox == 0)
		return 0;

	clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
	    BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
	BCI_CMD_SET_ROP(clear_cmd,0xCC);

	/* Number of buffers selected by the FRONT/BACK/DEPTH flags. */
	nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
	    ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
	if (nbufs == 0)
		return 0;

	if (data->clear1.mask != 0xffffffff) {
		/* set mask */
		BEGIN_DMA(2);
		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
		DMA_WRITE(data->clear1.mask);
		DMA_COMMIT();
	}
	for (i = 0; i < nbox; ++i) {
		unsigned int x, y, w, h;
		unsigned int buf;

		x = boxes[i].x1, y = boxes[i].y1;
		w = boxes[i].x2 - boxes[i].x1;
		h = boxes[i].y2 - boxes[i].y1;
		/* 6 dwords per buffer: cmd, offset, descriptor, color,
		 * position, size. */
		BEGIN_DMA(nbufs * 6);
		for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
			if (!(flags & buf))
				continue;
			DMA_WRITE(clear_cmd);
			switch (buf) {
			case SAVAGE_FRONT:
				DMA_WRITE(dev_priv->front_offset);
				DMA_WRITE(dev_priv->front_bd);
				break;
			case SAVAGE_BACK:
				DMA_WRITE(dev_priv->back_offset);
				DMA_WRITE(dev_priv->back_bd);
				break;
			case SAVAGE_DEPTH:
				DMA_WRITE(dev_priv->depth_offset);
				DMA_WRITE(dev_priv->depth_bd);
				break;
			}
			DMA_WRITE(data->clear1.value);
			DMA_WRITE(BCI_X_Y(x, y));
			DMA_WRITE(BCI_W_H(w, h));
		}
		DMA_COMMIT();
	}
	if (data->clear1.mask != 0xffffffff) {
		/* reset mask */
		BEGIN_DMA(2);
		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
		DMA_WRITE(0xffffffff);
		DMA_COMMIT();
	}

	return 0;
}
865
/* Copy the back buffer to the front (global) buffer for each clip rect.
 * Emits one ROP-0xCC (straight copy) blit rectangle per box; the source
 * and destination coordinates are identical.  Returns 0.
 */
static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
				unsigned int nbox, const struct drm_clip_rect *boxes)
{
	unsigned int cmd;
	unsigned int i;
	DMA_LOCALS;

	if (nbox == 0)
		return 0;

	cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
	    BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
	BCI_CMD_SET_ROP(cmd, 0xCC);

	for (i = 0; i < nbox; ++i) {
		const struct drm_clip_rect *b = &boxes[i];

		/* cmd, back offset, back descriptor, src x/y, dst x/y, w/h */
		BEGIN_DMA(6);
		DMA_WRITE(cmd);
		DMA_WRITE(dev_priv->back_offset);
		DMA_WRITE(dev_priv->back_bd);
		DMA_WRITE(BCI_X_Y(b->x1, b->y1));
		DMA_WRITE(BCI_X_Y(b->x1, b->y1));
		DMA_WRITE(BCI_W_H(b->x2 - b->x1, b->y2 - b->y1));
		DMA_COMMIT();
	}

	return 0;
}
894
/* Replay a run of drawing commands ([start, end)) once per clip rect.
 *
 * For each box the scissor is set via dev_priv->emit_clip_rect, then the
 * command headers are walked and dispatched to the matching prim/idx
 * handler.  Indexed commands carry their index payload inline in the
 * command stream ((count+3)/4 headers), which is skipped after dispatch;
 * the payload length was already validated in savage_bci_cmdbuf.
 * Returns 0, or the first non-zero handler result, or -EINVAL on a
 * non-drawing command (which would be a driver bug at this point).
 */
static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
				const drm_savage_cmd_header_t *start,
				const drm_savage_cmd_header_t *end,
				const struct drm_buf *dmabuf,
				const unsigned int *vtxbuf,
				unsigned int vb_size, unsigned int vb_stride,
				unsigned int nbox,
				const struct drm_clip_rect *boxes)
{
	unsigned int i, j;
	int ret;

	for (i = 0; i < nbox; ++i) {
		const drm_savage_cmd_header_t *cmdbuf;
		dev_priv->emit_clip_rect(dev_priv, &boxes[i]);

		cmdbuf = start;
		while (cmdbuf < end) {
			drm_savage_cmd_header_t cmd_header;
			cmd_header = *cmdbuf;
			cmdbuf++;
			switch (cmd_header.cmd.cmd) {
			case SAVAGE_CMD_DMA_PRIM:
				ret = savage_dispatch_dma_prim(
					dev_priv, &cmd_header, dmabuf);
				break;
			case SAVAGE_CMD_VB_PRIM:
				ret = savage_dispatch_vb_prim(
					dev_priv, &cmd_header,
					vtxbuf, vb_size, vb_stride);
				break;
			case SAVAGE_CMD_DMA_IDX:
				j = (cmd_header.idx.count + 3) / 4;
				/* j was check in savage_bci_cmdbuf */
				ret = savage_dispatch_dma_idx(dev_priv,
					&cmd_header, (const uint16_t *)cmdbuf,
					dmabuf);
				cmdbuf += j;
				break;
			case SAVAGE_CMD_VB_IDX:
				j = (cmd_header.idx.count + 3) / 4;
				/* j was check in savage_bci_cmdbuf */
				ret = savage_dispatch_vb_idx(dev_priv,
					&cmd_header, (const uint16_t *)cmdbuf,
					(const uint32_t *)vtxbuf, vb_size,
					vb_stride);
				cmdbuf += j;
				break;
			default:
				/* What's the best return code? EFAULT? */
				DRM_ERROR("IMPLEMENTATION ERROR: "
					  "non-drawing-command %d\n",
					  cmd_header.cmd.cmd);
				return -EINVAL;
			}

			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
958
savage_bci_cmdbuf(struct drm_device * dev,void * data,struct drm_file * file_priv)959 int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
960 {
961 drm_savage_private_t *dev_priv = dev->dev_private;
962 struct drm_device_dma *dma = dev->dma;
963 struct drm_buf *dmabuf;
964 drm_savage_cmdbuf_t *cmdbuf = data;
965 drm_savage_cmd_header_t *kcmd_addr = NULL;
966 drm_savage_cmd_header_t *first_draw_cmd;
967 unsigned int *kvb_addr = NULL;
968 struct drm_clip_rect *kbox_addr = NULL;
969 unsigned int i, j;
970 int ret = 0;
971
972 DRM_DEBUG("\n");
973
974 LOCK_TEST_WITH_RETURN(dev, file_priv);
975
976 if (dma && dma->buflist) {
977 if (cmdbuf->dma_idx > dma->buf_count) {
978 DRM_ERROR
979 ("vertex buffer index %u out of range (0-%u)\n",
980 cmdbuf->dma_idx, dma->buf_count - 1);
981 return -EINVAL;
982 }
983 dmabuf = dma->buflist[cmdbuf->dma_idx];
984 } else {
985 dmabuf = NULL;
986 }
987
988 /* Copy the user buffers into kernel temporary areas. This hasn't been
989 * a performance loss compared to VERIFYAREA_READ/
990 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
991 * for locking on FreeBSD.
992 */
993 if (cmdbuf->size) {
994 kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
995 if (kcmd_addr == NULL)
996 return -ENOMEM;
997
998 if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
999 cmdbuf->size * 8))
1000 {
1001 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
1002 return -EFAULT;
1003 }
1004 cmdbuf->cmd_addr = kcmd_addr;
1005 }
1006 if (cmdbuf->vb_size) {
1007 kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
1008 if (kvb_addr == NULL) {
1009 ret = -ENOMEM;
1010 goto done;
1011 }
1012
1013 if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
1014 cmdbuf->vb_size)) {
1015 ret = -EFAULT;
1016 goto done;
1017 }
1018 cmdbuf->vb_addr = kvb_addr;
1019 }
1020 if (cmdbuf->nbox) {
1021 kbox_addr = drm_alloc(cmdbuf->nbox *
1022 sizeof(struct drm_clip_rect),
1023 DRM_MEM_DRIVER);
1024 if (kbox_addr == NULL) {
1025 ret = -ENOMEM;
1026 goto done;
1027 }
1028
1029 if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
1030 cmdbuf->nbox *
1031 sizeof(struct drm_clip_rect))) {
1032 ret = -EFAULT;
1033 goto done;
1034 }
1035 cmdbuf->box_addr = kbox_addr;
1036 }
1037
1038 /* Make sure writes to DMA buffers are finished before sending
1039 * DMA commands to the graphics hardware. */
1040 DRM_MEMORYBARRIER();
1041
1042 /* Coming from user space. Don't know if the Xserver has
1043 * emitted wait commands. Assuming the worst. */
1044 dev_priv->waiting = 1;
1045
1046 i = 0;
1047 first_draw_cmd = NULL;
1048 while (i < cmdbuf->size) {
1049 drm_savage_cmd_header_t cmd_header;
1050 cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
1051 cmdbuf->cmd_addr++;
1052 i++;
1053
1054 /* Group drawing commands with same state to minimize
1055 * iterations over clip rects. */
1056 j = 0;
1057 switch (cmd_header.cmd.cmd) {
1058 case SAVAGE_CMD_DMA_IDX:
1059 case SAVAGE_CMD_VB_IDX:
1060 j = (cmd_header.idx.count + 3) / 4;
1061 if (i + j > cmdbuf->size) {
1062 DRM_ERROR("indexed drawing command extends "
1063 "beyond end of command buffer\n");
1064 DMA_FLUSH();
1065 return -EINVAL;
1066 }
1067 /* FALLTHROUGH */
1068 case SAVAGE_CMD_DMA_PRIM:
1069 case SAVAGE_CMD_VB_PRIM:
1070 if (!first_draw_cmd)
1071 first_draw_cmd = cmdbuf->cmd_addr - 1;
1072 cmdbuf->cmd_addr += j;
1073 i += j;
1074 break;
1075 default:
1076 if (first_draw_cmd) {
1077 ret = savage_dispatch_draw(
1078 dev_priv, first_draw_cmd,
1079 cmdbuf->cmd_addr - 1,
1080 dmabuf, cmdbuf->vb_addr,
1081 cmdbuf->vb_size,
1082 cmdbuf->vb_stride,
1083 cmdbuf->nbox, cmdbuf->box_addr);
1084 if (ret != 0)
1085 return ret;
1086 first_draw_cmd = NULL;
1087 }
1088 }
1089 if (first_draw_cmd)
1090 continue;
1091
1092 switch (cmd_header.cmd.cmd) {
1093 case SAVAGE_CMD_STATE:
1094 j = (cmd_header.state.count + 1) / 2;
1095 if (i + j > cmdbuf->size) {
1096 DRM_ERROR("command SAVAGE_CMD_STATE extends "
1097 "beyond end of command buffer\n");
1098 DMA_FLUSH();
1099 ret = -EINVAL;
1100 goto done;
1101 }
1102 ret = savage_dispatch_state(dev_priv, &cmd_header,
1103 (const uint32_t *)cmdbuf->cmd_addr);
1104 cmdbuf->cmd_addr += j;
1105 i += j;
1106 break;
1107 case SAVAGE_CMD_CLEAR:
1108 if (i + 1 > cmdbuf->size) {
1109 DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
1110 "beyond end of command buffer\n");
1111 DMA_FLUSH();
1112 ret = -EINVAL;
1113 goto done;
1114 }
1115 ret = savage_dispatch_clear(dev_priv, &cmd_header,
1116 cmdbuf->cmd_addr,
1117 cmdbuf->nbox,
1118 cmdbuf->box_addr);
1119 cmdbuf->cmd_addr++;
1120 i++;
1121 break;
1122 case SAVAGE_CMD_SWAP:
1123 ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
1124 cmdbuf->box_addr);
1125 break;
1126 default:
1127 DRM_ERROR("invalid command 0x%x\n",
1128 cmd_header.cmd.cmd);
1129 DMA_FLUSH();
1130 ret = -EINVAL;
1131 goto done;
1132 }
1133
1134 if (ret != 0) {
1135 DMA_FLUSH();
1136 goto done;
1137 }
1138 }
1139
1140 if (first_draw_cmd) {
1141 ret = savage_dispatch_draw(
1142 dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
1143 cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
1144 cmdbuf->nbox, cmdbuf->box_addr);
1145 if (ret != 0) {
1146 DMA_FLUSH();
1147 goto done;
1148 }
1149 }
1150
1151 DMA_FLUSH();
1152
1153 if (dmabuf && cmdbuf->discard) {
1154 drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
1155 uint16_t event;
1156 event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
1157 SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
1158 savage_freelist_put(dev, dmabuf);
1159 }
1160
1161 done:
1162 /* If we didn't need to allocate them, these'll be NULL */
1163 drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
1164 drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
1165 drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
1166 DRM_MEM_DRIVER);
1167
1168 return ret;
1169 }
1170