xref: /openbsd-src/sys/dev/pci/drm/radeon/r600_cs.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: r600_cs.c,v 1.3 2014/02/15 14:19:44 jsg Exp $	*/
2 /*
3  * Copyright 2008 Advanced Micro Devices, Inc.
4  * Copyright 2008 Red Hat Inc.
5  * Copyright 2009 Jerome Glisse.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the "Software"),
9  * to deal in the Software without restriction, including without limitation
10  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11  * and/or sell copies of the Software, and to permit persons to whom the
12  * Software is furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in
15  * all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23  * OTHER DEALINGS IN THE SOFTWARE.
24  *
25  * Authors: Dave Airlie
26  *          Alex Deucher
27  *          Jerome Glisse
28  */
29 #include <dev/pci/drm/drmP.h>
30 #include "radeon.h"
31 #include "r600d.h"
32 #include "r600_reg_safe.h"
33 
34 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
35 					struct radeon_cs_reloc **cs_reloc);
36 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
37 					struct radeon_cs_reloc **cs_reloc);
38 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
39 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
40 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
41 
42 int r600_cs_parse(struct radeon_cs_parser *p);
43 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
44 			unsigned family, u32 *ib, int *l);
45 void r600_cs_legacy_init(void);
46 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
47 			   struct radeon_cs_reloc **cs_reloc);
48 int r600_dma_cs_parse(struct radeon_cs_parser *p);
49 
50 struct r600_cs_track {
51 	/* configuration we mirror so that we use the same code between kms/ums */
52 	u32			group_size;
53 	u32			nbanks;
54 	u32			npipes;
55 	/* value we track */
56 	u32			sq_config;
57 	u32			log_nsamples;
58 	u32			nsamples;
59 	u32			cb_color_base_last[8];
60 	struct radeon_bo	*cb_color_bo[8];
61 	u64			cb_color_bo_mc[8];
62 	u64			cb_color_bo_offset[8];
63 	struct radeon_bo	*cb_color_frag_bo[8];
64 	u64			cb_color_frag_offset[8];
65 	struct radeon_bo	*cb_color_tile_bo[8];
66 	u64			cb_color_tile_offset[8];
67 	u32			cb_color_mask[8];
68 	u32			cb_color_info[8];
69 	u32			cb_color_view[8];
70 	u32			cb_color_size_idx[8]; /* unused */
71 	u32			cb_target_mask;
72 	u32			cb_shader_mask;  /* unused */
73 	bool			is_resolve;
74 	u32			cb_color_size[8];
75 	u32			vgt_strmout_en;
76 	u32			vgt_strmout_buffer_en;
77 	struct radeon_bo	*vgt_strmout_bo[4];
78 	u64			vgt_strmout_bo_mc[4]; /* unused */
79 	u32			vgt_strmout_bo_offset[4];
80 	u32			vgt_strmout_size[4];
81 	u32			db_depth_control;
82 	u32			db_depth_info;
83 	u32			db_depth_size_idx;
84 	u32			db_depth_view;
85 	u32			db_depth_size;
86 	u32			db_offset;
87 	struct radeon_bo	*db_bo;
88 	u64			db_bo_mc;
89 	bool			sx_misc_kill_all_prims;
90 	bool			cb_dirty;
91 	bool			db_dirty;
92 	bool			streamout_dirty;
93 	struct radeon_bo	*htile_bo;
94 	u64			htile_offset;
95 	u32			htile_surface;
96 };
97 
98 #define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
99 #define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
100 #define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
101 #define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
102 #define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
103 #define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
104 #define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
105 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
106 
107 struct gpu_formats {
108 	unsigned blockwidth;
109 	unsigned blockheight;
110 	unsigned blocksize;
111 	unsigned valid_color;
112 	enum radeon_family min_family;
113 };
114 
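/* The FMT_* macros above fill these fields positionally; for example
 * FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1) expands to { 1, 1, 4, 1, CHIP_R600 },
 * i.e. a 1x1 pixel block of 4 bytes that is valid as a color target and
 * supported from R600 onwards. */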
115 static const struct gpu_formats color_formats_table[] = {
116 	/* 8 bit */
117 	FMT_8_BIT(V_038004_COLOR_8, 1),
118 	FMT_8_BIT(V_038004_COLOR_4_4, 1),
119 	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
120 	FMT_8_BIT(V_038004_FMT_1, 0),
121 
122 	/* 16-bit */
123 	FMT_16_BIT(V_038004_COLOR_16, 1),
124 	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
125 	FMT_16_BIT(V_038004_COLOR_8_8, 1),
126 	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
127 	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
128 	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
129 	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
130 	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
131 
132 	/* 24-bit */
133 	FMT_24_BIT(V_038004_FMT_8_8_8),
134 
135 	/* 32-bit */
136 	FMT_32_BIT(V_038004_COLOR_32, 1),
137 	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
138 	FMT_32_BIT(V_038004_COLOR_16_16, 1),
139 	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
140 	FMT_32_BIT(V_038004_COLOR_8_24, 1),
141 	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
142 	FMT_32_BIT(V_038004_COLOR_24_8, 1),
143 	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
144 	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
145 	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
146 	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
147 	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
148 	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
149 	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
150 	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
151 	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
152 	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
153 	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
154 
155 	/* 48-bit */
156 	FMT_48_BIT(V_038004_FMT_16_16_16),
157 	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
158 
159 	/* 64-bit */
160 	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
161 	FMT_64_BIT(V_038004_COLOR_32_32, 1),
162 	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
163 	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
164 	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
165 
166 	FMT_96_BIT(V_038004_FMT_32_32_32),
167 	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
168 
169 	/* 128-bit */
170 	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
171 	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
172 
173 	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
174 	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
175 
176 	/* block compressed formats */
177 	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
178 	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
179 	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
180 	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
181 	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
182 	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
183 	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
184 
185 	/* The other Evergreen formats */
186 	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
187 };
188 
189 bool r600_fmt_is_valid_color(u32 format)
190 {
191 	if (format >= ARRAY_SIZE(color_formats_table))
192 		return false;
193 
194 	if (color_formats_table[format].valid_color)
195 		return true;
196 
197 	return false;
198 }
199 
200 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
201 {
202 	if (format >= ARRAY_SIZE(color_formats_table))
203 		return false;
204 
205 	if (family < color_formats_table[format].min_family)
206 		return false;
207 
208 	if (color_formats_table[format].blockwidth > 0)
209 		return true;
210 
211 	return false;
212 }
213 
214 int r600_fmt_get_blocksize(u32 format)
215 {
216 	if (format >= ARRAY_SIZE(color_formats_table))
217 		return 0;
218 
219 	return color_formats_table[format].blocksize;
220 }
221 
222 int r600_fmt_get_nblocksx(u32 format, u32 w)
223 {
224 	unsigned bw;
225 
226 	if (format >= ARRAY_SIZE(color_formats_table))
227 		return 0;
228 
229 	bw = color_formats_table[format].blockwidth;
230 	if (bw == 0)
231 		return 0;
232 
233 	return (w + bw - 1) / bw;
234 }
235 
236 int r600_fmt_get_nblocksy(u32 format, u32 h)
237 {
238 	unsigned bh;
239 
240 	if (format >= ARRAY_SIZE(color_formats_table))
241 		return 0;
242 
243 	bh = color_formats_table[format].blockheight;
244 	if (bh == 0)
245 		return 0;
246 
247 	return (h + bh - 1) / bh;
248 }
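
/* Example using the table above: V_038004_FMT_BC1 is a 4x4 block of 8 bytes,
 * so a 70x50 image needs (70 + 3) / 4 = 18 by (50 + 3) / 4 = 13 blocks,
 * i.e. 18 * 13 * 8 bytes of storage. */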
249 
250 struct array_mode_checker {
251 	int array_mode;
252 	u32 group_size;
253 	u32 nbanks;
254 	u32 npipes;
255 	u32 nsamples;
256 	u32 blocksize;
257 };
258 
259 /* returns alignment in pixels for pitch/height/depth and bytes for base */
260 static int r600_get_array_mode_alignment(struct array_mode_checker *values,
261 						u32 *pitch_align,
262 						u32 *height_align,
263 						u32 *depth_align,
264 						u64 *base_align)
265 {
266 	u32 tile_width = 8;
267 	u32 tile_height = 8;
268 	u32 macro_tile_width = values->nbanks;
269 	u32 macro_tile_height = values->npipes;
270 	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
271 	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
272 
273 	switch (values->array_mode) {
274 	case ARRAY_LINEAR_GENERAL:
275 		/* technically tile_width/_height for pitch/height */
276 		*pitch_align = 1; /* tile_width */
277 		*height_align = 1; /* tile_height */
278 		*depth_align = 1;
279 		*base_align = 1;
280 		break;
281 	case ARRAY_LINEAR_ALIGNED:
282 		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
283 		*height_align = 1;
284 		*depth_align = 1;
285 		*base_align = values->group_size;
286 		break;
287 	case ARRAY_1D_TILED_THIN1:
288 		*pitch_align = max((u32)tile_width,
289 				   (u32)(values->group_size /
290 					 (tile_height * values->blocksize * values->nsamples)));
291 		*height_align = tile_height;
292 		*depth_align = 1;
293 		*base_align = values->group_size;
294 		break;
295 	case ARRAY_2D_TILED_THIN1:
296 		*pitch_align = max((u32)macro_tile_width * tile_width,
297 				(u32)((values->group_size * values->nbanks) /
298 				(values->blocksize * values->nsamples * tile_width)));
299 		*height_align = macro_tile_height * tile_height;
300 		*depth_align = 1;
301 		*base_align = max(macro_tile_bytes,
302 				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
303 		break;
304 	default:
305 		return -EINVAL;
306 	}
307 
308 	return 0;
309 }
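
/* Illustrative numbers only (hypothetical configuration): with group_size = 256,
 * nbanks = 8, npipes = 2, blocksize = 4 and nsamples = 1, ARRAY_2D_TILED_THIN1
 * gives tile_bytes = 8 * 8 * 4 = 256, macro_tile_bytes = 8 * 2 * 256 = 4096,
 * pitch_align = max(8 * 8, (256 * 8) / (4 * 1 * 8)) = 64,
 * height_align = 2 * 8 = 16 and base_align = max(4096, 64 * 4 * 16) = 4096. */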
310 
311 static void r600_cs_track_init(struct r600_cs_track *track)
312 {
313 	int i;
314 
315 	/* assume DX9 mode */
316 	track->sq_config = DX9_CONSTS;
317 	for (i = 0; i < 8; i++) {
318 		track->cb_color_base_last[i] = 0;
319 		track->cb_color_size[i] = 0;
320 		track->cb_color_size_idx[i] = 0;
321 		track->cb_color_info[i] = 0;
322 		track->cb_color_view[i] = 0xFFFFFFFF;
323 		track->cb_color_bo[i] = NULL;
324 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
325 		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
326 		track->cb_color_frag_bo[i] = NULL;
327 		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
328 		track->cb_color_tile_bo[i] = NULL;
329 		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
330 		track->cb_color_mask[i] = 0xFFFFFFFF;
331 	}
332 	track->is_resolve = false;
333 	track->nsamples = 16;
334 	track->log_nsamples = 4;
335 	track->cb_target_mask = 0xFFFFFFFF;
336 	track->cb_shader_mask = 0xFFFFFFFF;
337 	track->cb_dirty = true;
338 	track->db_bo = NULL;
339 	track->db_bo_mc = 0xFFFFFFFF;
340 	/* assume the biggest format and that htile is enabled */
341 	track->db_depth_info = 7 | (1 << 25);
342 	track->db_depth_view = 0xFFFFC000;
343 	track->db_depth_size = 0xFFFFFFFF;
344 	track->db_depth_size_idx = 0;
345 	track->db_depth_control = 0xFFFFFFFF;
346 	track->db_dirty = true;
347 	track->htile_bo = NULL;
348 	track->htile_offset = 0xFFFFFFFF;
349 	track->htile_surface = 0;
350 
351 	for (i = 0; i < 4; i++) {
352 		track->vgt_strmout_size[i] = 0;
353 		track->vgt_strmout_bo[i] = NULL;
354 		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
355 		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
356 	}
357 	track->streamout_dirty = true;
358 	track->sx_misc_kill_all_prims = false;
359 }
360 
361 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
362 {
363 	struct r600_cs_track *track = p->track;
364 	u32 slice_tile_max, size, tmp;
365 	u32 height, height_align, pitch, pitch_align, depth_align;
366 	u64 base_offset, base_align;
367 	struct array_mode_checker array_check;
368 	volatile u32 *ib = p->ib.ptr;
369 	unsigned array_mode;
370 	u32 format;
371 	/* When resolve is used, the second colorbuffer always has 1 sample. */
372 	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
373 
374 	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
375 	format = G_0280A0_FORMAT(track->cb_color_info[i]);
376 	if (!r600_fmt_is_valid_color(format)) {
377 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
378 			 __func__, __LINE__, format,
379 			i, track->cb_color_info[i]);
380 		return -EINVAL;
381 	}
382 	/* pitch in pixels */
383 	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
384 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
385 	slice_tile_max *= 64;
386 	height = slice_tile_max / pitch;
387 	if (height > 8192)
388 		height = 8192;
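	/* e.g. PITCH_TILE_MAX = 127 and SLICE_TILE_MAX = 12287 describe a
	 * 1024x768 surface: pitch = 128 * 8 = 1024 pixels and
	 * height = 12288 * 64 / 1024 = 768 pixels */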
389 	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
390 
391 	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
392 	array_check.array_mode = array_mode;
393 	array_check.group_size = track->group_size;
394 	array_check.nbanks = track->nbanks;
395 	array_check.npipes = track->npipes;
396 	array_check.nsamples = nsamples;
397 	array_check.blocksize = r600_fmt_get_blocksize(format);
398 	if (r600_get_array_mode_alignment(&array_check,
399 					  &pitch_align, &height_align, &depth_align, &base_align)) {
400 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
401 			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
402 			 track->cb_color_info[i]);
403 		return -EINVAL;
404 	}
405 	switch (array_mode) {
406 	case V_0280A0_ARRAY_LINEAR_GENERAL:
407 		break;
408 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
409 		break;
410 	case V_0280A0_ARRAY_1D_TILED_THIN1:
411 		/* avoid breaking userspace */
412 		if (height > 7)
413 			height &= ~0x7;
414 		break;
415 	case V_0280A0_ARRAY_2D_TILED_THIN1:
416 		break;
417 	default:
418 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
419 			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
420 			track->cb_color_info[i]);
421 		return -EINVAL;
422 	}
423 
424 	if (!IS_ALIGNED(pitch, pitch_align)) {
425 		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
426 			 __func__, __LINE__, pitch, pitch_align, array_mode);
427 		return -EINVAL;
428 	}
429 	if (!IS_ALIGNED(height, height_align)) {
430 		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
431 			 __func__, __LINE__, height, height_align, array_mode);
432 		return -EINVAL;
433 	}
434 	if (!IS_ALIGNED(base_offset, base_align)) {
435 		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
436 			 base_offset, base_align, array_mode);
437 		return -EINVAL;
438 	}
439 
440 	/* check offset */
441 	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
442 	      r600_fmt_get_blocksize(format) * nsamples;
443 	switch (array_mode) {
444 	default:
445 	case V_0280A0_ARRAY_LINEAR_GENERAL:
446 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
447 		tmp += track->cb_color_view[i] & 0xFF;
448 		break;
449 	case V_0280A0_ARRAY_1D_TILED_THIN1:
450 	case V_0280A0_ARRAY_2D_TILED_THIN1:
451 		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
452 		break;
453 	}
454 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
455 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
456 			/* the initial DDX does bad things with the CB size occasionally */
457 			/* it rounds up height too far for slice tile max but the BO is smaller */
458 			/* r600c,g also seem to flush at bad times in some apps resulting in
459 			 * bogus values here. So for linear just allow anything to avoid breaking
460 			 * broken userspace.
461 			 */
462 		} else {
463 			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
464 				 __func__, i, array_mode,
465 				 track->cb_color_bo_offset[i], tmp,
466 				 radeon_bo_size(track->cb_color_bo[i]),
467 				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
468 				 r600_fmt_get_nblocksy(format, height),
469 				 r600_fmt_get_blocksize(format));
470 			return -EINVAL;
471 		}
472 	}
473 	/* limit max tile */
474 	tmp = (height * pitch) >> 6;
475 	if (tmp < slice_tile_max)
476 		slice_tile_max = tmp;
477 	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
478 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
479 	ib[track->cb_color_size_idx[i]] = tmp;
480 
481 	/* FMASK/CMASK */
482 	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
483 	case V_0280A0_TILE_DISABLE:
484 		break;
485 	case V_0280A0_FRAG_ENABLE:
486 		if (track->nsamples > 1) {
487 			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
488 			/* the tile size is 8x8 pixels and FMASK uses nsamples * log_nsamples
489 			 * bits per pixel, so bytes per tile is just nsamples * log_nsamples * 8. */
490 			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
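			/* e.g. 4 samples (log_nsamples = 2) with tile_max = 1023 works out
			 * to 4 * 2 * 8 * 1024 = 65536 bytes of FMASK */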
491 
492 			if (bytes + track->cb_color_frag_offset[i] >
493 			    radeon_bo_size(track->cb_color_frag_bo[i])) {
494 				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
495 					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
496 					 __func__, tile_max, bytes,
497 					 track->cb_color_frag_offset[i],
498 					 radeon_bo_size(track->cb_color_frag_bo[i]));
499 				return -EINVAL;
500 			}
501 		}
502 		/* fall through */
503 	case V_0280A0_CLEAR_ENABLE:
504 	{
505 		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
506 		/* One block = 128x128 pixels; each 8x8 tile uses 4 bits (half a byte),
507 		 * so (128*128) / (8*8) / 2 = 128 bytes per block. */
508 		uint32_t bytes = (block_max + 1) * 128;
509 
510 		if (bytes + track->cb_color_tile_offset[i] >
511 		    radeon_bo_size(track->cb_color_tile_bo[i])) {
512 			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
513 				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
514 				 __func__, block_max, bytes,
515 				 track->cb_color_tile_offset[i],
516 				 radeon_bo_size(track->cb_color_tile_bo[i]));
517 			return -EINVAL;
518 		}
519 		break;
520 	}
521 	default:
522 		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
523 		return -EINVAL;
524 	}
525 	return 0;
526 }
527 
528 static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
529 {
530 	struct r600_cs_track *track = p->track;
531 	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
532 	u32 height_align, pitch_align, depth_align;
533 	u32 pitch = 8192;
534 	u32 height = 8192;
535 	u64 base_offset, base_align;
536 	struct array_mode_checker array_check;
537 	int array_mode;
538 	volatile u32 *ib = p->ib.ptr;
539 
540 
541 	if (track->db_bo == NULL) {
542 		dev_warn(p->dev, "z/stencil with no depth buffer\n");
543 		return -EINVAL;
544 	}
545 	switch (G_028010_FORMAT(track->db_depth_info)) {
546 	case V_028010_DEPTH_16:
547 		bpe = 2;
548 		break;
549 	case V_028010_DEPTH_X8_24:
550 	case V_028010_DEPTH_8_24:
551 	case V_028010_DEPTH_X8_24_FLOAT:
552 	case V_028010_DEPTH_8_24_FLOAT:
553 	case V_028010_DEPTH_32_FLOAT:
554 		bpe = 4;
555 		break;
556 	case V_028010_DEPTH_X24_8_32_FLOAT:
557 		bpe = 8;
558 		break;
559 	default:
560 		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
561 		return -EINVAL;
562 	}
563 	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
564 		if (!track->db_depth_size_idx) {
565 			dev_warn(p->dev, "z/stencil buffer size not set\n");
566 			return -EINVAL;
567 		}
568 		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
569 		tmp = (tmp / bpe) >> 6;
570 		if (!tmp) {
571 			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
572 					track->db_depth_size, bpe, track->db_offset,
573 					radeon_bo_size(track->db_bo));
574 			return -EINVAL;
575 		}
576 		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
577 	} else {
578 		size = radeon_bo_size(track->db_bo);
579 		/* pitch in pixels */
580 		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
581 		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
582 		slice_tile_max *= 64;
583 		height = slice_tile_max / pitch;
584 		if (height > 8192)
585 			height = 8192;
586 		base_offset = track->db_bo_mc + track->db_offset;
587 		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
588 		array_check.array_mode = array_mode;
589 		array_check.group_size = track->group_size;
590 		array_check.nbanks = track->nbanks;
591 		array_check.npipes = track->npipes;
592 		array_check.nsamples = track->nsamples;
593 		array_check.blocksize = bpe;
594 		if (r600_get_array_mode_alignment(&array_check,
595 					&pitch_align, &height_align, &depth_align, &base_align)) {
596 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
597 					G_028010_ARRAY_MODE(track->db_depth_info),
598 					track->db_depth_info);
599 			return -EINVAL;
600 		}
601 		switch (array_mode) {
602 		case V_028010_ARRAY_1D_TILED_THIN1:
603 			/* don't break userspace */
604 			height &= ~0x7;
605 			break;
606 		case V_028010_ARRAY_2D_TILED_THIN1:
607 			break;
608 		default:
609 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
610 					G_028010_ARRAY_MODE(track->db_depth_info),
611 					track->db_depth_info);
612 			return -EINVAL;
613 		}
614 
615 		if (!IS_ALIGNED(pitch, pitch_align)) {
616 			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
617 					__func__, __LINE__, pitch, pitch_align, array_mode);
618 			return -EINVAL;
619 		}
620 		if (!IS_ALIGNED(height, height_align)) {
621 			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
622 					__func__, __LINE__, height, height_align, array_mode);
623 			return -EINVAL;
624 		}
625 		if (!IS_ALIGNED(base_offset, base_align)) {
626 			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
627 					base_offset, base_align, array_mode);
628 			return -EINVAL;
629 		}
630 
631 		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
632 		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
633 		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
634 		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
635 			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
636 					array_mode,
637 					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
638 					radeon_bo_size(track->db_bo));
639 			return -EINVAL;
640 		}
641 	}
642 
643 	/* hyperz */
644 	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
645 		unsigned long size;
646 		unsigned nbx, nby;
647 
648 		if (track->htile_bo == NULL) {
649 			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
650 				 __func__, __LINE__, track->db_depth_info);
651 			return -EINVAL;
652 		}
653 		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
654 			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
655 				 __func__, __LINE__, track->db_depth_size);
656 			return -EINVAL;
657 		}
658 
659 		nbx = pitch;
660 		nby = height;
661 		if (G_028D24_LINEAR(track->htile_surface)) {
662 			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
663 			nbx = roundup2(nbx, 16 * 8);
664 			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
665 			nby = roundup(nby, track->npipes * 8);
666 		} else {
667 			/* always assume 8x8 htile */
668 			/* alignment is htile alignment * 8; htile alignment varies with
669 			 * the number of pipes, the tile width and nby
670 			 */
671 			switch (track->npipes) {
672 			case 8:
673 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
674 				nbx = roundup2(nbx, 64 * 8);
675 				nby = roundup2(nby, 64 * 8);
676 				break;
677 			case 4:
678 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
679 				nbx = roundup2(nbx, 64 * 8);
680 				nby = roundup2(nby, 32 * 8);
681 				break;
682 			case 2:
683 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
684 				nbx = roundup2(nbx, 32 * 8);
685 				nby = roundup2(nby, 32 * 8);
686 				break;
687 			case 1:
688 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
689 				nbx = roundup2(nbx, 32 * 8);
690 				nby = roundup2(nby, 16 * 8);
691 				break;
692 			default:
693 				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
694 					 __func__, __LINE__, track->npipes);
695 				return -EINVAL;
696 			}
697 		}
698 		/* compute number of htiles */
699 		nbx = nbx >> 3;
700 		nby = nby >> 3;
701 		/* size must be aligned on npipes * 2K boundary */
702 		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
703 		size += track->htile_offset;
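		/* e.g. a 1024x768 linear depth buffer with npipes = 2: nbx = 1024,
		 * nby = 768, i.e. 128 * 96 htiles of 4 bytes each, so
		 * size = roundup(49152, 4096) = 49152 plus the htile offset */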
704 
705 		if (size > radeon_bo_size(track->htile_bo)) {
706 			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
707 				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
708 				 size, nbx, nby);
709 			return -EINVAL;
710 		}
711 	}
712 
713 	track->db_dirty = false;
714 	return 0;
715 }
716 
717 static int r600_cs_track_check(struct radeon_cs_parser *p)
718 {
719 	struct r600_cs_track *track = p->track;
720 	u32 tmp;
721 	int r, i;
722 
723 	/* on legacy kernels we don't perform the advanced checks */
724 	if (p->rdev == NULL)
725 		return 0;
726 
727 	/* check streamout */
728 	if (track->streamout_dirty && track->vgt_strmout_en) {
729 		for (i = 0; i < 4; i++) {
730 			if (track->vgt_strmout_buffer_en & (1 << i)) {
731 				if (track->vgt_strmout_bo[i]) {
732 					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
733 						(u64)track->vgt_strmout_size[i];
734 					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
735 						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
736 							  i, offset,
737 							  radeon_bo_size(track->vgt_strmout_bo[i]));
738 						return -EINVAL;
739 					}
740 				} else {
741 					dev_warn(p->dev, "No buffer for streamout %d\n", i);
742 					return -EINVAL;
743 				}
744 			}
745 		}
746 		track->streamout_dirty = false;
747 	}
748 
749 	if (track->sx_misc_kill_all_prims)
750 		return 0;
751 
752 	/* check that we have a cb for each enabled target; we don't check
753 	 * shader_mask because it seems mesa isn't always setting it :(
754 	 */
755 	if (track->cb_dirty) {
756 		tmp = track->cb_target_mask;
757 
758 		/* We must check both colorbuffers for RESOLVE. */
759 		if (track->is_resolve) {
760 			tmp |= 0xff;
761 		}
762 
763 		for (i = 0; i < 8; i++) {
764 			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
765 
766 			if (format != V_0280A0_COLOR_INVALID &&
767 			    (tmp >> (i * 4)) & 0xF) {
768 				/* at least one component is enabled */
769 				if (track->cb_color_bo[i] == NULL) {
770 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
771 						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
772 					return -EINVAL;
773 				}
774 				/* perform rewrite of CB_COLOR[0-7]_SIZE */
775 				r = r600_cs_track_validate_cb(p, i);
776 				if (r)
777 					return r;
778 			}
779 		}
780 		track->cb_dirty = false;
781 	}
782 
783 	/* Check depth buffer */
784 	if (track->db_dirty &&
785 	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
786 	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
787 	     G_028800_Z_ENABLE(track->db_depth_control))) {
788 		r = r600_cs_track_validate_db(p);
789 		if (r)
790 			return r;
791 	}
792 
793 	return 0;
794 }
795 
796 /**
797  * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
798  * @parser:	parser structure holding parsing context.
799  * @pkt:	where to store packet information
800  *
801  * Assumes that chunk_ib_index is properly set. Will return -EINVAL
802  * if the packet is bigger than the remaining ib size or if the packet type is unknown.
803  **/
804 static int r600_cs_packet_parse(struct radeon_cs_parser *p,
805 			struct radeon_cs_packet *pkt,
806 			unsigned idx)
807 {
808 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
809 	uint32_t header;
810 
811 	if (idx >= ib_chunk->length_dw) {
812 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
813 			  idx, ib_chunk->length_dw);
814 		return -EINVAL;
815 	}
816 	header = radeon_get_ib_value(p, idx);
817 	pkt->idx = idx;
818 	pkt->type = CP_PACKET_GET_TYPE(header);
819 	pkt->count = CP_PACKET_GET_COUNT(header);
820 	pkt->one_reg_wr = 0;
821 	switch (pkt->type) {
822 	case PACKET_TYPE0:
823 		pkt->reg = CP_PACKET0_GET_REG(header);
824 		break;
825 	case PACKET_TYPE3:
826 		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
827 		break;
828 	case PACKET_TYPE2:
829 		pkt->count = -1;
830 		break;
831 	default:
832 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
833 		return -EINVAL;
834 	}
835 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
836 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
837 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
838 		return -EINVAL;
839 	}
840 	return 0;
841 }
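
/* The CP_PACKET*_GET_* helpers used above come from the r600 register headers;
 * the header dword encodes the packet type in its two top bits and a count
 * field, and as the bounds check above implies, a packet occupies
 * count + 2 dwords (the header plus count + 1 data words). */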
842 
843 /**
844  * r600_cs_packet_next_reloc_mm() - parse the next packet, which should be a reloc packet3
845  * @parser:		parser structure holding parsing context.
846  * @data:		pointer to relocation data
847  * @offset_start:	starting offset
848  * @offset_mask:	offset mask (to align start offset on)
849  * @reloc:		reloc information
850  *
851  * Check that the next packet is a relocation packet3, do bo validation and
852  * compute the GPU offset using the provided start.
853  **/
854 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
855 					struct radeon_cs_reloc **cs_reloc)
856 {
857 	struct radeon_cs_chunk *relocs_chunk;
858 	struct radeon_cs_packet p3reloc;
859 	unsigned idx;
860 	int r;
861 
862 	if (p->chunk_relocs_idx == -1) {
863 		DRM_ERROR("No relocation chunk !\n");
864 		return -EINVAL;
865 	}
866 	*cs_reloc = NULL;
867 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
868 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
869 	if (r) {
870 		return r;
871 	}
872 	p->idx += p3reloc.count + 2;
873 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
874 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
875 			  p3reloc.idx);
876 		return -EINVAL;
877 	}
878 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
879 	if (idx >= relocs_chunk->length_dw) {
880 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
881 			  idx, relocs_chunk->length_dw);
882 		return -EINVAL;
883 	}
884 	/* FIXME: we assume reloc size is 4 dwords */
885 	*cs_reloc = p->relocs_ptr[(idx / 4)];
886 	return 0;
887 }
888 
889 /**
890  * r600_cs_packet_next_reloc_nomm() - parse the next packet, which should be a reloc packet3
891  * @parser:		parser structure holding parsing context.
892  * @data:		pointer to relocation data
893  * @offset_start:	starting offset
894  * @offset_mask:	offset mask (to align start offset on)
895  * @reloc:		reloc information
896  *
897  * Check that the next packet is a relocation packet3, do bo validation and
898  * compute the GPU offset using the provided start.
899  **/
900 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
901 					struct radeon_cs_reloc **cs_reloc)
902 {
903 	struct radeon_cs_chunk *relocs_chunk;
904 	struct radeon_cs_packet p3reloc;
905 	unsigned idx;
906 	int r;
907 
908 	if (p->chunk_relocs_idx == -1) {
909 		DRM_ERROR("No relocation chunk !\n");
910 		return -EINVAL;
911 	}
912 	*cs_reloc = NULL;
913 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
914 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
915 	if (r) {
916 		return r;
917 	}
918 	p->idx += p3reloc.count + 2;
919 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
920 		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
921 			  p3reloc.idx);
922 		return -EINVAL;
923 	}
924 	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
925 	if (idx >= relocs_chunk->length_dw) {
926 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
927 			  idx, relocs_chunk->length_dw);
928 		return -EINVAL;
929 	}
930 	*cs_reloc = p->relocs;
931 	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
932 	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
933 	return 0;
934 }
935 
936 /**
937  * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
938  * @parser:		parser structure holding parsing context.
939  *
940  * Check whether the next packet is a relocation packet3 (NOP); returns 1
941  * if it is and 0 otherwise.
942  **/
943 static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
944 {
945 	struct radeon_cs_packet p3reloc;
946 	int r;
947 
948 	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
949 	if (r) {
950 		return 0;
951 	}
952 	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
953 		return 0;
954 	}
955 	return 1;
956 }
957 
958 /**
959  * r600_cs_packet_next_vline() - parse userspace VLINE packet
960  * @parser:		parser structure holding parsing context.
961  *
962  * Userspace sends a special sequence for VLINE waits.
963  * PACKET0 - VLINE_START_END + value
964  * PACKET3 - WAIT_REG_MEM poll vline status reg
965  * RELOC (P3) - crtc_id in reloc.
966  *
967  * This function parses this and relocates the VLINE START END
968  * and WAIT_REG_MEM packets to the correct crtc.
969  * It also detects a switched off crtc and nulls out the
970  * wait in that case.
971  */
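/* The offsets used below assume the layout described above: ib[h_idx] holds the
 * PACKET0 header for VLINE_START_END, ib[h_idx + 1] its value,
 * ib[h_idx + 2 .. h_idx + 8] the 7-dword WAIT_REG_MEM packet, and the first
 * data dword of the following NOP reloc (ib[h_idx + 10]) carries the crtc_id. */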
972 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
973 {
974 	struct drm_mode_object *obj;
975 	struct drm_crtc *crtc;
976 	struct radeon_crtc *radeon_crtc;
977 	struct radeon_cs_packet p3reloc, wait_reg_mem;
978 	int crtc_id;
979 	int r;
980 	uint32_t header, h_idx, reg, wait_reg_mem_info;
981 	volatile uint32_t *ib;
982 
983 	ib = p->ib.ptr;
984 
985 	/* parse the WAIT_REG_MEM */
986 	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
987 	if (r)
988 		return r;
989 
990 	/* check it's a WAIT_REG_MEM */
991 	if (wait_reg_mem.type != PACKET_TYPE3 ||
992 	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
993 		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
994 		return -EINVAL;
995 	}
996 
997 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
998 	/* bit 4 is reg (0) or mem (1) */
999 	if (wait_reg_mem_info & 0x10) {
1000 		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
1001 		return -EINVAL;
1002 	}
1003 	/* waiting for value to be equal */
1004 	if ((wait_reg_mem_info & 0x7) != 0x3) {
1005 		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
1006 		return -EINVAL;
1007 	}
1008 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
1009 		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
1014 		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
1015 		return -EINVAL;
1016 	}
1017 
1018 	/* jump over the NOP */
1019 	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
1020 	if (r)
1021 		return r;
1022 
1023 	h_idx = p->idx - 2;
1024 	p->idx += wait_reg_mem.count + 2;
1025 	p->idx += p3reloc.count + 2;
1026 
1027 	header = radeon_get_ib_value(p, h_idx);
1028 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
1029 	reg = CP_PACKET0_GET_REG(header);
1030 
1031 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1032 	if (!obj) {
1033 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
1034 		return -EINVAL;
1035 	}
1036 	crtc = obj_to_crtc(obj);
1037 	radeon_crtc = to_radeon_crtc(crtc);
1038 	crtc_id = radeon_crtc->crtc_id;
1039 
1040 	if (!crtc->enabled) {
1041 		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
1042 		ib[h_idx + 2] = PACKET2(0);
1043 		ib[h_idx + 3] = PACKET2(0);
1044 		ib[h_idx + 4] = PACKET2(0);
1045 		ib[h_idx + 5] = PACKET2(0);
1046 		ib[h_idx + 6] = PACKET2(0);
1047 		ib[h_idx + 7] = PACKET2(0);
1048 		ib[h_idx + 8] = PACKET2(0);
1049 	} else if (crtc_id == 1) {
1050 		switch (reg) {
1051 		case AVIVO_D1MODE_VLINE_START_END:
1052 			header &= ~R600_CP_PACKET0_REG_MASK;
1053 			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1054 			break;
1055 		default:
1056 			DRM_ERROR("unknown crtc reloc\n");
1057 			return -EINVAL;
1058 		}
1059 		ib[h_idx] = header;
1060 		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
1061 	}
1062 
1063 	return 0;
1064 }
1065 
1066 static int r600_packet0_check(struct radeon_cs_parser *p,
1067 				struct radeon_cs_packet *pkt,
1068 				unsigned idx, unsigned reg)
1069 {
1070 	int r;
1071 
1072 	switch (reg) {
1073 	case AVIVO_D1MODE_VLINE_START_END:
1074 		r = r600_cs_packet_parse_vline(p);
1075 		if (r) {
1076 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1077 					idx, reg);
1078 			return r;
1079 		}
1080 		break;
1081 	default:
1082 		DRM_ERROR( "Forbidden register 0x%04X in cs at %d\n",
1083 		       reg, idx);
1084 		return -EINVAL;
1085 	}
1086 	return 0;
1087 }
1088 
1089 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
1090 				struct radeon_cs_packet *pkt)
1091 {
1092 	unsigned reg, i;
1093 	unsigned idx;
1094 	int r;
1095 
1096 	idx = pkt->idx + 1;
1097 	reg = pkt->reg;
1098 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
1099 		r = r600_packet0_check(p, pkt, idx, reg);
1100 		if (r) {
1101 			return r;
1102 		}
1103 	}
1104 	return 0;
1105 }
1106 
1107 /**
1108  * r600_cs_check_reg() - check if register is authorized or not
1109  * @parser: parser structure holding parsing context
1110  * @reg: register we are testing
1111  * @idx: index into the cs buffer
1112  *
1113  * This function will test against r600_reg_safe_bm and return 0
1114  * if the register is safe. If the register is not flagged as safe, this function
1115  * will test it against a list of registers needing special handling.
1116  */
1117 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1118 {
1119 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
1120 	struct radeon_cs_reloc *reloc;
1121 	u32 m, i, tmp, *ib;
1122 	int r;
1123 
1124 	i = (reg >> 7);
1125 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
1126 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1127 		return -EINVAL;
1128 	}
1129 	m = 1 << ((reg >> 2) & 31);
1130 	if (!(r600_reg_safe_bm[i] & m))
1131 		return 0;
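	/* Each u32 of r600_reg_safe_bm covers 32 dword registers (128 bytes of
	 * register space); e.g. reg 0x028800 selects word 0x028800 >> 7 = 0x510,
	 * bit 0. A clear bit means the register may be written as-is, a set bit
	 * drops into the switch below for special handling. */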
1132 	ib = p->ib.ptr;
1133 	switch (reg) {
1134 	/* force the following regs to 0 in an attempt to disable the out buffer;
1135 	 * we would need to better understand how it works to perform a proper
1136 	 * security check on it (Jerome)
1137 	 */
1138 	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
1139 	case R_008C44_SQ_ESGS_RING_SIZE:
1140 	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
1141 	case R_008C54_SQ_ESTMP_RING_SIZE:
1142 	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
1143 	case R_008C74_SQ_FBUF_RING_SIZE:
1144 	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
1145 	case R_008C5C_SQ_GSTMP_RING_SIZE:
1146 	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
1147 	case R_008C4C_SQ_GSVS_RING_SIZE:
1148 	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
1149 	case R_008C6C_SQ_PSTMP_RING_SIZE:
1150 	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
1151 	case R_008C7C_SQ_REDUC_RING_SIZE:
1152 	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
1153 	case R_008C64_SQ_VSTMP_RING_SIZE:
1154 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
1155 		/* get value to populate the IB, don't remove */
1156 		tmp = radeon_get_ib_value(p, idx);
1157 		ib[idx] = 0;
1158 		break;
1159 	case SQ_CONFIG:
1160 		track->sq_config = radeon_get_ib_value(p, idx);
1161 		break;
1162 	case R_028800_DB_DEPTH_CONTROL:
1163 		track->db_depth_control = radeon_get_ib_value(p, idx);
1164 		track->db_dirty = true;
1165 		break;
1166 	case R_028010_DB_DEPTH_INFO:
1167 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1168 		    r600_cs_packet_next_is_pkt3_nop(p)) {
1169 			r = r600_cs_packet_next_reloc(p, &reloc);
1170 			if (r) {
1171 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
1172 					 "0x%04X\n", reg);
1173 				return -EINVAL;
1174 			}
1175 			track->db_depth_info = radeon_get_ib_value(p, idx);
1176 			ib[idx] &= C_028010_ARRAY_MODE;
1177 			track->db_depth_info &= C_028010_ARRAY_MODE;
1178 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1179 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1180 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1181 			} else {
1182 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1183 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1184 			}
1185 		} else {
1186 			track->db_depth_info = radeon_get_ib_value(p, idx);
1187 		}
1188 		track->db_dirty = true;
1189 		break;
1190 	case R_028004_DB_DEPTH_VIEW:
1191 		track->db_depth_view = radeon_get_ib_value(p, idx);
1192 		track->db_dirty = true;
1193 		break;
1194 	case R_028000_DB_DEPTH_SIZE:
1195 		track->db_depth_size = radeon_get_ib_value(p, idx);
1196 		track->db_depth_size_idx = idx;
1197 		track->db_dirty = true;
1198 		break;
1199 	case R_028AB0_VGT_STRMOUT_EN:
1200 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
1201 		track->streamout_dirty = true;
1202 		break;
1203 	case R_028B20_VGT_STRMOUT_BUFFER_EN:
1204 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
1205 		track->streamout_dirty = true;
1206 		break;
1207 	case VGT_STRMOUT_BUFFER_BASE_0:
1208 	case VGT_STRMOUT_BUFFER_BASE_1:
1209 	case VGT_STRMOUT_BUFFER_BASE_2:
1210 	case VGT_STRMOUT_BUFFER_BASE_3:
1211 		r = r600_cs_packet_next_reloc(p, &reloc);
1212 		if (r) {
1213 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1214 					"0x%04X\n", reg);
1215 			return -EINVAL;
1216 		}
1217 		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1218 		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1219 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1220 		track->vgt_strmout_bo[tmp] = reloc->robj;
1221 		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
1222 		track->streamout_dirty = true;
1223 		break;
1224 	case VGT_STRMOUT_BUFFER_SIZE_0:
1225 	case VGT_STRMOUT_BUFFER_SIZE_1:
1226 	case VGT_STRMOUT_BUFFER_SIZE_2:
1227 	case VGT_STRMOUT_BUFFER_SIZE_3:
1228 		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1229 		/* size in register is DWs, convert to bytes */
1230 		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1231 		track->streamout_dirty = true;
1232 		break;
1233 	case CP_COHER_BASE:
1234 		r = r600_cs_packet_next_reloc(p, &reloc);
1235 		if (r) {
1236 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1237 					"0x%04X\n", reg);
1238 			return -EINVAL;
1239 		}
1240 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1241 		break;
1242 	case R_028238_CB_TARGET_MASK:
1243 		track->cb_target_mask = radeon_get_ib_value(p, idx);
1244 		track->cb_dirty = true;
1245 		break;
1246 	case R_02823C_CB_SHADER_MASK:
1247 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
1248 		break;
1249 	case R_028C04_PA_SC_AA_CONFIG:
1250 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
1251 		track->log_nsamples = tmp;
1252 		track->nsamples = 1 << tmp;
1253 		track->cb_dirty = true;
1254 		break;
1255 	case R_028808_CB_COLOR_CONTROL:
1256 		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
1257 		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
1258 		track->cb_dirty = true;
1259 		break;
1260 	case R_0280A0_CB_COLOR0_INFO:
1261 	case R_0280A4_CB_COLOR1_INFO:
1262 	case R_0280A8_CB_COLOR2_INFO:
1263 	case R_0280AC_CB_COLOR3_INFO:
1264 	case R_0280B0_CB_COLOR4_INFO:
1265 	case R_0280B4_CB_COLOR5_INFO:
1266 	case R_0280B8_CB_COLOR6_INFO:
1267 	case R_0280BC_CB_COLOR7_INFO:
1268 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1269 		     r600_cs_packet_next_is_pkt3_nop(p)) {
1270 			r = r600_cs_packet_next_reloc(p, &reloc);
1271 			if (r) {
1272 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1273 				return -EINVAL;
1274 			}
1275 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1276 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1277 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1278 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1279 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1280 			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
1281 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1282 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1283 			}
1284 		} else {
1285 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1286 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1287 		}
1288 		track->cb_dirty = true;
1289 		break;
1290 	case R_028080_CB_COLOR0_VIEW:
1291 	case R_028084_CB_COLOR1_VIEW:
1292 	case R_028088_CB_COLOR2_VIEW:
1293 	case R_02808C_CB_COLOR3_VIEW:
1294 	case R_028090_CB_COLOR4_VIEW:
1295 	case R_028094_CB_COLOR5_VIEW:
1296 	case R_028098_CB_COLOR6_VIEW:
1297 	case R_02809C_CB_COLOR7_VIEW:
1298 		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
1299 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1300 		track->cb_dirty = true;
1301 		break;
1302 	case R_028060_CB_COLOR0_SIZE:
1303 	case R_028064_CB_COLOR1_SIZE:
1304 	case R_028068_CB_COLOR2_SIZE:
1305 	case R_02806C_CB_COLOR3_SIZE:
1306 	case R_028070_CB_COLOR4_SIZE:
1307 	case R_028074_CB_COLOR5_SIZE:
1308 	case R_028078_CB_COLOR6_SIZE:
1309 	case R_02807C_CB_COLOR7_SIZE:
1310 		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
1311 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
1312 		track->cb_color_size_idx[tmp] = idx;
1313 		track->cb_dirty = true;
1314 		break;
1315 		/* These registers were added late; there is userspace
1316 		 * which does provide relocations for them but sets a
1317 		 * 0 offset. In order to avoid breaking old userspace
1318 		 * we detect this and set the address to point to the last
1319 		 * CB_COLOR0_BASE. Note that if userspace doesn't set
1320 		 * CB_COLOR0_BASE before these registers we will report an
1321 		 * error. Old userspace always sets CB_COLOR0_BASE
1322 		 * before any of this.
1323 		 */
1324 	case R_0280E0_CB_COLOR0_FRAG:
1325 	case R_0280E4_CB_COLOR1_FRAG:
1326 	case R_0280E8_CB_COLOR2_FRAG:
1327 	case R_0280EC_CB_COLOR3_FRAG:
1328 	case R_0280F0_CB_COLOR4_FRAG:
1329 	case R_0280F4_CB_COLOR5_FRAG:
1330 	case R_0280F8_CB_COLOR6_FRAG:
1331 	case R_0280FC_CB_COLOR7_FRAG:
1332 		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
1333 		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
1334 			if (!track->cb_color_base_last[tmp]) {
1335 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1336 				return -EINVAL;
1337 			}
1338 			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1339 			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1340 			ib[idx] = track->cb_color_base_last[tmp];
1341 		} else {
1342 			r = r600_cs_packet_next_reloc(p, &reloc);
1343 			if (r) {
1344 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1345 				return -EINVAL;
1346 			}
1347 			track->cb_color_frag_bo[tmp] = reloc->robj;
1348 			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1349 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1350 		}
1351 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1352 			track->cb_dirty = true;
1353 		}
1354 		break;
1355 	case R_0280C0_CB_COLOR0_TILE:
1356 	case R_0280C4_CB_COLOR1_TILE:
1357 	case R_0280C8_CB_COLOR2_TILE:
1358 	case R_0280CC_CB_COLOR3_TILE:
1359 	case R_0280D0_CB_COLOR4_TILE:
1360 	case R_0280D4_CB_COLOR5_TILE:
1361 	case R_0280D8_CB_COLOR6_TILE:
1362 	case R_0280DC_CB_COLOR7_TILE:
1363 		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
1364 		if (!r600_cs_packet_next_is_pkt3_nop(p)) {
1365 			if (!track->cb_color_base_last[tmp]) {
1366 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1367 				return -EINVAL;
1368 			}
1369 			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1370 			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1371 			ib[idx] = track->cb_color_base_last[tmp];
1372 		} else {
1373 			r = r600_cs_packet_next_reloc(p, &reloc);
1374 			if (r) {
1375 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1376 				return -EINVAL;
1377 			}
1378 			track->cb_color_tile_bo[tmp] = reloc->robj;
1379 			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1380 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1381 		}
1382 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1383 			track->cb_dirty = true;
1384 		}
1385 		break;
1386 	case R_028100_CB_COLOR0_MASK:
1387 	case R_028104_CB_COLOR1_MASK:
1388 	case R_028108_CB_COLOR2_MASK:
1389 	case R_02810C_CB_COLOR3_MASK:
1390 	case R_028110_CB_COLOR4_MASK:
1391 	case R_028114_CB_COLOR5_MASK:
1392 	case R_028118_CB_COLOR6_MASK:
1393 	case R_02811C_CB_COLOR7_MASK:
1394 		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1395 		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
1396 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1397 			track->cb_dirty = true;
1398 		}
1399 		break;
1400 	case CB_COLOR0_BASE:
1401 	case CB_COLOR1_BASE:
1402 	case CB_COLOR2_BASE:
1403 	case CB_COLOR3_BASE:
1404 	case CB_COLOR4_BASE:
1405 	case CB_COLOR5_BASE:
1406 	case CB_COLOR6_BASE:
1407 	case CB_COLOR7_BASE:
1408 		r = r600_cs_packet_next_reloc(p, &reloc);
1409 		if (r) {
1410 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1411 					"0x%04X\n", reg);
1412 			return -EINVAL;
1413 		}
1414 		tmp = (reg - CB_COLOR0_BASE) / 4;
1415 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1416 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1417 		track->cb_color_base_last[tmp] = ib[idx];
1418 		track->cb_color_bo[tmp] = reloc->robj;
1419 		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
1420 		track->cb_dirty = true;
1421 		break;
1422 	case DB_DEPTH_BASE:
1423 		r = r600_cs_packet_next_reloc(p, &reloc);
1424 		if (r) {
1425 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1426 					"0x%04X\n", reg);
1427 			return -EINVAL;
1428 		}
1429 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
1430 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1431 		track->db_bo = reloc->robj;
1432 		track->db_bo_mc = reloc->lobj.gpu_offset;
1433 		track->db_dirty = true;
1434 		break;
1435 	case DB_HTILE_DATA_BASE:
1436 		r = r600_cs_packet_next_reloc(p, &reloc);
1437 		if (r) {
1438 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1439 					"0x%04X\n", reg);
1440 			return -EINVAL;
1441 		}
1442 		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
1443 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1444 		track->htile_bo = reloc->robj;
1445 		track->db_dirty = true;
1446 		break;
1447 	case DB_HTILE_SURFACE:
1448 		track->htile_surface = radeon_get_ib_value(p, idx);
1449 		/* force 8x8 htile width and height */
1450 		ib[idx] |= 3;
1451 		track->db_dirty = true;
1452 		break;
1453 	case SQ_PGM_START_FS:
1454 	case SQ_PGM_START_ES:
1455 	case SQ_PGM_START_VS:
1456 	case SQ_PGM_START_GS:
1457 	case SQ_PGM_START_PS:
1458 	case SQ_ALU_CONST_CACHE_GS_0:
1459 	case SQ_ALU_CONST_CACHE_GS_1:
1460 	case SQ_ALU_CONST_CACHE_GS_2:
1461 	case SQ_ALU_CONST_CACHE_GS_3:
1462 	case SQ_ALU_CONST_CACHE_GS_4:
1463 	case SQ_ALU_CONST_CACHE_GS_5:
1464 	case SQ_ALU_CONST_CACHE_GS_6:
1465 	case SQ_ALU_CONST_CACHE_GS_7:
1466 	case SQ_ALU_CONST_CACHE_GS_8:
1467 	case SQ_ALU_CONST_CACHE_GS_9:
1468 	case SQ_ALU_CONST_CACHE_GS_10:
1469 	case SQ_ALU_CONST_CACHE_GS_11:
1470 	case SQ_ALU_CONST_CACHE_GS_12:
1471 	case SQ_ALU_CONST_CACHE_GS_13:
1472 	case SQ_ALU_CONST_CACHE_GS_14:
1473 	case SQ_ALU_CONST_CACHE_GS_15:
1474 	case SQ_ALU_CONST_CACHE_PS_0:
1475 	case SQ_ALU_CONST_CACHE_PS_1:
1476 	case SQ_ALU_CONST_CACHE_PS_2:
1477 	case SQ_ALU_CONST_CACHE_PS_3:
1478 	case SQ_ALU_CONST_CACHE_PS_4:
1479 	case SQ_ALU_CONST_CACHE_PS_5:
1480 	case SQ_ALU_CONST_CACHE_PS_6:
1481 	case SQ_ALU_CONST_CACHE_PS_7:
1482 	case SQ_ALU_CONST_CACHE_PS_8:
1483 	case SQ_ALU_CONST_CACHE_PS_9:
1484 	case SQ_ALU_CONST_CACHE_PS_10:
1485 	case SQ_ALU_CONST_CACHE_PS_11:
1486 	case SQ_ALU_CONST_CACHE_PS_12:
1487 	case SQ_ALU_CONST_CACHE_PS_13:
1488 	case SQ_ALU_CONST_CACHE_PS_14:
1489 	case SQ_ALU_CONST_CACHE_PS_15:
1490 	case SQ_ALU_CONST_CACHE_VS_0:
1491 	case SQ_ALU_CONST_CACHE_VS_1:
1492 	case SQ_ALU_CONST_CACHE_VS_2:
1493 	case SQ_ALU_CONST_CACHE_VS_3:
1494 	case SQ_ALU_CONST_CACHE_VS_4:
1495 	case SQ_ALU_CONST_CACHE_VS_5:
1496 	case SQ_ALU_CONST_CACHE_VS_6:
1497 	case SQ_ALU_CONST_CACHE_VS_7:
1498 	case SQ_ALU_CONST_CACHE_VS_8:
1499 	case SQ_ALU_CONST_CACHE_VS_9:
1500 	case SQ_ALU_CONST_CACHE_VS_10:
1501 	case SQ_ALU_CONST_CACHE_VS_11:
1502 	case SQ_ALU_CONST_CACHE_VS_12:
1503 	case SQ_ALU_CONST_CACHE_VS_13:
1504 	case SQ_ALU_CONST_CACHE_VS_14:
1505 	case SQ_ALU_CONST_CACHE_VS_15:
1506 		r = r600_cs_packet_next_reloc(p, &reloc);
1507 		if (r) {
1508 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1509 					"0x%04X\n", reg);
1510 			return -EINVAL;
1511 		}
1512 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1513 		break;
1514 	case SX_MEMORY_EXPORT_BASE:
1515 		r = r600_cs_packet_next_reloc(p, &reloc);
1516 		if (r) {
1517 			dev_warn(p->dev, "bad SET_CONFIG_REG "
1518 					"0x%04X\n", reg);
1519 			return -EINVAL;
1520 		}
1521 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1522 		break;
1523 	case SX_MISC:
1524 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1525 		break;
1526 	default:
1527 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1528 		return -EINVAL;
1529 	}
1530 	return 0;
1531 }
1532 
1533 unsigned r600_mip_minify(unsigned size, unsigned level)
1534 {
1535 	unsigned val;
1536 
1537 	val = max(1U, size >> level);
1538 	if (level > 0)
1539 		val = roundup_pow_of_two(val);
1540 	return val;
1541 }
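
/* e.g. r600_mip_minify(100, 3) = roundup_pow_of_two(max(1, 100 >> 3)) = 16 */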
1542 
1543 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1544 			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
1545 			      unsigned block_align, unsigned height_align, unsigned base_align,
1546 			      unsigned *l0_size, unsigned *mipmap_size)
1547 {
1548 	unsigned offset, i, level;
1549 	unsigned width, height, depth, size;
1550 	unsigned blocksize;
1551 	unsigned nbx, nby;
1552 	unsigned nlevels = llevel - blevel + 1;
1553 
1554 	*l0_size = -1;
1555 	blocksize = r600_fmt_get_blocksize(format);
1556 
1557 	w0 = r600_mip_minify(w0, 0);
1558 	h0 = r600_mip_minify(h0, 0);
1559 	d0 = r600_mip_minify(d0, 0);
1560 	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1561 		width = r600_mip_minify(w0, i);
1562 		nbx = r600_fmt_get_nblocksx(format, width);
1563 
1564 		nbx = roundup(nbx, block_align);
1565 
1566 		height = r600_mip_minify(h0, i);
1567 		nby = r600_fmt_get_nblocksy(format, height);
1568 		nby = roundup(nby, height_align);
1569 
1570 		depth = r600_mip_minify(d0, i);
1571 
1572 		size = nbx * nby * blocksize * nsamples;
1573 		if (nfaces)
1574 			size *= nfaces;
1575 		else
1576 			size *= depth;
1577 
1578 		if (i == 0)
1579 			*l0_size = size;
1580 
1581 		if (i == 0 || i == 1)
1582 			offset = roundup(offset, base_align);
1583 
1584 		offset += size;
1585 	}
1586 	*mipmap_size = offset;
1587 	if (llevel == 0)
1588 		*mipmap_size = *l0_size;
1589 	if (!blevel)
1590 		*mipmap_size -= *l0_size;
1591 }
1592 
1593 /**
1594  * r600_check_texture_resource() - check a texture resource for validity
1595  * @p: parser structure holding parsing context
1596  * @idx: index into the cs buffer
1597  * @texture: texture's bo structure
1598  * @mipmap: mipmap's bo structure
1599  *
1600  * This function will check that the resource has valid fields and that
1601  * the texture and mipmap bo objects are big enough to cover this resource.
1602  */
1603 static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
1604 					      struct radeon_bo *texture,
1605 					      struct radeon_bo *mipmap,
1606 					      u64 base_offset,
1607 					      u64 mip_offset,
1608 					      u32 tiling_flags)
1609 {
1610 	struct r600_cs_track *track = p->track;
1611 	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
1612 	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
1613 	u32 height_align, pitch, pitch_align, depth_align;
1614 	u32 barray, larray;
1615 	u64 base_align;
1616 	struct array_mode_checker array_check;
1617 	u32 format;
1618 	bool is_array;
1619 
1620 	/* on legacy kernels we don't perform the advanced checks */
1621 	if (p->rdev == NULL)
1622 		return 0;
1623 
1624 	/* convert to bytes */
1625 	base_offset <<= 8;
1626 	mip_offset <<= 8;
1627 
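	/*
	 * Words 0-5 mirror the six SQ_TEX_RESOURCE dwords of the resource.
	 * Words 2 and 3 hold the texture and mipmap base addresses in
	 * 256-byte units, hence the << 8 to get byte offsets.
	 */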
1628 	word0 = radeon_get_ib_value(p, idx + 0);
1629 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1630 		if (tiling_flags & RADEON_TILING_MACRO)
1631 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1632 		else if (tiling_flags & RADEON_TILING_MICRO)
1633 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1634 	}
1635 	word1 = radeon_get_ib_value(p, idx + 1);
1636 	word2 = radeon_get_ib_value(p, idx + 2) << 8;
1637 	word3 = radeon_get_ib_value(p, idx + 3) << 8;
1638 	word4 = radeon_get_ib_value(p, idx + 4);
1639 	word5 = radeon_get_ib_value(p, idx + 5);
1640 	dim = G_038000_DIM(word0);
1641 	w0 = G_038000_TEX_WIDTH(word0) + 1;
1642 	pitch = (G_038000_PITCH(word0) + 1) * 8;
1643 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
1644 	d0 = G_038004_TEX_DEPTH(word1);
1645 	format = G_038004_DATA_FORMAT(word1);
1646 	blevel = G_038010_BASE_LEVEL(word4);
1647 	llevel = G_038014_LAST_LEVEL(word5);
1648 	/* pitch in texels */
1649 	array_check.array_mode = G_038000_TILE_MODE(word0);
1650 	array_check.group_size = track->group_size;
1651 	array_check.nbanks = track->nbanks;
1652 	array_check.npipes = track->npipes;
1653 	array_check.nsamples = 1;
1654 	array_check.blocksize = r600_fmt_get_blocksize(format);
1655 	nfaces = 1;
1656 	is_array = false;
1657 	switch (dim) {
1658 	case V_038000_SQ_TEX_DIM_1D:
1659 	case V_038000_SQ_TEX_DIM_2D:
1660 	case V_038000_SQ_TEX_DIM_3D:
1661 		break;
1662 	case V_038000_SQ_TEX_DIM_CUBEMAP:
1663 		if (p->family >= CHIP_RV770)
1664 			nfaces = 8;
1665 		else
1666 			nfaces = 6;
1667 		break;
1668 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
1669 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
1670 		is_array = true;
1671 		break;
1672 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1673 		is_array = true;
1674 		/* fall through */
1675 	case V_038000_SQ_TEX_DIM_2D_MSAA:
1676 		array_check.nsamples = 1 << llevel;
1677 		llevel = 0;
1678 		break;
1679 	default:
1680 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1681 		return -EINVAL;
1682 	}
1683 	if (!r600_fmt_is_valid_texture(format, p->family)) {
1684 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1685 			 __func__, __LINE__, format);
1686 		return -EINVAL;
1687 	}
1688 
1689 	if (r600_get_array_mode_alignment(&array_check,
1690 					  &pitch_align, &height_align, &depth_align, &base_align)) {
1691 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1692 			 __func__, __LINE__, G_038000_TILE_MODE(word0));
1693 		return -EINVAL;
1694 	}
1695 
1696 	/* XXX check height as well... */
1697 
1698 	if (!IS_ALIGNED(pitch, pitch_align)) {
1699 		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1700 			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1701 		return -EINVAL;
1702 	}
1703 	if (!IS_ALIGNED(base_offset, base_align)) {
1704 		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1705 			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1706 		return -EINVAL;
1707 	}
1708 	if (!IS_ALIGNED(mip_offset, base_align)) {
1709 		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1710 			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1711 		return -EINVAL;
1712 	}
1713 
1714 	if (blevel > llevel) {
1715 		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
1716 			 blevel, llevel);
1717 	}
1718 	if (is_array) {
1719 		barray = G_038014_BASE_ARRAY(word5);
1720 		larray = G_038014_LAST_ARRAY(word5);
1721 
1722 		nfaces = larray - barray + 1;
1723 	}
1724 	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
1725 			  pitch_align, height_align, base_align,
1726 			  &l0_size, &mipmap_size);
1727 	/* word2, read from the IB, is the byte offset into the texture bo */
1728 	if ((l0_size + word2) > radeon_bo_size(texture)) {
1729 		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
1730 			 w0, h0, pitch_align, height_align,
1731 			 array_check.array_mode, format, word2,
1732 			 l0_size, radeon_bo_size(texture));
1733 		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
1734 		return -EINVAL;
1735 	}
1736 	/* word3, read from the IB, is the byte offset into the mipmap bo */
1737 	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
1738 		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1739 		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
1740 	}
1741 	return 0;
1742 }
1743 
1744 static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1745 {
1746 	u32 m, i;
1747 
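	/*
	 * r600_reg_safe_bm holds one bit per dword register: word (reg >> 7),
	 * bit (reg >> 2) & 31, so each 32-bit word covers 128 bytes of
	 * register space.  A clear bit means the register may be written
	 * without further checking.
	 */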
1748 	i = (reg >> 7);
1749 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
1750 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1751 		return false;
1752 	}
1753 	m = 1 << ((reg >> 2) & 31);
1754 	if (!(r600_reg_safe_bm[i] & m))
1755 		return true;
1756 	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1757 	return false;
1758 }
1759 
1760 static int r600_packet3_check(struct radeon_cs_parser *p,
1761 				struct radeon_cs_packet *pkt)
1762 {
1763 	struct radeon_cs_reloc *reloc;
1764 	struct r600_cs_track *track;
1765 	volatile u32 *ib;
1766 	unsigned idx;
1767 	unsigned i;
1768 	unsigned start_reg, end_reg, reg;
1769 	int r;
1770 	u32 idx_value;
1771 
1772 	track = (struct r600_cs_track *)p->track;
1773 	ib = p->ib.ptr;
1774 	idx = pkt->idx + 1;
1775 	idx_value = radeon_get_ib_value(p, idx);
1776 
1777 	switch (pkt->opcode) {
1778 	case PACKET3_SET_PREDICATION:
1779 	{
1780 		int pred_op;
1781 		int tmp;
1782 		uint64_t offset;
1783 
1784 		if (pkt->count != 1) {
1785 			DRM_ERROR("bad SET PREDICATION\n");
1786 			return -EINVAL;
1787 		}
1788 
1789 		tmp = radeon_get_ib_value(p, idx + 1);
1790 		pred_op = (tmp >> 16) & 0x7;
1791 
1792 		/* for the clear predicate operation */
1793 		if (pred_op == 0)
1794 			return 0;
1795 
1796 		if (pred_op > 2) {
1797 			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1798 			return -EINVAL;
1799 		}
1800 
1801 		r = r600_cs_packet_next_reloc(p, &reloc);
1802 		if (r) {
1803 			DRM_ERROR("bad SET PREDICATION\n");
1804 			return -EINVAL;
1805 		}
1806 
1807 		offset = reloc->lobj.gpu_offset +
1808 		         (idx_value & 0xfffffff0) +
1809 		         ((u64)(tmp & 0xff) << 32);
1810 
1811 		ib[idx + 0] = offset;
1812 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1813 	}
1814 	break;
1815 
1816 	case PACKET3_START_3D_CMDBUF:
1817 		if (p->family >= CHIP_RV770 || pkt->count) {
1818 			DRM_ERROR("bad START_3D\n");
1819 			return -EINVAL;
1820 		}
1821 		break;
1822 	case PACKET3_CONTEXT_CONTROL:
1823 		if (pkt->count != 1) {
1824 			DRM_ERROR("bad CONTEXT_CONTROL\n");
1825 			return -EINVAL;
1826 		}
1827 		break;
1828 	case PACKET3_INDEX_TYPE:
1829 	case PACKET3_NUM_INSTANCES:
1830 		if (pkt->count) {
1831 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1832 			return -EINVAL;
1833 		}
1834 		break;
1835 	case PACKET3_DRAW_INDEX:
1836 	{
1837 		uint64_t offset;
1838 		if (pkt->count != 3) {
1839 			DRM_ERROR("bad DRAW_INDEX\n");
1840 			return -EINVAL;
1841 		}
1842 		r = r600_cs_packet_next_reloc(p, &reloc);
1843 		if (r) {
1844 			DRM_ERROR("bad DRAW_INDEX\n");
1845 			return -EINVAL;
1846 		}
1847 
1848 		offset = reloc->lobj.gpu_offset +
1849 		         idx_value +
1850 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1851 
1852 		ib[idx+0] = offset;
1853 		ib[idx+1] = upper_32_bits(offset) & 0xff;
1854 
1855 		r = r600_cs_track_check(p);
1856 		if (r) {
1857 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1858 			return r;
1859 		}
1860 		break;
1861 	}
1862 	case PACKET3_DRAW_INDEX_AUTO:
1863 		if (pkt->count != 1) {
1864 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1865 			return -EINVAL;
1866 		}
1867 		r = r600_cs_track_check(p);
1868 		if (r) {
1869 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1870 			return r;
1871 		}
1872 		break;
1873 	case PACKET3_DRAW_INDEX_IMMD_BE:
1874 	case PACKET3_DRAW_INDEX_IMMD:
1875 		if (pkt->count < 2) {
1876 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1877 			return -EINVAL;
1878 		}
1879 		r = r600_cs_track_check(p);
1880 		if (r) {
1881 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1882 			return r;
1883 		}
1884 		break;
1885 	case PACKET3_WAIT_REG_MEM:
1886 		if (pkt->count != 5) {
1887 			DRM_ERROR("bad WAIT_REG_MEM\n");
1888 			return -EINVAL;
1889 		}
1890 		/* bit 4 is reg (0) or mem (1) */
1891 		if (idx_value & 0x10) {
1892 			uint64_t offset;
1893 
1894 			r = r600_cs_packet_next_reloc(p, &reloc);
1895 			if (r) {
1896 				DRM_ERROR("bad WAIT_REG_MEM\n");
1897 				return -EINVAL;
1898 			}
1899 
1900 			offset = reloc->lobj.gpu_offset +
1901 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
1902 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1903 
1904 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
1905 			ib[idx+2] = upper_32_bits(offset) & 0xff;
1906 		}
1907 		break;
1908 	case PACKET3_CP_DMA:
1909 	{
1910 		u32 command, size;
1911 		u64 offset, tmp;
1912 		if (pkt->count != 4) {
1913 			DRM_ERROR("bad CP DMA\n");
1914 			return -EINVAL;
1915 		}
1916 		command = radeon_get_ib_value(p, idx+4);
1917 		size = command & 0x1fffff;
1918 		if (command & PACKET3_CP_DMA_CMD_SAS) {
1919 			/* src address space is register */
1920 			DRM_ERROR("CP DMA SAS not supported\n");
1921 			return -EINVAL;
1922 		} else {
1923 			if (command & PACKET3_CP_DMA_CMD_SAIC) {
1924 				DRM_ERROR("CP DMA SAIC only supported for registers\n");
1925 				return -EINVAL;
1926 			}
1927 			/* src address space is memory */
1928 			r = r600_cs_packet_next_reloc(p, &reloc);
1929 			if (r) {
1930 				DRM_ERROR("bad CP DMA SRC\n");
1931 				return -EINVAL;
1932 			}
1933 
1934 			tmp = radeon_get_ib_value(p, idx) +
1935 				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1936 
1937 			offset = reloc->lobj.gpu_offset + tmp;
1938 
1939 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1940 				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
1941 					 tmp + size, radeon_bo_size(reloc->robj));
1942 				return -EINVAL;
1943 			}
1944 
1945 			ib[idx] = offset;
1946 			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1947 		}
1948 		if (command & PACKET3_CP_DMA_CMD_DAS) {
1949 			/* dst address space is register */
1950 			DRM_ERROR("CP DMA DAS not supported\n");
1951 			return -EINVAL;
1952 		} else {
1953 			/* dst address space is memory */
1954 			if (command & PACKET3_CP_DMA_CMD_DAIC) {
1955 				DRM_ERROR("CP DMA DAIC only supported for registers\n");
1956 				return -EINVAL;
1957 			}
1958 			r = r600_cs_packet_next_reloc(p, &reloc);
1959 			if (r) {
1960 				DRM_ERROR("bad CP DMA DST\n");
1961 				return -EINVAL;
1962 			}
1963 
1964 			tmp = radeon_get_ib_value(p, idx+2) +
1965 				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
1966 
1967 			offset = reloc->lobj.gpu_offset + tmp;
1968 
1969 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1970 				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
1971 					 tmp + size, radeon_bo_size(reloc->robj));
1972 				return -EINVAL;
1973 			}
1974 
1975 			ib[idx+2] = offset;
1976 			ib[idx+3] = upper_32_bits(offset) & 0xff;
1977 		}
1978 		break;
1979 	}
1980 	case PACKET3_SURFACE_SYNC:
1981 		if (pkt->count != 3) {
1982 			DRM_ERROR("bad SURFACE_SYNC\n");
1983 			return -EINVAL;
1984 		}
1985 		/* 0xffffffff/0x0 is flush all cache flag */
1986 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1987 		    radeon_get_ib_value(p, idx + 2) != 0) {
1988 			r = r600_cs_packet_next_reloc(p, &reloc);
1989 			if (r) {
1990 				DRM_ERROR("bad SURFACE_SYNC\n");
1991 				return -EINVAL;
1992 			}
1993 			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1994 		}
1995 		break;
1996 	case PACKET3_EVENT_WRITE:
1997 		if (pkt->count != 2 && pkt->count != 0) {
1998 			DRM_ERROR("bad EVENT_WRITE\n");
1999 			return -EINVAL;
2000 		}
2001 		if (pkt->count) {
2002 			uint64_t offset;
2003 
2004 			r = r600_cs_packet_next_reloc(p, &reloc);
2005 			if (r) {
2006 				DRM_ERROR("bad EVENT_WRITE\n");
2007 				return -EINVAL;
2008 			}
2009 			offset = reloc->lobj.gpu_offset +
2010 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
2011 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2012 
2013 			ib[idx+1] = offset & 0xfffffff8;
2014 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2015 		}
2016 		break;
2017 	case PACKET3_EVENT_WRITE_EOP:
2018 	{
2019 		uint64_t offset;
2020 
2021 		if (pkt->count != 4) {
2022 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
2023 			return -EINVAL;
2024 		}
2025 		r = r600_cs_packet_next_reloc(p, &reloc);
2026 		if (r) {
2027 			DRM_ERROR("bad EVENT_WRITE\n");
2028 			return -EINVAL;
2029 		}
2030 
2031 		offset = reloc->lobj.gpu_offset +
2032 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
2033 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
2034 
2035 		ib[idx+1] = offset & 0xfffffffc;
2036 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2037 		break;
2038 	}
2039 	case PACKET3_SET_CONFIG_REG:
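		/*
		 * Payload: dword 0 is the start offset in dwords from
		 * PACKET3_SET_CONFIG_REG_OFFSET, followed by pkt->count values
		 * written to consecutive registers; each one goes through
		 * r600_cs_check_reg().
		 */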
2040 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
2041 		end_reg = 4 * pkt->count + start_reg - 4;
2042 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
2043 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
2044 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
2045 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
2046 			return -EINVAL;
2047 		}
2048 		for (i = 0; i < pkt->count; i++) {
2049 			reg = start_reg + (4 * i);
2050 			r = r600_cs_check_reg(p, reg, idx+1+i);
2051 			if (r)
2052 				return r;
2053 		}
2054 		break;
2055 	case PACKET3_SET_CONTEXT_REG:
2056 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
2057 		end_reg = 4 * pkt->count + start_reg - 4;
2058 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
2059 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
2060 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
2061 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
2062 			return -EINVAL;
2063 		}
2064 		for (i = 0; i < pkt->count; i++) {
2065 			reg = start_reg + (4 * i);
2066 			r = r600_cs_check_reg(p, reg, idx+1+i);
2067 			if (r)
2068 				return r;
2069 		}
2070 		break;
2071 	case PACKET3_SET_RESOURCE:
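		/*
		 * Each resource entry is 7 dwords.  For textures, dwords 2 and
		 * 3 carry the base and mip addresses in 256-byte units; for
		 * vertex buffers, dword 0 is the base offset and dword 1 holds
		 * the size minus one.
		 */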
2072 		if (pkt->count % 7) {
2073 			DRM_ERROR("bad SET_RESOURCE\n");
2074 			return -EINVAL;
2075 		}
2076 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
2077 		end_reg = 4 * pkt->count + start_reg - 4;
2078 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
2079 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
2080 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
2081 			DRM_ERROR("bad SET_RESOURCE\n");
2082 			return -EINVAL;
2083 		}
2084 		for (i = 0; i < (pkt->count / 7); i++) {
2085 			struct radeon_bo *texture, *mipmap;
2086 			u32 size, offset, base_offset, mip_offset;
2087 
2088 			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
2089 			case SQ_TEX_VTX_VALID_TEXTURE:
2090 				/* tex base */
2091 				r = r600_cs_packet_next_reloc(p, &reloc);
2092 				if (r) {
2093 					DRM_ERROR("bad SET_RESOURCE\n");
2094 					return -EINVAL;
2095 				}
2096 				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2097 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
2098 					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
2099 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
2100 					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
2101 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
2102 				}
2103 				texture = reloc->robj;
2104 				/* tex mip base */
2105 				r = r600_cs_packet_next_reloc(p, &reloc);
2106 				if (r) {
2107 					DRM_ERROR("bad SET_RESOURCE\n");
2108 					return -EINVAL;
2109 				}
2110 				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2111 				mipmap = reloc->robj;
2112 				r = r600_check_texture_resource(p,  idx+(i*7)+1,
2113 								texture, mipmap,
2114 								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
2115 								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
2116 								reloc->lobj.tiling_flags);
2117 				if (r)
2118 					return r;
2119 				ib[idx+1+(i*7)+2] += base_offset;
2120 				ib[idx+1+(i*7)+3] += mip_offset;
2121 				break;
2122 			case SQ_TEX_VTX_VALID_BUFFER:
2123 			{
2124 				uint64_t offset64;
2125 				/* vtx base */
2126 				r = r600_cs_packet_next_reloc(p, &reloc);
2127 				if (r) {
2128 					DRM_ERROR("bad SET_RESOURCE\n");
2129 					return -EINVAL;
2130 				}
2131 				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
2132 				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
2133 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
2134 					/* force size to size of the buffer */
2135 					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
2136 						 size + offset, radeon_bo_size(reloc->robj));
2137 					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
2138 				}
2139 
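				/*
				 * XXX the (i*8) strides below look inconsistent with the
				 * (i*7) used for the rest of this 7-dword resource loop;
				 * it only matters when a packet carries more than one
				 * resource.
				 */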
2140 				offset64 = reloc->lobj.gpu_offset + offset;
2141 				ib[idx+1+(i*8)+0] = offset64;
2142 				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2143 						    (upper_32_bits(offset64) & 0xff);
2144 				break;
2145 			}
2146 			case SQ_TEX_VTX_INVALID_TEXTURE:
2147 			case SQ_TEX_VTX_INVALID_BUFFER:
2148 			default:
2149 				DRM_ERROR("bad SET_RESOURCE\n");
2150 				return -EINVAL;
2151 			}
2152 		}
2153 		break;
2154 	case PACKET3_SET_ALU_CONST:
2155 		if (track->sq_config & DX9_CONSTS) {
2156 			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
2157 			end_reg = 4 * pkt->count + start_reg - 4;
2158 			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
2159 			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
2160 			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
2161 				DRM_ERROR("bad SET_ALU_CONST\n");
2162 				return -EINVAL;
2163 			}
2164 		}
2165 		break;
2166 	case PACKET3_SET_BOOL_CONST:
2167 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
2168 		end_reg = 4 * pkt->count + start_reg - 4;
2169 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
2170 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2171 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2172 			DRM_ERROR("bad SET_BOOL_CONST\n");
2173 			return -EINVAL;
2174 		}
2175 		break;
2176 	case PACKET3_SET_LOOP_CONST:
2177 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
2178 		end_reg = 4 * pkt->count + start_reg - 4;
2179 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
2180 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2181 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2182 			DRM_ERROR("bad SET_LOOP_CONST\n");
2183 			return -EINVAL;
2184 		}
2185 		break;
2186 	case PACKET3_SET_CTL_CONST:
2187 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
2188 		end_reg = 4 * pkt->count + start_reg - 4;
2189 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
2190 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2191 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2192 			DRM_ERROR("bad SET_CTL_CONST\n");
2193 			return -EINVAL;
2194 		}
2195 		break;
2196 	case PACKET3_SET_SAMPLER:
2197 		if (pkt->count % 3) {
2198 			DRM_ERROR("bad SET_SAMPLER\n");
2199 			return -EINVAL;
2200 		}
2201 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
2202 		end_reg = 4 * pkt->count + start_reg - 4;
2203 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
2204 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
2205 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
2206 			DRM_ERROR("bad SET_SAMPLER\n");
2207 			return -EINVAL;
2208 		}
2209 		break;
2210 	case PACKET3_STRMOUT_BASE_UPDATE:
2211 		/* RS780 and RS880 also need this */
2212 		if (p->family < CHIP_RS780) {
2213 			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2214 			return -EINVAL;
2215 		}
2216 		if (pkt->count != 1) {
2217 			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
2218 			return -EINVAL;
2219 		}
2220 		if (idx_value > 3) {
2221 			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
2222 			return -EINVAL;
2223 		}
2224 		{
2225 			u64 offset;
2226 
2227 			r = r600_cs_packet_next_reloc(p, &reloc);
2228 			if (r) {
2229 				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2230 				return -EINVAL;
2231 			}
2232 
2233 			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
2234 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
2235 				return -EINVAL;
2236 			}
2237 
2238 			offset = radeon_get_ib_value(p, idx+1) << 8;
2239 			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
2240 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
2241 					  offset, track->vgt_strmout_bo_offset[idx_value]);
2242 				return -EINVAL;
2243 			}
2244 
2245 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2246 				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
2247 					  offset + 4, radeon_bo_size(reloc->robj));
2248 				return -EINVAL;
2249 			}
2250 			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2251 		}
2252 		break;
2253 	case PACKET3_SURFACE_BASE_UPDATE:
2254 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2255 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2256 			return -EINVAL;
2257 		}
2258 		if (pkt->count) {
2259 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2260 			return -EINVAL;
2261 		}
2262 		break;
2263 	case PACKET3_STRMOUT_BUFFER_UPDATE:
2264 		if (pkt->count != 4) {
2265 			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2266 			return -EINVAL;
2267 		}
2268 		/* Updating memory at DST_ADDRESS. */
2269 		if (idx_value & 0x1) {
2270 			u64 offset;
2271 			r = r600_cs_packet_next_reloc(p, &reloc);
2272 			if (r) {
2273 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2274 				return -EINVAL;
2275 			}
2276 			offset = radeon_get_ib_value(p, idx+1);
2277 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2278 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2279 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
2280 					  offset + 4, radeon_bo_size(reloc->robj));
2281 				return -EINVAL;
2282 			}
2283 			offset += reloc->lobj.gpu_offset;
2284 			ib[idx+1] = offset;
2285 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2286 		}
2287 		/* Reading data from SRC_ADDRESS. */
2288 		if (((idx_value >> 1) & 0x3) == 2) {
2289 			u64 offset;
2290 			r = r600_cs_packet_next_reloc(p, &reloc);
2291 			if (r) {
2292 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2293 				return -EINVAL;
2294 			}
2295 			offset = radeon_get_ib_value(p, idx+3);
2296 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2297 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2298 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
2299 					  offset + 4, radeon_bo_size(reloc->robj));
2300 				return -EINVAL;
2301 			}
2302 			offset += reloc->lobj.gpu_offset;
2303 			ib[idx+3] = offset;
2304 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2305 		}
2306 		break;
2307 	case PACKET3_MEM_WRITE:
2308 	{
2309 		u64 offset;
2310 
2311 		if (pkt->count != 3) {
2312 			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2313 			return -EINVAL;
2314 		}
2315 		r = r600_cs_packet_next_reloc(p, &reloc);
2316 		if (r) {
2317 			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2318 			return -EINVAL;
2319 		}
2320 		offset = radeon_get_ib_value(p, idx+0);
2321 		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2322 		if (offset & 0x7) {
2323 			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2324 			return -EINVAL;
2325 		}
2326 		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2327 			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2328 				  offset + 8, radeon_bo_size(reloc->robj));
2329 			return -EINVAL;
2330 		}
2331 		offset += reloc->lobj.gpu_offset;
2332 		ib[idx+0] = offset;
2333 		ib[idx+1] = upper_32_bits(offset) & 0xff;
2334 		break;
2335 	}
2336 	case PACKET3_COPY_DW:
2337 		if (pkt->count != 4) {
2338 			DRM_ERROR("bad COPY_DW (invalid count)\n");
2339 			return -EINVAL;
2340 		}
2341 		if (idx_value & 0x1) {
2342 			u64 offset;
2343 			/* SRC is memory. */
2344 			r = r600_cs_packet_next_reloc(p, &reloc);
2345 			if (r) {
2346 				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2347 				return -EINVAL;
2348 			}
2349 			offset = radeon_get_ib_value(p, idx+1);
2350 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2351 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2352 				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
2353 					  offset + 4, radeon_bo_size(reloc->robj));
2354 				return -EINVAL;
2355 			}
2356 			offset += reloc->lobj.gpu_offset;
2357 			ib[idx+1] = offset;
2358 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2359 		} else {
2360 			/* SRC is a reg. */
2361 			reg = radeon_get_ib_value(p, idx+1) << 2;
2362 			if (!r600_is_safe_reg(p, reg, idx+1))
2363 				return -EINVAL;
2364 		}
2365 		if (idx_value & 0x2) {
2366 			u64 offset;
2367 			/* DST is memory. */
2368 			r = r600_cs_packet_next_reloc(p, &reloc);
2369 			if (r) {
2370 				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2371 				return -EINVAL;
2372 			}
2373 			offset = radeon_get_ib_value(p, idx+3);
2374 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2375 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2376 				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
2377 					  offset + 4, radeon_bo_size(reloc->robj));
2378 				return -EINVAL;
2379 			}
2380 			offset += reloc->lobj.gpu_offset;
2381 			ib[idx+3] = offset;
2382 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2383 		} else {
2384 			/* DST is a reg. */
2385 			reg = radeon_get_ib_value(p, idx+3) << 2;
2386 			if (!r600_is_safe_reg(p, reg, idx+3))
2387 				return -EINVAL;
2388 		}
2389 		break;
2390 	case PACKET3_NOP:
2391 		break;
2392 	default:
2393 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2394 		return -EINVAL;
2395 	}
2396 	return 0;
2397 }
2398 
2399 int r600_cs_parse(struct radeon_cs_parser *p)
2400 {
2401 	struct radeon_cs_packet pkt;
2402 	struct r600_cs_track *track;
2403 	int r;
2404 
2405 	if (p->track == NULL) {
2406 		/* initialize tracker, we are in kms */
2407 		track = kzalloc(sizeof(*track), GFP_KERNEL);
2408 		if (track == NULL)
2409 			return -ENOMEM;
2410 		r600_cs_track_init(track);
2411 		if (p->rdev->family < CHIP_RV770) {
2412 			track->npipes = p->rdev->config.r600.tiling_npipes;
2413 			track->nbanks = p->rdev->config.r600.tiling_nbanks;
2414 			track->group_size = p->rdev->config.r600.tiling_group_size;
2415 		} else if (p->rdev->family <= CHIP_RV740) {
2416 			track->npipes = p->rdev->config.rv770.tiling_npipes;
2417 			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
2418 			track->group_size = p->rdev->config.rv770.tiling_group_size;
2419 		}
2420 		p->track = track;
2421 	}
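	/*
	 * Walk the IB one packet at a time: type-0 packets program registers,
	 * type-3 packets carry the state and draw commands checked above, and
	 * type-2 packets are padding.
	 */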
2422 	do {
2423 		r = r600_cs_packet_parse(p, &pkt, p->idx);
2424 		if (r) {
2425 			kfree(p->track);
2426 			p->track = NULL;
2427 			return r;
2428 		}
2429 		p->idx += pkt.count + 2;
2430 		switch (pkt.type) {
2431 		case PACKET_TYPE0:
2432 			r = r600_cs_parse_packet0(p, &pkt);
2433 			break;
2434 		case PACKET_TYPE2:
2435 			break;
2436 		case PACKET_TYPE3:
2437 			r = r600_packet3_check(p, &pkt);
2438 			break;
2439 		default:
2440 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2441 			kfree(p->track);
2442 			p->track = NULL;
2443 			return -EINVAL;
2444 		}
2445 		if (r) {
2446 			kfree(p->track);
2447 			p->track = NULL;
2448 			return r;
2449 		}
2450 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2451 #if 0
2452 	for (r = 0; r < p->ib.length_dw; r++) {
2453 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
2454 		mdelay(1);
2455 	}
2456 #endif
2457 	kfree(p->track);
2458 	p->track = NULL;
2459 	return 0;
2460 }
2461 
2462 /* don't these need UMS functions? */
2463 #if 0
2464 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2465 {
2466 	if (p->chunk_relocs_idx == -1) {
2467 		return 0;
2468 	}
2469 	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
2470 	if (p->relocs == NULL) {
2471 		return -ENOMEM;
2472 	}
2473 	return 0;
2474 }
2475 
2476 /**
2477  * cs_parser_fini() - clean parser states
2478  * @parser:	parser structure holding parsing context.
2479  * @error:	error number
2480  *
2481  * If error is set, unvalidate the buffers; otherwise just free the memory
2482  * used by the parsing context.
2483  **/
2484 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2485 {
2486 	unsigned i;
2487 
2488 	kfree(parser->relocs);
2489 	for (i = 0; i < parser->nchunks; i++) {
2490 		kfree(parser->chunks[i].kdata);
2491 		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
2492 			kfree(parser->chunks[i].kpage[0]);
2493 			kfree(parser->chunks[i].kpage[1]);
2494 		}
2495 	}
2496 	kfree(parser->chunks);
2497 	kfree(parser->chunks_array);
2498 }
2499 
2500 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2501 			unsigned family, u32 *ib, int *l)
2502 {
2503 	struct radeon_cs_parser parser;
2504 	struct radeon_cs_chunk *ib_chunk;
2505 	struct r600_cs_track *track;
2506 	int r;
2507 
2508 	/* initialize tracker */
2509 	track = kzalloc(sizeof(*track), GFP_KERNEL);
2510 	if (track == NULL)
2511 		return -ENOMEM;
2512 	r600_cs_track_init(track);
2513 	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
2514 	/* initialize parser */
2515 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
2516 	parser.filp = filp;
2517 #ifdef notyet
2518 	parser.dev = dev;
2519 #endif
2520 	parser.rdev = NULL;
2521 	parser.family = family;
2522 	parser.track = track;
2523 	parser.ib.ptr = ib;
2524 	r = radeon_cs_parser_init(&parser, data);
2525 	if (r) {
2526 		DRM_ERROR("Failed to initialize parser !\n");
2527 		r600_cs_parser_fini(&parser, r);
2528 		return r;
2529 	}
2530 	r = r600_cs_parser_relocs_legacy(&parser);
2531 	if (r) {
2532 		DRM_ERROR("Failed to parse relocation !\n");
2533 		r600_cs_parser_fini(&parser, r);
2534 		return r;
2535 	}
2536 	/* Copy the packet into the IB, the parser will read from the
2537 	 * input memory (cached) and write to the IB (which can be
2538 	 * uncached). */
2539 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
2540 	parser.ib.length_dw = ib_chunk->length_dw;
2541 	*l = parser.ib.length_dw;
2542 	r = r600_cs_parse(&parser);
2543 	if (r) {
2544 		DRM_ERROR("Invalid command stream !\n");
2545 		r600_cs_parser_fini(&parser, r);
2546 		return r;
2547 	}
2548 	r = radeon_cs_finish_pages(&parser);
2549 	if (r) {
2550 		DRM_ERROR("Invalid command stream !\n");
2551 		r600_cs_parser_fini(&parser, r);
2552 		return r;
2553 	}
2554 	r600_cs_parser_fini(&parser, r);
2555 	return r;
2556 }
2557 #endif
2558 
2559 void r600_cs_legacy_init(void)
2560 {
2561 	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
2562 }
2563 
2564 /*
2565  *  DMA
2566  */
2567 /**
2568  * r600_dma_cs_next_reloc() - parse next reloc
2569  * @p:		parser structure holding parsing context.
2570  * @cs_reloc:		reloc information
2571  *
2572  * Return the next reloc from the relocation chunk for the
2573  * DMA ring, in submission order.
2574  **/
2575 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2576 			   struct radeon_cs_reloc **cs_reloc)
2577 {
2578 	struct radeon_cs_chunk *relocs_chunk;
2579 	unsigned idx;
2580 
2581 	*cs_reloc = NULL;
2582 	if (p->chunk_relocs_idx == -1) {
2583 		DRM_ERROR("No relocation chunk !\n");
2584 		return -EINVAL;
2585 	}
2586 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2587 	idx = p->dma_reloc_idx;
2588 	if (idx >= p->nrelocs) {
2589 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2590 			  idx, p->nrelocs);
2591 		return -EINVAL;
2592 	}
2593 	*cs_reloc = p->relocs_ptr[idx];
2594 	p->dma_reloc_idx++;
2595 	return 0;
2596 }
2597 
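/*
 * DMA packet header layout (dword 0): bits 31:28 select the command,
 * bit 23 is the tiled flag and bits 15:0 give the dword count.
 */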
2598 #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2599 #define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
2600 #define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
2601 
2602 /**
2603  * r600_dma_cs_parse() - parse the DMA IB
2604  * @p:		parser structure holding parsing context.
2605  *
2606  * Parses the DMA IB from the CS ioctl and updates
2607  * the GPU addresses based on the reloc information and
2608  * checks for errors. (R6xx-R7xx)
2609  * Returns 0 for success and an error on failure.
2610  **/
2611 int r600_dma_cs_parse(struct radeon_cs_parser *p)
2612 {
2613 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2614 	struct radeon_cs_reloc *src_reloc, *dst_reloc;
2615 	u32 header, cmd, count, tiled;
2616 	volatile u32 *ib = p->ib.ptr;
2617 	u32 idx, idx_value;
2618 	u64 src_offset, dst_offset;
2619 	int r;
2620 
2621 	do {
2622 		if (p->idx >= ib_chunk->length_dw) {
2623 			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2624 				  p->idx, ib_chunk->length_dw);
2625 			return -EINVAL;
2626 		}
2627 		idx = p->idx;
2628 		header = radeon_get_ib_value(p, idx);
2629 		cmd = GET_DMA_CMD(header);
2630 		count = GET_DMA_COUNT(header);
2631 		tiled = GET_DMA_T(header);
2632 
2633 		switch (cmd) {
2634 		case DMA_PACKET_WRITE:
2635 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2636 			if (r) {
2637 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
2638 				return -EINVAL;
2639 			}
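			/*
			 * Tiled writes give the destination address in 256-byte
			 * units in dword 1; linear writes split a byte address
			 * across dword 1 (low 32 bits) and dword 2 bits 7:0.
			 */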
2640 			if (tiled) {
2641 				dst_offset = radeon_get_ib_value(p, idx+1);
2642 				dst_offset <<= 8;
2643 
2644 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2645 				p->idx += count + 5;
2646 			} else {
2647 				dst_offset = radeon_get_ib_value(p, idx+1);
2648 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2649 
2650 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2651 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2652 				p->idx += count + 3;
2653 			}
2654 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2655 				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2656 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2657 				return -EINVAL;
2658 			}
2659 			break;
2660 		case DMA_PACKET_COPY:
2661 			r = r600_dma_cs_next_reloc(p, &src_reloc);
2662 			if (r) {
2663 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2664 				return -EINVAL;
2665 			}
2666 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2667 			if (r) {
2668 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2669 				return -EINVAL;
2670 			}
2671 			if (tiled) {
2672 				idx_value = radeon_get_ib_value(p, idx + 2);
2673 				/* detile bit */
2674 				if (idx_value & (1 << 31)) {
2675 					/* tiled src, linear dst */
2676 					src_offset = radeon_get_ib_value(p, idx+1);
2677 					src_offset <<= 8;
2678 					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2679 
2680 					dst_offset = radeon_get_ib_value(p, idx+5);
2681 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2682 					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2683 					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2684 				} else {
2685 					/* linear src, tiled dst */
2686 					src_offset = radeon_get_ib_value(p, idx+5);
2687 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2688 					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2689 					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2690 
2691 					dst_offset = radeon_get_ib_value(p, idx+1);
2692 					dst_offset <<= 8;
2693 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2694 				}
2695 				p->idx += 7;
2696 			} else {
2697 				if (p->family >= CHIP_RV770) {
2698 					src_offset = radeon_get_ib_value(p, idx+2);
2699 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2700 					dst_offset = radeon_get_ib_value(p, idx+1);
2701 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2702 
2703 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2704 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2705 					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2706 					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2707 					p->idx += 5;
2708 				} else {
2709 					src_offset = radeon_get_ib_value(p, idx+2);
2710 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2711 					dst_offset = radeon_get_ib_value(p, idx+1);
2712 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
2713 
2714 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2715 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2716 					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2717 					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
2718 					p->idx += 4;
2719 				}
2720 			}
2721 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2722 				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
2723 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2724 				return -EINVAL;
2725 			}
2726 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2727 				dev_warn(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
2728 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2729 				return -EINVAL;
2730 			}
2731 			break;
2732 		case DMA_PACKET_CONSTANT_FILL:
2733 			if (p->family < CHIP_RV770) {
2734 				DRM_ERROR("Constant Fill is 7xx only !\n");
2735 				return -EINVAL;
2736 			}
2737 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2738 			if (r) {
2739 				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
2740 				return -EINVAL;
2741 			}
2742 			dst_offset = radeon_get_ib_value(p, idx+1);
2743 			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
2744 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2745 				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
2746 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2747 				return -EINVAL;
2748 			}
2749 			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2750 			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
2751 			p->idx += 4;
2752 			break;
2753 		case DMA_PACKET_NOP:
2754 			p->idx += 1;
2755 			break;
2756 		default:
2757 			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2758 			return -EINVAL;
2759 		}
2760 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2761 #if 0
2762 	for (r = 0; r < p->ib.length_dw; r++) {
2763 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
2764 		mdelay(1);
2765 	}
2766 #endif
2767 	return 0;
2768 }
2769