1 /*	$OpenBSD: ni.c,v 1.8 2014/07/12 18:48:52 tedu Exp $	*/
2 /*
3  * Copyright 2010 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors: Alex Deucher
24  */
25 #include <dev/pci/drm/drmP.h>
26 #include "radeon.h"
27 #include "radeon_asic.h"
28 #include <dev/pci/drm/radeon_drm.h>
29 #include "nid.h"
30 #include "atom.h"
31 #include "ni_reg.h"
32 #include "cayman_blit_shaders.h"
33 
34 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
35 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
36 extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
37 extern void evergreen_mc_program(struct radeon_device *rdev);
38 extern void evergreen_irq_suspend(struct radeon_device *rdev);
39 extern int evergreen_mc_init(struct radeon_device *rdev);
40 extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
41 extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
42 extern void si_rlc_fini(struct radeon_device *rdev);
43 extern int si_rlc_init(struct radeon_device *rdev);
44 void cayman_cp_int_cntl_setup(struct radeon_device *rdev, int ring, u32 cp_int_cntl);
45 
46 #define EVERGREEN_PFP_UCODE_SIZE 1120
47 #define EVERGREEN_PM4_UCODE_SIZE 1376
48 #define EVERGREEN_RLC_UCODE_SIZE 768
49 #define BTC_MC_UCODE_SIZE 6024
50 
51 #define CAYMAN_PFP_UCODE_SIZE 2176
52 #define CAYMAN_PM4_UCODE_SIZE 2176
53 #define CAYMAN_RLC_UCODE_SIZE 1024
54 #define CAYMAN_MC_UCODE_SIZE 6037
55 
56 #define ARUBA_RLC_UCODE_SIZE 1536
57 
58 /* Firmware Names */
59 MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
60 MODULE_FIRMWARE("radeon/BARTS_me.bin");
61 MODULE_FIRMWARE("radeon/BARTS_mc.bin");
62 MODULE_FIRMWARE("radeon/BTC_rlc.bin");
63 MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
64 MODULE_FIRMWARE("radeon/TURKS_me.bin");
65 MODULE_FIRMWARE("radeon/TURKS_mc.bin");
66 MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
67 MODULE_FIRMWARE("radeon/CAICOS_me.bin");
68 MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
69 MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
70 MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
71 MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
72 MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
73 MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
74 MODULE_FIRMWARE("radeon/ARUBA_me.bin");
75 MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
76 
77 #define BTC_IO_MC_REGS_SIZE 29
78 
79 static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
80 	{0x00000077, 0xff010100},
81 	{0x00000078, 0x00000000},
82 	{0x00000079, 0x00001434},
83 	{0x0000007a, 0xcc08ec08},
84 	{0x0000007b, 0x00040000},
85 	{0x0000007c, 0x000080c0},
86 	{0x0000007d, 0x09000000},
87 	{0x0000007e, 0x00210404},
88 	{0x00000081, 0x08a8e800},
89 	{0x00000082, 0x00030444},
90 	{0x00000083, 0x00000000},
91 	{0x00000085, 0x00000001},
92 	{0x00000086, 0x00000002},
93 	{0x00000087, 0x48490000},
94 	{0x00000088, 0x20244647},
95 	{0x00000089, 0x00000005},
96 	{0x0000008b, 0x66030000},
97 	{0x0000008c, 0x00006603},
98 	{0x0000008d, 0x00000100},
99 	{0x0000008f, 0x00001c0a},
100 	{0x00000090, 0xff000001},
101 	{0x00000094, 0x00101101},
102 	{0x00000095, 0x00000fff},
103 	{0x00000096, 0x00116fff},
104 	{0x00000097, 0x60010000},
105 	{0x00000098, 0x10010000},
106 	{0x00000099, 0x00006000},
107 	{0x0000009a, 0x00001000},
108 	{0x0000009f, 0x00946a00}
109 };
110 
111 static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
112 	{0x00000077, 0xff010100},
113 	{0x00000078, 0x00000000},
114 	{0x00000079, 0x00001434},
115 	{0x0000007a, 0xcc08ec08},
116 	{0x0000007b, 0x00040000},
117 	{0x0000007c, 0x000080c0},
118 	{0x0000007d, 0x09000000},
119 	{0x0000007e, 0x00210404},
120 	{0x00000081, 0x08a8e800},
121 	{0x00000082, 0x00030444},
122 	{0x00000083, 0x00000000},
123 	{0x00000085, 0x00000001},
124 	{0x00000086, 0x00000002},
125 	{0x00000087, 0x48490000},
126 	{0x00000088, 0x20244647},
127 	{0x00000089, 0x00000005},
128 	{0x0000008b, 0x66030000},
129 	{0x0000008c, 0x00006603},
130 	{0x0000008d, 0x00000100},
131 	{0x0000008f, 0x00001c0a},
132 	{0x00000090, 0xff000001},
133 	{0x00000094, 0x00101101},
134 	{0x00000095, 0x00000fff},
135 	{0x00000096, 0x00116fff},
136 	{0x00000097, 0x60010000},
137 	{0x00000098, 0x10010000},
138 	{0x00000099, 0x00006000},
139 	{0x0000009a, 0x00001000},
140 	{0x0000009f, 0x00936a00}
141 };
142 
143 static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
144 	{0x00000077, 0xff010100},
145 	{0x00000078, 0x00000000},
146 	{0x00000079, 0x00001434},
147 	{0x0000007a, 0xcc08ec08},
148 	{0x0000007b, 0x00040000},
149 	{0x0000007c, 0x000080c0},
150 	{0x0000007d, 0x09000000},
151 	{0x0000007e, 0x00210404},
152 	{0x00000081, 0x08a8e800},
153 	{0x00000082, 0x00030444},
154 	{0x00000083, 0x00000000},
155 	{0x00000085, 0x00000001},
156 	{0x00000086, 0x00000002},
157 	{0x00000087, 0x48490000},
158 	{0x00000088, 0x20244647},
159 	{0x00000089, 0x00000005},
160 	{0x0000008b, 0x66030000},
161 	{0x0000008c, 0x00006603},
162 	{0x0000008d, 0x00000100},
163 	{0x0000008f, 0x00001c0a},
164 	{0x00000090, 0xff000001},
165 	{0x00000094, 0x00101101},
166 	{0x00000095, 0x00000fff},
167 	{0x00000096, 0x00116fff},
168 	{0x00000097, 0x60010000},
169 	{0x00000098, 0x10010000},
170 	{0x00000099, 0x00006000},
171 	{0x0000009a, 0x00001000},
172 	{0x0000009f, 0x00916a00}
173 };
174 
175 static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
176 	{0x00000077, 0xff010100},
177 	{0x00000078, 0x00000000},
178 	{0x00000079, 0x00001434},
179 	{0x0000007a, 0xcc08ec08},
180 	{0x0000007b, 0x00040000},
181 	{0x0000007c, 0x000080c0},
182 	{0x0000007d, 0x09000000},
183 	{0x0000007e, 0x00210404},
184 	{0x00000081, 0x08a8e800},
185 	{0x00000082, 0x00030444},
186 	{0x00000083, 0x00000000},
187 	{0x00000085, 0x00000001},
188 	{0x00000086, 0x00000002},
189 	{0x00000087, 0x48490000},
190 	{0x00000088, 0x20244647},
191 	{0x00000089, 0x00000005},
192 	{0x0000008b, 0x66030000},
193 	{0x0000008c, 0x00006603},
194 	{0x0000008d, 0x00000100},
195 	{0x0000008f, 0x00001c0a},
196 	{0x00000090, 0xff000001},
197 	{0x00000094, 0x00101101},
198 	{0x00000095, 0x00000fff},
199 	{0x00000096, 0x00116fff},
200 	{0x00000097, 0x60010000},
201 	{0x00000098, 0x10010000},
202 	{0x00000099, 0x00006000},
203 	{0x0000009a, 0x00001000},
204 	{0x0000009f, 0x00976b00}
205 };
206 
207 int ni_mc_load_microcode(struct radeon_device *rdev)
208 {
209 	const __be32 *fw_data;
210 	u32 mem_type, running, blackout = 0;
211 	u32 *io_mc_regs;
212 	int i, ucode_size, regs_size;
213 
214 	if (!rdev->mc_fw)
215 		return -EINVAL;
216 
217 	switch (rdev->family) {
218 	case CHIP_BARTS:
219 		io_mc_regs = (u32 *)&barts_io_mc_regs;
220 		ucode_size = BTC_MC_UCODE_SIZE;
221 		regs_size = BTC_IO_MC_REGS_SIZE;
222 		break;
223 	case CHIP_TURKS:
224 		io_mc_regs = (u32 *)&turks_io_mc_regs;
225 		ucode_size = BTC_MC_UCODE_SIZE;
226 		regs_size = BTC_IO_MC_REGS_SIZE;
227 		break;
228 	case CHIP_CAICOS:
229 	default:
230 		io_mc_regs = (u32 *)&caicos_io_mc_regs;
231 		ucode_size = BTC_MC_UCODE_SIZE;
232 		regs_size = BTC_IO_MC_REGS_SIZE;
233 		break;
234 	case CHIP_CAYMAN:
235 		io_mc_regs = (u32 *)&cayman_io_mc_regs;
236 		ucode_size = CAYMAN_MC_UCODE_SIZE;
237 		regs_size = BTC_IO_MC_REGS_SIZE;
238 		break;
239 	}
240 
241 	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
242 	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
243 
244 	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
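		/* with the (running == 0) test above, running can only be
		 * zero here, so this blackout save path appears unreachable */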
245 		if (running) {
246 			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
247 			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
248 		}
249 
250 		/* reset the engine and set to writable */
251 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
252 		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
253 
254 		/* load mc io regs */
255 		for (i = 0; i < regs_size; i++) {
256 			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
257 			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
258 		}
259 		/* load the MC ucode */
260 		fw_data = (const __be32 *)rdev->mc_fw;
261 		for (i = 0; i < ucode_size; i++)
262 			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
263 
264 		/* put the engine back into the active state */
265 		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
266 		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
267 		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
268 
269 		/* wait for training to complete */
270 		for (i = 0; i < rdev->usec_timeout; i++) {
271 			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
272 				break;
273 			udelay(1);
274 		}
275 
276 		if (running)
277 			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
278 	}
279 
280 	return 0;
281 }
282 
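/*
 * The io_mc_regs tables above are declared [BTC_IO_MC_REGS_SIZE][2] but
 * walked through a flat u32 pointer, so pair i is read at indices
 * (i << 1) and (i << 1) + 1.  A minimal equivalent written against the
 * two-dimensional type, illustrative only and not compiled:
 */
#if 0
static void ni_program_io_mc_regs(const u32 (*regs)[2], int count)
{
	int i;

	for (i = 0; i < count; i++) {
		WREG32(MC_SEQ_IO_DEBUG_INDEX, regs[i][0]);	/* MC reg index */
		WREG32(MC_SEQ_IO_DEBUG_DATA, regs[i][1]);	/* value */
	}
}
#endif
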
283 int ni_init_microcode(struct radeon_device *rdev)
284 {
285 	const char *chip_name;
286 	const char *rlc_chip_name;
287 	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
288 	char fw_name[30];
289 	int err;
290 
291 	DRM_DEBUG("\n");
292 
293 	switch (rdev->family) {
294 	case CHIP_BARTS:
295 		chip_name = "barts";
296 		rlc_chip_name = "btc";
297 		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
298 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
299 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
300 		mc_req_size = BTC_MC_UCODE_SIZE * 4;
301 		break;
302 	case CHIP_TURKS:
303 		chip_name = "turks";
304 		rlc_chip_name = "btc";
305 		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
306 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
307 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
308 		mc_req_size = BTC_MC_UCODE_SIZE * 4;
309 		break;
310 	case CHIP_CAICOS:
311 		chip_name = "caicos";
312 		rlc_chip_name = "btc";
313 		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
314 		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
315 		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
316 		mc_req_size = BTC_MC_UCODE_SIZE * 4;
317 		break;
318 	case CHIP_CAYMAN:
319 		chip_name = "cayman";
320 		rlc_chip_name = "cayman";
321 		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
322 		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
323 		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
324 		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
325 		break;
326 	case CHIP_ARUBA:
327 		chip_name = "aruba";
328 		rlc_chip_name = "aruba";
329 		/* pfp/me same size as CAYMAN */
330 		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
331 		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
332 		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
333 		mc_req_size = 0;
334 		break;
335 	default: BUG();
336 	}
337 
338 #ifdef DRMDEBUG
339 	DRM_INFO("Loading %s Microcode\n", chip_name);
340 #endif
341 
342 	snprintf(fw_name, sizeof(fw_name), "radeon-%s_pfp", chip_name);
343 	err = loadfirmware(fw_name, &rdev->pfp_fw, &rdev->pfp_fw_size);
344 	if (err)
345 		goto out;
346 	if (rdev->pfp_fw_size != pfp_req_size) {
347 		DRM_ERROR(
348 		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
349 		       rdev->pfp_fw_size, fw_name);
350 		err = -EINVAL;
351 		goto out;
352 	}
353 
354 	snprintf(fw_name, sizeof(fw_name), "radeon-%s_me", chip_name);
355 	err = loadfirmware(fw_name, &rdev->me_fw, &rdev->me_fw_size);
356 	if (err)
357 		goto out;
358 	if (rdev->me_fw_size != me_req_size) {
359 		DRM_ERROR(
360 		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
361 		       rdev->me_fw_size, fw_name);
362 		err = -EINVAL;
363 	}
364 
365 	snprintf(fw_name, sizeof(fw_name), "radeon-%s_rlc", rlc_chip_name);
366 	err = loadfirmware(fw_name, &rdev->rlc_fw, &rdev->rlc_fw_size);
367 	if (err)
368 		goto out;
369 	if (rdev->rlc_fw_size != rlc_req_size) {
370 		DRM_ERROR(
371 		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
372 		       rdev->rlc_fw_size, fw_name);
373 		err = -EINVAL;
374 	}
375 
376 	/* no MC ucode on TN */
377 	if (!(rdev->flags & RADEON_IS_IGP)) {
378 		snprintf(fw_name, sizeof(fw_name), "radeon-%s_mc", chip_name);
379 		err = loadfirmware(fw_name, &rdev->mc_fw, &rdev->mc_fw_size);
380 		if (err)
381 			goto out;
382 		if (rdev->mc_fw_size != mc_req_size) {
383 			DRM_ERROR(
384 			       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
385 			       rdev->mc_fw_size, fw_name);
386 			err = -EINVAL;
387 		}
388 	}
389 out:
390 	if (err) {
391 		if (err != -EINVAL)
392 			DRM_ERROR(
393 			       "ni_cp: Failed to load firmware \"%s\"\n",
394 			       fw_name);
395 		if (rdev->pfp_fw) {
396 			free(rdev->pfp_fw, M_DEVBUF, 0);
397 			rdev->pfp_fw = NULL;
398 		}
399 		if (rdev->me_fw) {
400 			free(rdev->me_fw, M_DEVBUF, 0);
401 			rdev->me_fw = NULL;
402 		}
403 		if (rdev->rlc_fw) {
404 			free(rdev->rlc_fw, M_DEVBUF, 0);
405 			rdev->rlc_fw = NULL;
406 		}
407 		if (rdev->mc_fw) {
408 			free(rdev->mc_fw, M_DEVBUF, 0);
409 			rdev->mc_fw = NULL;
410 		}
411 	}
412 	return err;
413 }
414 
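/*
 * Each image above is fetched with loadfirmware(9) and validated only by
 * length; the *_UCODE_SIZE constants count 32-bit words, hence the "* 4"
 * when computing the expected byte sizes.  A hedged sketch of the
 * repeated load-and-check step as one helper (hypothetical name, not
 * part of this file):
 */
#if 0
static int ni_load_fw(const char *name, u_char **fw, size_t *fw_size,
    size_t req_size)
{
	int err;

	err = loadfirmware(name, fw, fw_size);
	if (err == 0 && *fw_size != req_size) {
		DRM_ERROR("ni: Bogus length %zu in firmware \"%s\"\n",
		    *fw_size, name);
		err = -EINVAL;
	}
	return err;
}
#endif
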
415 /*
416  * Core functions
417  */
418 static void cayman_gpu_init(struct radeon_device *rdev)
419 {
420 	u32 gb_addr_config = 0;
421 	u32 mc_shared_chmap, mc_arb_ramcfg;
422 	u32 cgts_tcc_disable;
423 	u32 sx_debug_1;
424 	u32 smx_dc_ctl0;
425 	u32 cgts_sm_ctrl_reg;
426 	u32 hdp_host_path_cntl;
427 	u32 tmp;
428 	u32 disabled_rb_mask;
429 	int i, j;
430 
431 	switch (rdev->family) {
432 	case CHIP_CAYMAN:
433 		rdev->config.cayman.max_shader_engines = 2;
434 		rdev->config.cayman.max_pipes_per_simd = 4;
435 		rdev->config.cayman.max_tile_pipes = 8;
436 		rdev->config.cayman.max_simds_per_se = 12;
437 		rdev->config.cayman.max_backends_per_se = 4;
438 		rdev->config.cayman.max_texture_channel_caches = 8;
439 		rdev->config.cayman.max_gprs = 256;
440 		rdev->config.cayman.max_threads = 256;
441 		rdev->config.cayman.max_gs_threads = 32;
442 		rdev->config.cayman.max_stack_entries = 512;
443 		rdev->config.cayman.sx_num_of_sets = 8;
444 		rdev->config.cayman.sx_max_export_size = 256;
445 		rdev->config.cayman.sx_max_export_pos_size = 64;
446 		rdev->config.cayman.sx_max_export_smx_size = 192;
447 		rdev->config.cayman.max_hw_contexts = 8;
448 		rdev->config.cayman.sq_num_cf_insts = 2;
449 
450 		rdev->config.cayman.sc_prim_fifo_size = 0x100;
451 		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
452 		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
453 		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
454 		break;
455 	case CHIP_ARUBA:
456 	default:
457 		rdev->config.cayman.max_shader_engines = 1;
458 		rdev->config.cayman.max_pipes_per_simd = 4;
459 		rdev->config.cayman.max_tile_pipes = 2;
460 		if ((rdev->pdev->device == 0x9900) ||
461 		    (rdev->pdev->device == 0x9901) ||
462 		    (rdev->pdev->device == 0x9905) ||
463 		    (rdev->pdev->device == 0x9906) ||
464 		    (rdev->pdev->device == 0x9907) ||
465 		    (rdev->pdev->device == 0x9908) ||
466 		    (rdev->pdev->device == 0x9909) ||
467 		    (rdev->pdev->device == 0x990B) ||
468 		    (rdev->pdev->device == 0x990C) ||
469 		    (rdev->pdev->device == 0x990F) ||
470 		    (rdev->pdev->device == 0x9910) ||
471 		    (rdev->pdev->device == 0x9917) ||
472 		    (rdev->pdev->device == 0x9999) ||
473 		    (rdev->pdev->device == 0x999C)) {
474 			rdev->config.cayman.max_simds_per_se = 6;
475 			rdev->config.cayman.max_backends_per_se = 2;
476 			rdev->config.cayman.max_hw_contexts = 8;
477 			rdev->config.cayman.sx_max_export_size = 256;
478 			rdev->config.cayman.sx_max_export_pos_size = 64;
479 			rdev->config.cayman.sx_max_export_smx_size = 192;
480 		} else if ((rdev->pdev->device == 0x9903) ||
481 			   (rdev->pdev->device == 0x9904) ||
482 			   (rdev->pdev->device == 0x990A) ||
483 			   (rdev->pdev->device == 0x990D) ||
484 			   (rdev->pdev->device == 0x990E) ||
485 			   (rdev->pdev->device == 0x9913) ||
486 			   (rdev->pdev->device == 0x9918) ||
487 			   (rdev->pdev->device == 0x999D)) {
488 			rdev->config.cayman.max_simds_per_se = 4;
489 			rdev->config.cayman.max_backends_per_se = 2;
490 			rdev->config.cayman.max_hw_contexts = 8;
491 			rdev->config.cayman.sx_max_export_size = 256;
492 			rdev->config.cayman.sx_max_export_pos_size = 64;
493 			rdev->config.cayman.sx_max_export_smx_size = 192;
494 		} else if ((rdev->pdev->device == 0x9919) ||
495 			   (rdev->pdev->device == 0x9990) ||
496 			   (rdev->pdev->device == 0x9991) ||
497 			   (rdev->pdev->device == 0x9994) ||
498 			   (rdev->pdev->device == 0x9995) ||
499 			   (rdev->pdev->device == 0x9996) ||
500 			   (rdev->pdev->device == 0x999A) ||
501 			   (rdev->pdev->device == 0x99A0)) {
502 			rdev->config.cayman.max_simds_per_se = 3;
503 			rdev->config.cayman.max_backends_per_se = 1;
504 			rdev->config.cayman.max_hw_contexts = 4;
505 			rdev->config.cayman.sx_max_export_size = 128;
506 			rdev->config.cayman.sx_max_export_pos_size = 32;
507 			rdev->config.cayman.sx_max_export_smx_size = 96;
508 		} else {
509 			rdev->config.cayman.max_simds_per_se = 2;
510 			rdev->config.cayman.max_backends_per_se = 1;
511 			rdev->config.cayman.max_hw_contexts = 4;
512 			rdev->config.cayman.sx_max_export_size = 128;
513 			rdev->config.cayman.sx_max_export_pos_size = 32;
514 			rdev->config.cayman.sx_max_export_smx_size = 96;
515 		}
516 		rdev->config.cayman.max_texture_channel_caches = 2;
517 		rdev->config.cayman.max_gprs = 256;
518 		rdev->config.cayman.max_threads = 256;
519 		rdev->config.cayman.max_gs_threads = 32;
520 		rdev->config.cayman.max_stack_entries = 512;
521 		rdev->config.cayman.sx_num_of_sets = 8;
522 		rdev->config.cayman.sq_num_cf_insts = 2;
523 
524 		rdev->config.cayman.sc_prim_fifo_size = 0x40;
525 		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
526 		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
527 		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
528 		break;
529 	}
530 
531 	/* Initialize HDP */
532 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
533 		WREG32((0x2c14 + j), 0x00000000);
534 		WREG32((0x2c18 + j), 0x00000000);
535 		WREG32((0x2c1c + j), 0x00000000);
536 		WREG32((0x2c20 + j), 0x00000000);
537 		WREG32((0x2c24 + j), 0x00000000);
538 	}
539 
540 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
541 
542 	evergreen_fix_pci_max_read_req_size(rdev);
543 
544 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
545 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
546 
547 	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
548 	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
549 	if (rdev->config.cayman.mem_row_size_in_kb > 4)
550 		rdev->config.cayman.mem_row_size_in_kb = 4;
551 	/* XXX use MC settings? */
552 	rdev->config.cayman.shader_engine_tile_size = 32;
553 	rdev->config.cayman.num_gpus = 1;
554 	rdev->config.cayman.multi_gpu_tile_size = 64;
555 
556 	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
557 	rdev->config.cayman.num_tile_pipes = (1 << tmp);
558 	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
559 	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
560 	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
561 	rdev->config.cayman.num_shader_engines = tmp + 1;
562 	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
563 	rdev->config.cayman.num_gpus = tmp + 1;
564 	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
565 	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
566 	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
567 	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
568 
569 
570 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
571 	 * not have bank info, so create a custom tiling dword.
572 	 * bits 3:0   num_pipes
573 	 * bits 7:4   num_banks
574 	 * bits 11:8  group_size
575 	 * bits 15:12 row_size
576 	 */
577 	rdev->config.cayman.tile_config = 0;
578 	switch (rdev->config.cayman.num_tile_pipes) {
579 	case 1:
580 	default:
581 		rdev->config.cayman.tile_config |= (0 << 0);
582 		break;
583 	case 2:
584 		rdev->config.cayman.tile_config |= (1 << 0);
585 		break;
586 	case 4:
587 		rdev->config.cayman.tile_config |= (2 << 0);
588 		break;
589 	case 8:
590 		rdev->config.cayman.tile_config |= (3 << 0);
591 		break;
592 	}
593 
594 	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
595 	if (rdev->flags & RADEON_IS_IGP)
596 		rdev->config.cayman.tile_config |= 1 << 4;
597 	else {
598 		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
599 		case 0: /* four banks */
600 			rdev->config.cayman.tile_config |= 0 << 4;
601 			break;
602 		case 1: /* eight banks */
603 			rdev->config.cayman.tile_config |= 1 << 4;
604 			break;
605 		case 2: /* sixteen banks */
606 		default:
607 			rdev->config.cayman.tile_config |= 2 << 4;
608 			break;
609 		}
610 	}
611 	rdev->config.cayman.tile_config |=
612 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
613 	rdev->config.cayman.tile_config |=
614 		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
615 
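/*
 * tile_config therefore packs four log2-style codes: num_pipes in bits
 * 3:0, num_banks in bits 7:4, group_size in bits 11:8 and row_size in
 * bits 15:12.  How a consumer could unpack the first two fields
 * (hypothetical helpers, illustrative only):
 */
#if 0
static unsigned int cayman_tile_cfg_num_pipes(u32 tile_config)
{
	return 1 << (tile_config & 0xf);	/* 0 -> 1, 1 -> 2, 2 -> 4, 3 -> 8 */
}

static unsigned int cayman_tile_cfg_num_banks(u32 tile_config)
{
	return 4 << ((tile_config >> 4) & 0xf);	/* 0 -> 4, 1 -> 8, 2 -> 16 */
}
#endif
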
616 	tmp = 0;
617 	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
618 		u32 rb_disable_bitmap;
619 
620 		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
621 		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
622 		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
623 		tmp <<= 4;
624 		tmp |= rb_disable_bitmap;
625 	}
626 	/* enabled rbs are just the ones not disabled :) */
627 	disabled_rb_mask = tmp;
628 
629 	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
630 	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
631 
632 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
633 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
634 	if (ASIC_IS_DCE6(rdev))
635 		WREG32(DMIF_ADDR_CALC, gb_addr_config);
636 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
637 	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
638 	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
639 
640 	if ((rdev->config.cayman.max_backends_per_se == 1) &&
641 	    (rdev->flags & RADEON_IS_IGP)) {
642 		if ((disabled_rb_mask & 3) == 1) {
643 			/* RB0 disabled, RB1 enabled */
644 			tmp = 0x11111111;
645 		} else {
646 			/* RB1 disabled, RB0 enabled */
647 			tmp = 0x00000000;
648 		}
649 	} else {
650 		tmp = gb_addr_config & NUM_PIPES_MASK;
651 		tmp = r6xx_remap_render_backend(rdev, tmp,
652 						rdev->config.cayman.max_backends_per_se *
653 						rdev->config.cayman.max_shader_engines,
654 						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
655 	}
656 	WREG32(GB_BACKEND_MAP, tmp);
657 
658 	cgts_tcc_disable = 0xffff0000;
659 	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
660 		cgts_tcc_disable &= ~(1 << (16 + i));
661 	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
662 	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
663 	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
664 	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
665 
666 	/* reprogram the shader complex */
667 	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
668 	for (i = 0; i < 16; i++)
669 		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
670 	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);
671 
672 	/* set HW defaults for 3D engine */
673 	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
674 
675 	sx_debug_1 = RREG32(SX_DEBUG_1);
676 	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
677 	WREG32(SX_DEBUG_1, sx_debug_1);
678 
679 	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
680 	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
681 	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
682 	WREG32(SMX_DC_CTL0, smx_dc_ctl0);
683 
684 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
685 
686 	/* need to be explicitly zero-ed */
687 	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
688 	WREG32(SQ_LSTMP_RING_BASE, 0);
689 	WREG32(SQ_HSTMP_RING_BASE, 0);
690 	WREG32(SQ_ESTMP_RING_BASE, 0);
691 	WREG32(SQ_GSTMP_RING_BASE, 0);
692 	WREG32(SQ_VSTMP_RING_BASE, 0);
693 	WREG32(SQ_PSTMP_RING_BASE, 0);
694 
695 	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
696 
697 	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
698 					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
699 					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
700 
701 	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
702 				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
703 				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
704 
705 
706 	WREG32(VGT_NUM_INSTANCES, 1);
707 
708 	WREG32(CP_PERFMON_CNTL, 0);
709 
710 	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
711 				  FETCH_FIFO_HIWATER(0x4) |
712 				  DONE_FIFO_HIWATER(0xe0) |
713 				  ALU_UPDATE_FIFO_HIWATER(0x8)));
714 
715 	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
716 	WREG32(SQ_CONFIG, (VC_ENABLE |
717 			   EXPORT_SRC_C |
718 			   GFX_PRIO(0) |
719 			   CS1_PRIO(0) |
720 			   CS2_PRIO(1)));
721 	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);
722 
723 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
724 					  FORCE_EOV_MAX_REZ_CNT(255)));
725 
726 	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
727 	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
728 
729 	WREG32(VGT_GS_VERTEX_REUSE, 16);
730 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
731 
732 	WREG32(CB_PERF_CTR0_SEL_0, 0);
733 	WREG32(CB_PERF_CTR0_SEL_1, 0);
734 	WREG32(CB_PERF_CTR1_SEL_0, 0);
735 	WREG32(CB_PERF_CTR1_SEL_1, 0);
736 	WREG32(CB_PERF_CTR2_SEL_0, 0);
737 	WREG32(CB_PERF_CTR2_SEL_1, 0);
738 	WREG32(CB_PERF_CTR3_SEL_0, 0);
739 	WREG32(CB_PERF_CTR3_SEL_1, 0);
740 
741 	tmp = RREG32(HDP_MISC_CNTL);
742 	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
743 	WREG32(HDP_MISC_CNTL, tmp);
744 
745 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
746 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
747 
748 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
749 
750 	udelay(50);
751 }
752 
753 /*
754  * GART
755  */
756 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
757 {
758 	/* flush hdp cache */
759 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
760 
761 	/* bits 0-7 are the VM contexts 0-7 */
762 	WREG32(VM_INVALIDATE_REQUEST, 1);
763 }
764 
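/*
 * VM_INVALIDATE_REQUEST is a bitmask with one bit per VM context, so the
 * write above (value 1) flushes only context 0; flushing a single
 * context i would presumably be WREG32(VM_INVALIDATE_REQUEST, 1 << i).
 */
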
765 static int cayman_pcie_gart_enable(struct radeon_device *rdev)
766 {
767 	int i, r;
768 
769 	if (rdev->gart.robj == NULL) {
770 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
771 		return -EINVAL;
772 	}
773 	r = radeon_gart_table_vram_pin(rdev);
774 	if (r)
775 		return r;
776 	radeon_gart_restore(rdev);
777 	/* Setup TLB control */
778 	WREG32(MC_VM_MX_L1_TLB_CNTL,
779 	       (0xA << 7) |
780 	       ENABLE_L1_TLB |
781 	       ENABLE_L1_FRAGMENT_PROCESSING |
782 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
783 	       ENABLE_ADVANCED_DRIVER_MODEL |
784 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
785 	/* Setup L2 cache */
786 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
787 	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
788 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
789 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
790 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
791 	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
792 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
793 	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
794 	/* setup context0 */
795 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
796 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
797 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
798 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
799 			(u32)(rdev->dummy_page.addr >> 12));
800 	WREG32(VM_CONTEXT0_CNTL2, 0);
801 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
802 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
803 
804 	WREG32(0x15D4, 0);
805 	WREG32(0x15D8, 0);
806 	WREG32(0x15DC, 0);
807 
808 	/* empty context1-7 */
809 	/* Assign the pt base to something valid for now; the pts used for
810 	 * the VMs are determined by the application and are set up and assigned
811 	 * on the fly in the vm part of radeon_gart.c
812 	 */
813 	for (i = 1; i < 8; i++) {
814 		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
815 		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
816 		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
817 			rdev->gart.table_addr >> 12);
818 	}
819 
820 	/* enable context1-7 */
821 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
822 	       (u32)(rdev->dummy_page.addr >> 12));
823 	WREG32(VM_CONTEXT1_CNTL2, 4);
824 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
825 				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
826 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
827 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
828 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
829 				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
830 				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
831 				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
832 				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
833 				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
834 				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
835 				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
836 				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
837 
838 	cayman_pcie_gart_tlb_flush(rdev);
839 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
840 		 (unsigned)(rdev->mc.gtt_size >> 20),
841 		 (unsigned long long)rdev->gart.table_addr);
842 	rdev->gart.ready = true;
843 	return 0;
844 }
845 
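/*
 * The per-context page table registers are laid out 4 bytes apart, which
 * is why context i is addressed as VM_CONTEXT0_... + (i << 2) in the
 * loop above.  Illustrative only:
 */
#if 0
static u32 cayman_vm_pt_base_reg(unsigned int vm_id)
{
	return VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2);
}
#endif
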
846 static void cayman_pcie_gart_disable(struct radeon_device *rdev)
847 {
848 	/* Disable all tables */
849 	WREG32(VM_CONTEXT0_CNTL, 0);
850 	WREG32(VM_CONTEXT1_CNTL, 0);
851 	/* Setup TLB control */
852 	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
853 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
854 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
855 	/* Setup L2 cache */
856 	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
857 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
858 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
859 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
860 	WREG32(VM_L2_CNTL2, 0);
861 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
862 	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
863 	radeon_gart_table_vram_unpin(rdev);
864 }
865 
866 static void cayman_pcie_gart_fini(struct radeon_device *rdev)
867 {
868 	cayman_pcie_gart_disable(rdev);
869 	radeon_gart_table_vram_free(rdev);
870 	radeon_gart_fini(rdev);
871 }
872 
873 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
874 			      int ring, u32 cp_int_cntl)
875 {
876 	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
877 
878 	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
879 	WREG32(CP_INT_CNTL, cp_int_cntl);
880 }
881 
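/*
 * CP_INT_CNTL is banked per ring: the low two bits of SRBM_GFX_CNTL
 * select which ring's copy the next CP register access hits, and the
 * read-modify-write above preserves the register's other bits.  The
 * same idea as a read helper (hypothetical, illustrative only):
 */
#if 0
static u32 cayman_cp_int_cntl_read(struct radeon_device *rdev, int ring)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	return RREG32(CP_INT_CNTL);
}
#endif
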
882 /*
883  * CP.
884  */
885 void cayman_fence_ring_emit(struct radeon_device *rdev,
886 			    struct radeon_fence *fence)
887 {
888 	struct radeon_ring *ring = &rdev->ring[fence->ring];
889 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
890 	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
891 		PACKET3_SH_ACTION_ENA;
892 
893 	/* flush read cache over gart for this vmid */
894 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
895 	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
896 	radeon_ring_write(ring, 0xFFFFFFFF);
897 	radeon_ring_write(ring, 0);
898 	radeon_ring_write(ring, 10); /* poll interval */
899 	/* EVENT_WRITE_EOP - flush caches, send int */
900 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
901 	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
902 	radeon_ring_write(ring, addr & 0xffffffff);
903 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
904 	radeon_ring_write(ring, fence->seq);
905 	radeon_ring_write(ring, 0);
906 }
907 
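/*
 * In the EOP packet above, DATA_SEL(1) appears to select a 32-bit data
 * write (the fence->seq dword) and INT_SEL(2) an interrupt once that
 * write is confirmed; the final zero dword fills the unused upper half
 * of the 64-bit data field.
 */
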
908 void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
909 {
910 	struct radeon_ring *ring = &rdev->ring[ib->ring];
911 	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
912 		PACKET3_SH_ACTION_ENA;
913 
914 	/* set to DX10/11 mode */
915 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
916 	radeon_ring_write(ring, 1);
917 
918 	if (ring->rptr_save_reg) {
919 		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
920 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
921 		radeon_ring_write(ring, ((ring->rptr_save_reg -
922 					  PACKET3_SET_CONFIG_REG_START) >> 2));
923 		radeon_ring_write(ring, next_rptr);
924 	}
925 
926 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
927 	radeon_ring_write(ring,
928 #ifdef __BIG_ENDIAN
929 			  (2 << 0) |
930 #endif
931 			  (ib->gpu_addr & 0xFFFFFFFC));
932 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
933 	radeon_ring_write(ring, ib->length_dw |
934 			  (ib->vm ? (ib->vm->id << 24) : 0));
935 
936 	/* flush read cache over gart for this vmid */
937 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
938 	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
939 	radeon_ring_write(ring, 0xFFFFFFFF);
940 	radeon_ring_write(ring, 0);
941 	radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
942 }
943 
944 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
945 {
946 	if (enable)
947 		WREG32(CP_ME_CNTL, 0);
948 	else {
949 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
950 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
951 		WREG32(SCRATCH_UMSK, 0);
952 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
953 	}
954 }
955 
956 static int cayman_cp_load_microcode(struct radeon_device *rdev)
957 {
958 	const __be32 *fw_data;
959 	int i;
960 
961 	if (!rdev->me_fw || !rdev->pfp_fw)
962 		return -EINVAL;
963 
964 	cayman_cp_enable(rdev, false);
965 
966 	fw_data = (const __be32 *)rdev->pfp_fw;
967 	WREG32(CP_PFP_UCODE_ADDR, 0);
968 	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
969 		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
970 	WREG32(CP_PFP_UCODE_ADDR, 0);
971 
972 	fw_data = (const __be32 *)rdev->me_fw;
973 	WREG32(CP_ME_RAM_WADDR, 0);
974 	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
975 		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
976 
977 	WREG32(CP_PFP_UCODE_ADDR, 0);
978 	WREG32(CP_ME_RAM_WADDR, 0);
979 	WREG32(CP_ME_RAM_RADDR, 0);
980 	return 0;
981 }
982 
983 static int cayman_cp_start(struct radeon_device *rdev)
984 {
985 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
986 	int r, i;
987 
988 	r = radeon_ring_lock(rdev, ring, 7);
989 	if (r) {
990 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
991 		return r;
992 	}
993 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
994 	radeon_ring_write(ring, 0x1);
995 	radeon_ring_write(ring, 0x0);
996 	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
997 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
998 	radeon_ring_write(ring, 0);
999 	radeon_ring_write(ring, 0);
1000 	radeon_ring_unlock_commit(rdev, ring);
1001 
1002 	cayman_cp_enable(rdev, true);
1003 
1004 	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
1005 	if (r) {
1006 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1007 		return r;
1008 	}
1009 
1010 	/* setup clear context state */
1011 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1012 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1013 
1014 	for (i = 0; i < cayman_default_size; i++)
1015 		radeon_ring_write(ring, cayman_default_state[i]);
1016 
1017 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1018 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1019 
1020 	/* set clear context state */
1021 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1022 	radeon_ring_write(ring, 0);
1023 
1024 	/* SQ_VTX_BASE_VTX_LOC */
1025 	radeon_ring_write(ring, 0xc0026f00);
1026 	radeon_ring_write(ring, 0x00000000);
1027 	radeon_ring_write(ring, 0x00000000);
1028 	radeon_ring_write(ring, 0x00000000);
1029 
1030 	/* Clear consts */
1031 	radeon_ring_write(ring, 0xc0036f00);
1032 	radeon_ring_write(ring, 0x00000bc4);
1033 	radeon_ring_write(ring, 0xffffffff);
1034 	radeon_ring_write(ring, 0xffffffff);
1035 	radeon_ring_write(ring, 0xffffffff);
1036 
1037 	radeon_ring_write(ring, 0xc0026900);
1038 	radeon_ring_write(ring, 0x00000316);
1039 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1040 	radeon_ring_write(ring, 0x00000010); /*  */
1041 
1042 	radeon_ring_unlock_commit(rdev, ring);
1043 
1044 	/* XXX init other rings */
1045 
1046 	return 0;
1047 }
1048 
1049 static void cayman_cp_fini(struct radeon_device *rdev)
1050 {
1051 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1052 	cayman_cp_enable(rdev, false);
1053 	radeon_ring_fini(rdev, ring);
1054 	radeon_scratch_free(rdev, ring->rptr_save_reg);
1055 }
1056 
1057 static int cayman_cp_resume(struct radeon_device *rdev)
1058 {
1059 	static const int ridx[] = {
1060 		RADEON_RING_TYPE_GFX_INDEX,
1061 		CAYMAN_RING_TYPE_CP1_INDEX,
1062 		CAYMAN_RING_TYPE_CP2_INDEX
1063 	};
1064 	static const unsigned cp_rb_cntl[] = {
1065 		CP_RB0_CNTL,
1066 		CP_RB1_CNTL,
1067 		CP_RB2_CNTL,
1068 	};
1069 	static const unsigned cp_rb_rptr_addr[] = {
1070 		CP_RB0_RPTR_ADDR,
1071 		CP_RB1_RPTR_ADDR,
1072 		CP_RB2_RPTR_ADDR
1073 	};
1074 	static const unsigned cp_rb_rptr_addr_hi[] = {
1075 		CP_RB0_RPTR_ADDR_HI,
1076 		CP_RB1_RPTR_ADDR_HI,
1077 		CP_RB2_RPTR_ADDR_HI
1078 	};
1079 	static const unsigned cp_rb_base[] = {
1080 		CP_RB0_BASE,
1081 		CP_RB1_BASE,
1082 		CP_RB2_BASE
1083 	};
1084 	struct radeon_ring *ring;
1085 	int i, r;
1086 
1087 	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1088 	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1089 				 SOFT_RESET_PA |
1090 				 SOFT_RESET_SH |
1091 				 SOFT_RESET_VGT |
1092 				 SOFT_RESET_SPI |
1093 				 SOFT_RESET_SX));
1094 	RREG32(GRBM_SOFT_RESET);
1095 	mdelay(15);
1096 	WREG32(GRBM_SOFT_RESET, 0);
1097 	RREG32(GRBM_SOFT_RESET);
1098 
1099 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
1100 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
1101 
1102 	/* Set the write pointer delay */
1103 	WREG32(CP_RB_WPTR_DELAY, 0);
1104 
1105 	WREG32(CP_DEBUG, (1 << 27));
1106 
1107 	/* set the wb address whether it's enabled or not */
1108 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1109 	WREG32(SCRATCH_UMSK, 0xff);
1110 
1111 	for (i = 0; i < 3; ++i) {
1112 		uint32_t rb_cntl;
1113 		uint64_t addr;
1114 
1115 		/* Set ring buffer size */
1116 		ring = &rdev->ring[ridx[i]];
1117 		rb_cntl = drm_order(ring->ring_size / 8);
1118 		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
1119 #ifdef __BIG_ENDIAN
1120 		rb_cntl |= BUF_SWAP_32BIT;
1121 #endif
1122 		WREG32(cp_rb_cntl[i], rb_cntl);
1123 
1124 		/* set the wb address whether it's enabled or not */
1125 		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
1126 		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
1127 		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
1128 	}
1129 
1130 	/* set the rb base addr; this causes an internal reset of ALL rings */
1131 	for (i = 0; i < 3; ++i) {
1132 		ring = &rdev->ring[ridx[i]];
1133 		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
1134 	}
1135 
1136 	for (i = 0; i < 3; ++i) {
1137 		/* Initialize the ring buffer's read and write pointers */
1138 		ring = &rdev->ring[ridx[i]];
1139 		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
1140 
1141 		ring->rptr = ring->wptr = 0;
1142 		WREG32(ring->rptr_reg, ring->rptr);
1143 		WREG32(ring->wptr_reg, ring->wptr);
1144 
1145 		mdelay(1);
1146 		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
1147 	}
1148 
1149 	/* start the rings */
1150 	cayman_cp_start(rdev);
1151 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1152 	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1153 	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1154 	/* this only tests cp0 */
1155 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1156 	if (r) {
1157 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1158 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1159 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1160 		return r;
1161 	}
1162 
1163 	return 0;
1164 }
1165 
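/*
 * The CP ring size fields are log2 encoded: drm_order() returns the
 * base-2 logarithm of its argument (rounded up for non-powers of two),
 * so a 1 MB ring yields drm_order(1048576 / 8) = 17 and the 4 KB rptr
 * writeback block yields drm_order(4096 / 8) = 9, shifted left by 8.
 * The cntl computation from the loop above, isolated as a sketch
 * (illustrative only):
 */
#if 0
static u32 cayman_cp_rb_cntl(u32 ring_size_bytes)
{
	u32 rb_cntl = drm_order(ring_size_bytes / 8);

	rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE / 8) << 8;
	return rb_cntl;
}
#endif
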
1166 /*
1167  * DMA
1168  * Starting with R600, the GPU has an asynchronous
1169  * DMA engine.  The programming model is very similar
1170  * to the 3D engine (ring buffer, IBs, etc.), but the
1171  * DMA controller has its own packet format that is
1172  * different from the PM4 format used by the 3D engine.
1173  * It supports copying data, writing embedded data,
1174  * solid fills, and a number of other things.  It also
1175  * has support for tiling/detiling of buffers.
1176  * Cayman and newer support two asynchronous DMA engines.
1177  */
1178 /**
1179  * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
1180  *
1181  * @rdev: radeon_device pointer
1182  * @ib: IB object to schedule
1183  *
1184  * Schedule an IB in the DMA ring (cayman-SI).
1185  */
1186 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
1187 				struct radeon_ib *ib)
1188 {
1189 	struct radeon_ring *ring = &rdev->ring[ib->ring];
1190 
1191 	if (rdev->wb.enabled) {
1192 		u32 next_rptr = ring->wptr + 4;
1193 		while ((next_rptr & 7) != 5)
1194 			next_rptr++;
1195 		next_rptr += 3;
1196 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
1197 		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1198 		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
1199 		radeon_ring_write(ring, next_rptr);
1200 	}
1201 
1202 	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
1203 	 * Pad as necessary with NOPs.
1204 	 */
1205 	while ((ring->wptr & 7) != 5)
1206 		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1207 	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
1208 	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
1209 	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
1210 
1211 }
1212 
1213 /**
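/*
 * A DMA INDIRECT_BUFFER packet is 3 dwords, so starting it at
 * wptr % 8 == 5 (what the NOP-padding loop above enforces) makes it end
 * exactly on an 8-dword boundary: e.g. with wptr == 16 the 4-dword rptr
 * write ends at 20, one NOP pads to 21, the IB packet occupies 21..23,
 * and the ring pointer lands on 24.  The "+ 4" and "+ 3" in the
 * next_rptr computation walk through the same arithmetic.
 */
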
1214  * cayman_dma_stop - stop the async dma engines
1215  *
1216  * @rdev: radeon_device pointer
1217  *
1218  * Stop the async dma engines (cayman-SI).
1219  */
1220 void cayman_dma_stop(struct radeon_device *rdev)
1221 {
1222 	u32 rb_cntl;
1223 
1224 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1225 
1226 	/* dma0 */
1227 	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1228 	rb_cntl &= ~DMA_RB_ENABLE;
1229 	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
1230 
1231 	/* dma1 */
1232 	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1233 	rb_cntl &= ~DMA_RB_ENABLE;
1234 	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
1235 
1236 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1237 	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1238 }
1239 
1240 /**
1241  * cayman_dma_resume - setup and start the async dma engines
1242  *
1243  * @rdev: radeon_device pointer
1244  *
1245  * Set up the DMA ring buffers and enable them. (cayman-SI).
1246  * Returns 0 for success, error for failure.
1247  */
1248 int cayman_dma_resume(struct radeon_device *rdev)
1249 {
1250 	struct radeon_ring *ring;
1251 	u32 rb_cntl, dma_cntl, ib_cntl;
1252 	u32 rb_bufsz;
1253 	u32 reg_offset, wb_offset;
1254 	int i, r;
1255 
1256 	/* Reset dma */
1257 	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1258 	RREG32(SRBM_SOFT_RESET);
1259 	udelay(50);
1260 	WREG32(SRBM_SOFT_RESET, 0);
1261 
1262 	for (i = 0; i < 2; i++) {
1263 		if (i == 0) {
1264 			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1265 			reg_offset = DMA0_REGISTER_OFFSET;
1266 			wb_offset = R600_WB_DMA_RPTR_OFFSET;
1267 		} else {
1268 			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1269 			reg_offset = DMA1_REGISTER_OFFSET;
1270 			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
1271 		}
1272 
1273 		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
1274 		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
1275 
1276 		/* Set ring buffer size in dwords */
1277 		rb_bufsz = drm_order(ring->ring_size / 4);
1278 		rb_cntl = rb_bufsz << 1;
1279 #ifdef __BIG_ENDIAN
1280 		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
1281 #endif
1282 		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
1283 
1284 		/* Initialize the ring buffer's read and write pointers */
1285 		WREG32(DMA_RB_RPTR + reg_offset, 0);
1286 		WREG32(DMA_RB_WPTR + reg_offset, 0);
1287 
1288 		/* set the wb address whether it's enabled or not */
1289 		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
1290 		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
1291 		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
1292 		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
1293 
1294 		if (rdev->wb.enabled)
1295 			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
1296 
1297 		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
1298 
1299 		/* enable DMA IBs */
1300 		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
1301 #ifdef __BIG_ENDIAN
1302 		ib_cntl |= DMA_IB_SWAP_ENABLE;
1303 #endif
1304 		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
1305 
1306 		dma_cntl = RREG32(DMA_CNTL + reg_offset);
1307 		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
1308 		WREG32(DMA_CNTL + reg_offset, dma_cntl);
1309 
1310 		ring->wptr = 0;
1311 		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
1312 
1313 		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
1314 
1315 		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
1316 
1317 		ring->ready = true;
1318 
1319 		r = radeon_ring_test(rdev, ring->idx, ring);
1320 		if (r) {
1321 			ring->ready = false;
1322 			return r;
1323 		}
1324 	}
1325 
1326 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1327 
1328 	return 0;
1329 }
1330 
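/*
 * Same log2 scheme as the CP rings, but counted in dwords and shifted
 * above the enable bit: the 64 KB DMA rings set up in cayman_init()
 * give drm_order(65536 / 4) = 14, so rb_cntl starts out as 14 << 1.
 */
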
1331 /**
1332  * cayman_dma_fini - tear down the async dma engines
1333  *
1334  * @rdev: radeon_device pointer
1335  *
1336  * Stop the async dma engines and free the rings (cayman-SI).
1337  */
1338 void cayman_dma_fini(struct radeon_device *rdev)
1339 {
1340 	cayman_dma_stop(rdev);
1341 	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1342 	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1343 }
1344 
1345 static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
1346 {
1347 	u32 grbm_reset = 0;
1348 
1349 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1350 		return;
1351 
1352 	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
1353 		RREG32(GRBM_STATUS));
1354 	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
1355 		RREG32(GRBM_STATUS_SE0));
1356 	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
1357 		RREG32(GRBM_STATUS_SE1));
1358 	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
1359 		RREG32(SRBM_STATUS));
1360 	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1361 		RREG32(CP_STALLED_STAT1));
1362 	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1363 		RREG32(CP_STALLED_STAT2));
1364 	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
1365 		RREG32(CP_BUSY_STAT));
1366 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
1367 		RREG32(CP_STAT));
1368 
1369 	/* Disable CP parsing/prefetching */
1370 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1371 
1372 	/* reset all the gfx blocks */
1373 	grbm_reset = (SOFT_RESET_CP |
1374 		      SOFT_RESET_CB |
1375 		      SOFT_RESET_DB |
1376 		      SOFT_RESET_GDS |
1377 		      SOFT_RESET_PA |
1378 		      SOFT_RESET_SC |
1379 		      SOFT_RESET_SPI |
1380 		      SOFT_RESET_SH |
1381 		      SOFT_RESET_SX |
1382 		      SOFT_RESET_TC |
1383 		      SOFT_RESET_TA |
1384 		      SOFT_RESET_VGT |
1385 		      SOFT_RESET_IA);
1386 
1387 	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
1388 	WREG32(GRBM_SOFT_RESET, grbm_reset);
1389 	(void)RREG32(GRBM_SOFT_RESET);
1390 	udelay(50);
1391 	WREG32(GRBM_SOFT_RESET, 0);
1392 	(void)RREG32(GRBM_SOFT_RESET);
1393 
1394 	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
1395 		RREG32(GRBM_STATUS));
1396 	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
1397 		RREG32(GRBM_STATUS_SE0));
1398 	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
1399 		RREG32(GRBM_STATUS_SE1));
1400 	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
1401 		RREG32(SRBM_STATUS));
1402 	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1403 		RREG32(CP_STALLED_STAT1));
1404 	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1405 		RREG32(CP_STALLED_STAT2));
1406 	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
1407 		RREG32(CP_BUSY_STAT));
1408 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
1409 		RREG32(CP_STAT));
1410 
1411 }
1412 
1413 static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
1414 {
1415 	u32 tmp;
1416 
1417 	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1418 		return;
1419 
1420 	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
1421 		RREG32(DMA_STATUS_REG));
1422 
1423 	/* dma0 */
1424 	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1425 	tmp &= ~DMA_RB_ENABLE;
1426 	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
1427 
1428 	/* dma1 */
1429 	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1430 	tmp &= ~DMA_RB_ENABLE;
1431 	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1432 
1433 	/* Reset dma */
1434 	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1435 	RREG32(SRBM_SOFT_RESET);
1436 	udelay(50);
1437 	WREG32(SRBM_SOFT_RESET, 0);
1438 
1439 	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
1440 		RREG32(DMA_STATUS_REG));
1441 
1442 }
1443 
1444 static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1445 {
1446 	struct evergreen_mc_save save;
1447 
1448 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1449 		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
1450 
1451 	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1452 		reset_mask &= ~RADEON_RESET_DMA;
1453 
1454 	if (reset_mask == 0)
1455 		return 0;
1456 
1457 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1458 
1459 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
1460 		 RREG32(0x14F8));
1461 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1462 		 RREG32(0x14D8));
1463 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1464 		 RREG32(0x14FC));
1465 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1466 		 RREG32(0x14DC));
1467 
1468 	evergreen_mc_stop(rdev, &save);
1469 	if (evergreen_mc_wait_for_idle(rdev)) {
1470 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1471 	}
1472 
1473 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
1474 		cayman_gpu_soft_reset_gfx(rdev);
1475 
1476 	if (reset_mask & RADEON_RESET_DMA)
1477 		cayman_gpu_soft_reset_dma(rdev);
1478 
1479 	/* Wait a little for things to settle down */
1480 	udelay(50);
1481 
1482 	evergreen_mc_resume(rdev, &save);
1483 	return 0;
1484 }
1485 
1486 int cayman_asic_reset(struct radeon_device *rdev)
1487 {
1488 	return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
1489 					    RADEON_RESET_COMPUTE |
1490 					    RADEON_RESET_DMA));
1491 }
1492 
1493 /**
1494  * cayman_dma_is_lockup - Check if the DMA engine is locked up
1495  *
1496  * @rdev: radeon_device pointer
1497  * @ring: radeon_ring structure holding ring information
1498  *
1499  * Check if the async DMA engine is locked up (cayman-SI).
1500  * Returns true if the engine appears to be locked up, false if not.
1501  */
1502 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1503 {
1504 	u32 dma_status_reg;
1505 
1506 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
1507 		dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1508 	else
1509 		dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1510 	if (dma_status_reg & DMA_IDLE) {
1511 		radeon_ring_lockup_update(ring);
1512 		return false;
1513 	}
1514 	/* force ring activities */
1515 	radeon_ring_force_activity(rdev, ring);
1516 	return radeon_ring_test_lockup(rdev, ring);
1517 }
1518 
1519 static int cayman_startup(struct radeon_device *rdev)
1520 {
1521 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1522 	int r;
1523 
1524 	/* enable pcie gen2 link */
1525 	evergreen_pcie_gen2_enable(rdev);
1526 
1527 	evergreen_mc_program(rdev);
1528 
1529 	if (rdev->flags & RADEON_IS_IGP) {
1530 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1531 			r = ni_init_microcode(rdev);
1532 			if (r) {
1533 				DRM_ERROR("Failed to load firmware!\n");
1534 				return r;
1535 			}
1536 		}
1537 	} else {
1538 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1539 			r = ni_init_microcode(rdev);
1540 			if (r) {
1541 				DRM_ERROR("Failed to load firmware!\n");
1542 				return r;
1543 			}
1544 		}
1545 
1546 		r = ni_mc_load_microcode(rdev);
1547 		if (r) {
1548 			DRM_ERROR("Failed to load MC firmware!\n");
1549 			return r;
1550 		}
1551 	}
1552 
1553 	r = r600_vram_scratch_init(rdev);
1554 	if (r)
1555 		return r;
1556 
1557 	r = cayman_pcie_gart_enable(rdev);
1558 	if (r)
1559 		return r;
1560 	cayman_gpu_init(rdev);
1561 
1562 	r = evergreen_blit_init(rdev);
1563 	if (r) {
1564 		r600_blit_fini(rdev);
1565 		rdev->asic->copy.copy = NULL;
1566 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1567 	}
1568 
1569 	/* allocate rlc buffers */
1570 	if (rdev->flags & RADEON_IS_IGP) {
1571 		r = si_rlc_init(rdev);
1572 		if (r) {
1573 			DRM_ERROR("Failed to init rlc BOs!\n");
1574 			return r;
1575 		}
1576 	}
1577 
1578 	/* allocate wb buffer */
1579 	r = radeon_wb_init(rdev);
1580 	if (r)
1581 		return r;
1582 
1583 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
1584 	if (r) {
1585 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1586 		return r;
1587 	}
1588 
1589 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
1590 	if (r) {
1591 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1592 		return r;
1593 	}
1594 
1595 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
1596 	if (r) {
1597 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1598 		return r;
1599 	}
1600 
1601 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
1602 	if (r) {
1603 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1604 		return r;
1605 	}
1606 
1607 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
1608 	if (r) {
1609 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1610 		return r;
1611 	}
1612 
1613 	/* Enable IRQ */
1614 	if (!rdev->irq.installed) {
1615 		r = radeon_irq_kms_init(rdev);
1616 		if (r)
1617 			return r;
1618 	}
1619 
1620 	r = r600_irq_init(rdev);
1621 	if (r) {
1622 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
1623 		radeon_irq_kms_fini(rdev);
1624 		return r;
1625 	}
1626 	evergreen_irq_set(rdev);
1627 
1628 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
1629 			     CP_RB0_RPTR, CP_RB0_WPTR,
1630 			     0, 0xfffff, RADEON_CP_PACKET2);
1631 	if (r)
1632 		return r;
1633 
1634 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1635 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1636 			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
1637 			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
1638 			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1639 	if (r)
1640 		return r;
1641 
1642 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1643 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
1644 			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
1645 			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
1646 			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1647 	if (r)
1648 		return r;
1649 
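	/* load the CP microcode, then bring up the CP and DMA rings */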
1650 	r = cayman_cp_load_microcode(rdev);
1651 	if (r)
1652 		return r;
1653 	r = cayman_cp_resume(rdev);
1654 	if (r)
1655 		return r;
1656 
1657 	r = cayman_dma_resume(rdev);
1658 	if (r)
1659 		return r;
1660 
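	/* allocate the pool of indirect buffers used for command submission */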
1661 	r = radeon_ib_pool_init(rdev);
1662 	if (r) {
1663 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1664 		return r;
1665 	}
1666 
1667 	r = radeon_vm_manager_init(rdev);
1668 	if (r) {
1669 		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
1670 		return r;
1671 	}
1672 
1673 	r = r600_audio_init(rdev);
1674 	if (r)
1675 		return r;
1676 
1677 	return 0;
1678 }
1679 
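/**
 * cayman_resume - resume the asic to a functional state
 *
 * @rdev: radeon_device pointer
 *
 * Re-post the card via the atom BIOS tables, then restart the
 * acceleration engines through cayman_startup() (cayman/TN).
 * Returns 0 for success or an error on failure.
 */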
1680 int cayman_resume(struct radeon_device *rdev)
1681 {
1682 	int r;
1683 
	/* Do not reset the GPU before posting; on rv770 hw, unlike r500 hw,
	 * posting will perform the tasks necessary to bring the GPU back
	 * into good shape.
	 */
1688 	/* post card */
1689 	atom_asic_init(rdev->mode_info.atom_context);
1690 
1691 	rdev->accel_working = true;
1692 	r = cayman_startup(rdev);
1693 	if (r) {
1694 		DRM_ERROR("cayman startup failed on resume\n");
1695 		rdev->accel_working = false;
1696 		return r;
1697 	}
1698 	return r;
1699 }
1700 
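/**
 * cayman_suspend - suspend the asic
 *
 * @rdev: radeon_device pointer
 *
 * Tear things down in roughly the reverse order of startup: stop
 * the audio, the VM manager, the CP and DMA engines, quiesce the
 * interrupt handler, and disable writeback and the GART.
 * Returns 0 for success.
 */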
1701 int cayman_suspend(struct radeon_device *rdev)
1702 {
1703 	r600_audio_fini(rdev);
1704 	radeon_vm_manager_fini(rdev);
1705 	cayman_cp_enable(rdev, false);
1706 	cayman_dma_stop(rdev);
1707 	evergreen_irq_suspend(rdev);
1708 	radeon_wb_disable(rdev);
1709 	cayman_pcie_gart_disable(rdev);
1710 	return 0;
1711 }
1712 
/* The plan is to move initialization into this function and to use
 * helper functions so that radeon_device_init does little more than
 * call the ASIC-specific functions.  This should also allow us to
 * remove a bunch of callback functions such as vram_info.
 */
1719 int cayman_init(struct radeon_device *rdev)
1720 {
1721 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1722 	int r;
1723 
1724 	/* Read BIOS */
1725 	if (!radeon_get_bios(rdev)) {
1726 		if (ASIC_IS_AVIVO(rdev))
1727 			return -EINVAL;
1728 	}
1729 	/* Must be an ATOMBIOS */
1730 	if (!rdev->is_atom_bios) {
1731 		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
1732 		return -EINVAL;
1733 	}
1734 	r = radeon_atombios_init(rdev);
1735 	if (r)
1736 		return r;
1737 
1738 	/* Post card if necessary */
1739 	if (!radeon_card_posted(rdev)) {
1740 		if (!rdev->bios) {
1741 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1742 			return -EINVAL;
1743 		}
1744 		DRM_INFO("GPU not posted. posting now...\n");
1745 		atom_asic_init(rdev->mode_info.atom_context);
1746 	}
1747 	/* Initialize scratch registers */
1748 	r600_scratch_init(rdev);
1749 	/* Initialize surface registers */
1750 	radeon_surface_init(rdev);
1751 	/* Initialize clocks */
1752 	radeon_get_clock_info(rdev->ddev);
1753 	/* Fence driver */
1754 	r = radeon_fence_driver_init(rdev);
1755 	if (r)
1756 		return r;
1757 	/* initialize memory controller */
1758 	r = evergreen_mc_init(rdev);
1759 	if (r)
1760 		return r;
1761 	/* Memory manager */
1762 	r = radeon_bo_init(rdev);
1763 	if (r)
1764 		return r;
1765 
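	/* record the ring sizes here; the rings themselves are allocated
	 * and programmed later by cayman_startup() */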
1766 	ring->ring_obj = NULL;
1767 	r600_ring_init(rdev, ring, 1024 * 1024);
1768 
1769 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1770 	ring->ring_obj = NULL;
1771 	r600_ring_init(rdev, ring, 64 * 1024);
1772 
1773 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1774 	ring->ring_obj = NULL;
1775 	r600_ring_init(rdev, ring, 64 * 1024);
1776 
1777 	rdev->ih.ring_obj = NULL;
1778 	r600_ih_ring_init(rdev, 64 * 1024);
1779 
1780 	r = r600_pcie_gart_init(rdev);
1781 	if (r)
1782 		return r;
1783 
1784 	rdev->accel_working = true;
1785 	r = cayman_startup(rdev);
1786 	if (r) {
1787 		dev_err(rdev->dev, "disabling GPU acceleration\n");
1788 		cayman_cp_fini(rdev);
1789 		cayman_dma_fini(rdev);
1790 		r600_irq_fini(rdev);
1791 		if (rdev->flags & RADEON_IS_IGP)
1792 			si_rlc_fini(rdev);
1793 		radeon_wb_fini(rdev);
1794 		radeon_ib_pool_fini(rdev);
1795 		radeon_vm_manager_fini(rdev);
1796 		radeon_irq_kms_fini(rdev);
1797 		cayman_pcie_gart_fini(rdev);
1798 		rdev->accel_working = false;
1799 	}
1800 
1801 	/* Don't start up if the MC ucode is missing.
1802 	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
1804 	 *
1805 	 * We can skip this check for TN, because there is no MC
1806 	 * ucode.
1807 	 */
1808 	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
1809 		DRM_ERROR("radeon: MC ucode required for NI+.\n");
1810 		return -EINVAL;
1811 	}
1812 
1813 	return 0;
1814 }
1815 
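/**
 * cayman_fini - tear down the asic driver state
 *
 * @rdev: radeon_device pointer
 *
 * Undo everything set up by cayman_init()/cayman_startup(): the
 * blitter, the CP and DMA engines, interrupts, writeback, the VM
 * manager, the GART, the fence driver and the cached BIOS image.
 */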
1816 void cayman_fini(struct radeon_device *rdev)
1817 {
1818 	r600_blit_fini(rdev);
1819 	cayman_cp_fini(rdev);
1820 	cayman_dma_fini(rdev);
1821 	r600_irq_fini(rdev);
1822 	if (rdev->flags & RADEON_IS_IGP)
1823 		si_rlc_fini(rdev);
1824 	radeon_wb_fini(rdev);
1825 	radeon_vm_manager_fini(rdev);
1826 	radeon_ib_pool_fini(rdev);
1827 	radeon_irq_kms_fini(rdev);
1828 	cayman_pcie_gart_fini(rdev);
1829 	r600_vram_scratch_fini(rdev);
1830 	radeon_gem_fini(rdev);
1831 	radeon_fence_driver_fini(rdev);
1832 	radeon_bo_fini(rdev);
1833 	radeon_atombios_fini(rdev);
1834 	kfree(rdev->bios);
1835 	rdev->bios = NULL;
1836 }
1837 
1838 /*
1839  * vm
1840  */
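/**
 * cayman_vm_init - initialize the VM manager
 *
 * @rdev: radeon_device pointer
 *
 * Cayman/TN provide 8 hardware VM contexts.  On IGPs the VRAM
 * aperture sits at an offset in the physical address space, read
 * from FUS_MC_VM_FB_OFFSET and shifted into a byte address;
 * discrete boards use a zero offset.  Returns 0 for success.
 */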
1841 int cayman_vm_init(struct radeon_device *rdev)
1842 {
1843 	/* number of VMs */
1844 	rdev->vm_manager.nvm = 8;
1845 	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else {
		rdev->vm_manager.vram_base_offset = 0;
	}
1852 	return 0;
1853 }
1854 
1855 void cayman_vm_fini(struct radeon_device *rdev)
1856 {
1857 }
1858 
1859 #define R600_ENTRY_VALID   (1 << 0)
1860 #define R600_PTE_SYSTEM    (1 << 1)
1861 #define R600_PTE_SNOOPED   (1 << 2)
1862 #define R600_PTE_READABLE  (1 << 5)
1863 #define R600_PTE_WRITEABLE (1 << 6)
1864 
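/**
 * cayman_vm_page_flags - translate generic VM page flags to hw PTE bits
 *
 * @rdev: radeon_device pointer
 * @flags: RADEON_VM_PAGE_* flags
 *
 * Map the driver-generic page flags onto the r600-family PTE bits
 * defined above, e.g. VALID|READABLE|WRITEABLE becomes
 * R600_ENTRY_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE.
 * SNOOPED is only meaningful for pages in system memory.
 */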
1865 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
1866 {
1867 	uint32_t r600_flags = 0;
1868 	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
1869 	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
1870 	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
1871 	if (flags & RADEON_VM_PAGE_SYSTEM) {
1872 		r600_flags |= R600_PTE_SYSTEM;
1873 		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
1874 	}
1875 	return r600_flags;
1876 }
1877 
/**
 * cayman_vm_set_page - update the page tables using the CP or DMA
 *
 * @rdev: radeon_device pointer
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP ring or the DMA ring,
 * whichever backs the page tables (cayman/TN).
 */
1890 void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1891 			uint64_t addr, unsigned count,
1892 			uint32_t incr, uint32_t flags)
1893 {
1894 	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1895 	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1896 	uint64_t value;
1897 	unsigned ndw;
1898 
1899 	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
1900 		while (count) {
1901 			ndw = 1 + count * 2;
1902 			if (ndw > 0x3FFF)
1903 				ndw = 0x3FFF;
1904 
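			/* ME_WRITE layout: header, then the destination
			 * address (pe) in two dwords, then one (lo, hi)
			 * dword pair per page table entry */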
1905 			radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
1906 			radeon_ring_write(ring, pe);
1907 			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1908 			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1909 				if (flags & RADEON_VM_PAGE_SYSTEM) {
1910 					value = radeon_vm_map_gart(rdev, addr);
1911 					value &= 0xFFFFFFFFFFFFF000ULL;
1912 				} else if (flags & RADEON_VM_PAGE_VALID) {
1913 					value = addr;
1914 				} else {
1915 					value = 0;
1916 				}
1917 				addr += incr;
1918 				value |= r600_flags;
1919 				radeon_ring_write(ring, value);
1920 				radeon_ring_write(ring, upper_32_bits(value));
1921 			}
1922 		}
1923 	} else {
1924 		while (count) {
1925 			ndw = count * 2;
1926 			if (ndw > 0xFFFFE)
1927 				ndw = 0xFFFFE;
1928 
1929 			/* for non-physically contiguous pages (system) */
1930 			radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
1931 			radeon_ring_write(ring, pe);
1932 			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1933 			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
1934 				if (flags & RADEON_VM_PAGE_SYSTEM) {
1935 					value = radeon_vm_map_gart(rdev, addr);
1936 					value &= 0xFFFFFFFFFFFFF000ULL;
1937 				} else if (flags & RADEON_VM_PAGE_VALID) {
1938 					value = addr;
1939 				} else {
1940 					value = 0;
1941 				}
1942 				addr += incr;
1943 				value |= r600_flags;
1944 				radeon_ring_write(ring, value);
1945 				radeon_ring_write(ring, upper_32_bits(value));
1946 			}
1947 		}
1948 	}
1949 }
1950 
/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 * @vm: vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
 */
1959 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1960 {
1961 	struct radeon_ring *ring = &rdev->ring[ridx];
1962 
1963 	if (vm == NULL)
1964 		return;
1965 
1966 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
1967 	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1968 
1969 	/* flush hdp cache */
1970 	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
1971 	radeon_ring_write(ring, 0x1);
1972 
1973 	/* bits 0-7 are the VM contexts0-7 */
1974 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
1975 	radeon_ring_write(ring, 1 << vm->id);
1976 
1977 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
1978 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
1979 	radeon_ring_write(ring, 0x0);
1980 }
1981 
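/**
 * cayman_dma_vm_flush - vm flush using the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: radeon ring index
 * @vm: vm pointer
 *
 * Update the page table base and flush the VM TLB by writing the
 * same registers as cayman_vm_flush(), here with SRBM_WRITE
 * packets on the DMA ring (cayman/TN).
 */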
1982 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1983 {
1984 	struct radeon_ring *ring = &rdev->ring[ridx];
1985 
1986 	if (vm == NULL)
1987 		return;
1988 
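	/* each SRBM_WRITE carries a byte-enable mask in bits 19:16 and
	 * the register dword offset in the low bits, followed by the
	 * data dword to write */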
1989 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1990 	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
1991 	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1992 
1993 	/* flush hdp cache */
1994 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1995 	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
1996 	radeon_ring_write(ring, 1);
1997 
1998 	/* bits 0-7 are the VM contexts0-7 */
1999 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
2000 	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
2001 	radeon_ring_write(ring, 1 << vm->id);
2002 }
2003 
2004