/*	$OpenBSD: ni.c,v 1.10 2015/04/18 14:47:35 jsg Exp $	*/
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <dev/pci/drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <dev/pci/drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);

#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define BTC_MC_UCODE_SIZE 6024

#define CAYMAN_PFP_UCODE_SIZE 2176
#define CAYMAN_PM4_UCODE_SIZE 2176
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037

#define ARUBA_RLC_UCODE_SIZE 1536

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");

#define BTC_IO_MC_REGS_SIZE 29

static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running, blackout = 0;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		if (running) {
			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
		}

		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}

		if (running)
			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}

int ni_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "barts";
		rlc_chip_name = "btc";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_TURKS:
		chip_name = "turks";
		rlc_chip_name = "btc";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAICOS:
		chip_name = "caicos";
		rlc_chip_name = "btc";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		break;
	case CHIP_CAYMAN:
		chip_name = "cayman";
		rlc_chip_name = "cayman";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		break;
	case CHIP_ARUBA:
		chip_name = "aruba";
		rlc_chip_name = "aruba";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default: BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon-%s_pfp", chip_name);
	err = loadfirmware(fw_name, &rdev->pfp_fw, &rdev->pfp_fw_size);
	if (err)
		goto out;
	if (rdev->pfp_fw_size != pfp_req_size) {
		DRM_ERROR(
			"ni_cp: Bogus length %zu in firmware \"%s\"\n",
			rdev->pfp_fw_size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon-%s_me", chip_name);
	err = loadfirmware(fw_name, &rdev->me_fw, &rdev->me_fw_size);
	if (err)
		goto out;
	if (rdev->me_fw_size != me_req_size) {
		DRM_ERROR(
			"ni_cp: Bogus length %zu in firmware \"%s\"\n",
			rdev->me_fw_size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon-%s_rlc", rlc_chip_name);
	err = loadfirmware(fw_name, &rdev->rlc_fw, &rdev->rlc_fw_size);
	if (err)
		goto out;
	if (rdev->rlc_fw_size != rlc_req_size) {
		DRM_ERROR(
			"ni_rlc: Bogus length %zu in firmware \"%s\"\n",
			rdev->rlc_fw_size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon-%s_mc", chip_name);
		err = loadfirmware(fw_name, &rdev->mc_fw, &rdev->mc_fw_size);
		if (err)
			goto out;
		if (rdev->mc_fw_size != mc_req_size) {
			DRM_ERROR(
				"ni_mc: Bogus length %zu in firmware \"%s\"\n",
				rdev->mc_fw_size, fw_name);
			err = -EINVAL;
		}
	}
out:
	if (err) {
		if (err != -EINVAL)
			DRM_ERROR(
				"ni_cp: Failed to load firmware \"%s\"\n",
				fw_name);
		if (rdev->pfp_fw) {
			free(rdev->pfp_fw, M_DEVBUF, 0);
			rdev->pfp_fw = NULL;
		}
		if (rdev->me_fw) {
			free(rdev->me_fw, M_DEVBUF, 0);
			rdev->me_fw = NULL;
		}
		if (rdev->rlc_fw) {
			free(rdev->rlc_fw, M_DEVBUF, 0);
			rdev->rlc_fw = NULL;
		}
		if (rdev->mc_fw) {
			free(rdev->mc_fw, M_DEVBUF, 0);
			rdev->mc_fw = NULL;
		}
	}
	return err;
}

/*
 * Core functions
 */
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999) ||
		    (rdev->pdev->device == 0x999C)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918) ||
			   (rdev->pdev->device == 0x999D)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	evergreen_fix_pci_max_read_req_size(rdev);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


	/* setup tiling info dword. gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled rbs are just the ones not disabled :) */
	disabled_rb_mask = tmp;

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	if (ASIC_IS_DCE6(rdev))
		WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);

	if ((rdev->config.cayman.max_backends_per_se == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 1) {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		} else {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp,
						rdev->config.cayman.max_backends_per_se *
						rdev->config.cayman.max_shader_engines,
						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	}
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zero-ed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and setup and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->gart.table_addr >> 12);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw |
			  (ib->vm ? (ib->vm->id << 24) : 0));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, ((ib->vm ?
				  ib->vm->id : 0) << 24) | 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /*  */

	radeon_ring_unlock_commit(rdev, ring);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = drm_order(ring->ring_size / 8);
		rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->rptr = ring->wptr = 0;
		WREG32(ring->rptr_reg, ring->rptr);
		WREG32(ring->wptr_reg, ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	/* this only tests cp0 */
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}

	return 0;
}

/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.
 * The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things. It also
 * has support for tiling/detiling of buffers.
 * Cayman and newer support two asynchronous DMA engines.
 */
/**
 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (cayman-SI).
 */
void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
				struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));

}

/**
 * cayman_dma_stop - stop the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines (cayman-SI).
 */
void cayman_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	/* dma0 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);

	/* dma1 */
	rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}

/**
 * cayman_dma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffers and enable them. (cayman-SI).
 * Returns 0 for success, error for failure.
 */
int cayman_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 reg_offset, wb_offset;
	int i, r;

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	for (i = 0; i < 2; i++) {
		if (i == 0) {
			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
			reg_offset = DMA0_REGISTER_OFFSET;
			wb_offset = R600_WB_DMA_RPTR_OFFSET;
		} else {
			ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
			reg_offset = DMA1_REGISTER_OFFSET;
			wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
		}

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = drm_order(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + reg_offset, 0);
		WREG32(DMA_RB_WPTR + reg_offset, 0);

		/* set the wb address whether it's enabled or not */
		WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
		       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
		WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
		       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

		if (rdev->wb.enabled)
			rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

		WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);

		/* enable DMA IBs */
		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
#ifdef __BIG_ENDIAN
		ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
		WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);

		dma_cntl = RREG32(DMA_CNTL + reg_offset);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + reg_offset, dma_cntl);

		ring->wptr = 0;
		WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);

		ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;

		WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);

		ring->ready = true;

		r = radeon_ring_test(rdev, ring->idx, ring);
		if (r) {
			ring->ready = false;
			return r;
		}
	}

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

/**
 * cayman_dma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (cayman-SI).
 */
void cayman_dma_fini(struct radeon_device *rdev)
{
	cayman_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}

static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return;

	dev_info(rdev->dev, "  GRBM_STATUS = 0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0 = 0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1 = 0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS = 0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SH |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);

	dev_info(rdev->dev, "  GRBM_STATUS = 0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0 = 0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1 = 0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS = 0x%08X\n",
		RREG32(SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
		RREG32(CP_STAT));

}

static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
{
	u32 tmp;

	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
		return;

	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
		RREG32(DMA_STATUS_REG));

	/* dma0 */
	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);

	/* dma1 */
	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG = 0x%08X\n",
		RREG32(DMA_STATUS_REG));

}

static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);

	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
		reset_mask &= ~RADEON_RESET_DMA;

	if (reset_mask == 0)
		return 0;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14F8));
	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14D8));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
		 RREG32(0x14FC));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(0x14DC));

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
		cayman_gpu_soft_reset_gfx(rdev);

	if (reset_mask & RADEON_RESET_DMA)
		cayman_gpu_soft_reset_dma(rdev);

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	return 0;
}

int cayman_asic_reset(struct radeon_device *rdev)
{
	return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
					    RADEON_RESET_COMPUTE |
					    RADEON_RESET_DMA));
}

/**
 * cayman_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (cayman-SI).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 dma_status_reg;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
	else
		dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
	if (dma_status_reg & DMA_IDLE) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}

static int cayman_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* enable pcie gen2 link */
	evergreen_pcie_gen2_enable(rdev);

	evergreen_mc_program(rdev);

	if (rdev->flags & RADEON_IS_IGP) {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}
	} else {
		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
			r = ni_init_microcode(rdev);
			if (r) {
				DRM_ERROR("Failed to load firmware!\n");
				return r;
			}
		}

		r = ni_mc_load_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r = cayman_pcie_gart_enable(rdev);
	if (r)
		return r;
	cayman_gpu_init(rdev);

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy.copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	if (rdev->flags & RADEON_IS_IGP) {
		r = si_rlc_init(rdev);
		if (r) {
			DRM_ERROR("Failed to init rlc BOs!\n");
			return r;
		}
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	evergreen_irq_set(rdev);

	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
			     DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
			     DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
			     DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
			     DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	if (r)
		return r;

	r = cayman_cp_load_microcode(rdev);
	if (r)
		return r;
	r = cayman_cp_resume(rdev);
	if (r)
		return r;

	r = cayman_dma_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r)
		return r;

	return 0;
}

int cayman_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		DRM_ERROR("cayman startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}
	return r;
}

int cayman_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	radeon_vm_manager_fini(rdev);
	cayman_cp_enable(rdev, false);
	cayman_dma_stop(rdev);
	evergreen_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	cayman_pcie_gart_disable(rdev);
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does little more than
 * call asic-specific functions. This should also allow us to remove
 * a bunch of callback functions like vram_info.
 */
int cayman_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* initialize memory controller */
	r = evergreen_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 64 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = cayman_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		cayman_cp_fini(rdev);
		cayman_dma_fini(rdev);
		r600_irq_fini(rdev);
		if (rdev->flags & RADEON_IS_IGP)
			si_rlc_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		cayman_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 *
	 * We can skip this check for TN, because there is no MC
	 * ucode.
	 */
	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}

void cayman_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	cayman_cp_fini(rdev);
	cayman_dma_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->flags & RADEON_IS_IGP)
		si_rlc_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	cayman_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

/*
 * vm
 */
int cayman_vm_init(struct radeon_device *rdev)
{
	/* number of VMs */
	rdev->vm_manager.nvm = 8;
	/* base offset of vram pages */
	if (rdev->flags & RADEON_IS_IGP) {
		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
		tmp <<= 22;
		rdev->vm_manager.vram_base_offset = tmp;
	} else
		rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void cayman_vm_fini(struct radeon_device *rdev)
{
}

#define R600_ENTRY_VALID   (1 << 0)
#define R600_PTE_SYSTEM    (1 << 1)
#define R600_PTE_SNOOPED   (1 << 2)
#define R600_PTE_READABLE  (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)

uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
{
	uint32_t r600_flags = 0;
	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		r600_flags |= R600_PTE_SYSTEM;
		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return r600_flags;
}

/**
 * cayman_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (cayman-si).
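 *
 * The ndw count is capped at 0x3FFF dwords per ME_WRITE packet on the
 * GFX ring and at 0xFFFFE dwords per DMA_PACKET_WRITE on the DMA ring,
 * so large updates are split into multiple packets; each page table
 * entry takes two dwords.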
 */
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags)
{
	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;

	if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
		while (count) {
			ndw = 1 + count * 2;
			if (ndw > 0x3FFF)
				ndw = 0x3FFF;

			radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
			radeon_ring_write(ring, pe);
			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
			for (; ndw > 1; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				radeon_ring_write(ring, value);
				radeon_ring_write(ring, upper_32_bits(value));
			}
		}
	} else {
		while (count) {
			ndw = count * 2;
			if (ndw > 0xFFFFE)
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
			radeon_ring_write(ring, pe);
			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
					value &= 0xFFFFFFFFFFFFF000ULL;
				} else if (flags & RADEON_VM_PAGE_VALID) {
					value = addr;
				} else {
					value = 0;
				}
				addr += incr;
				value |= r600_flags;
				radeon_ring_write(ring, value);
				radeon_ring_write(ring, upper_32_bits(value));
			}
		}
	}
}

/**
 * cayman_vm_flush - vm flush using the CP
 *
 * @rdev: radeon_device pointer
 *
 * Update the page table base and flush the VM TLB
 * using the CP (cayman-si).
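 *
 * The sequence below writes the VM's page directory address into that
 * context's page table base register, flushes the HDP cache, requests a
 * TLB invalidate for the VM id, and finally syncs the PFP to the ME so
 * the PFP does not fetch stale translations.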
 */
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
	radeon_ring_write(ring, 0x1);

	/* bits 0-7 are the VM contexts 0-7 */
	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
	radeon_ring_write(ring, 1 << vm->id);

	/* sync PFP to ME, otherwise we might get invalid PFP reads */
	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
	radeon_ring_write(ring, 0x0);
}

void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
	struct radeon_ring *ring = &rdev->ring[ridx];

	if (vm == NULL)
		return;

	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts 0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm->id);
}