/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#include "radeon_ucode.h"

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
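/* integrated (APU) parts */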
MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 86 MODULE_FIRMWARE("radeon/PALM_me.bin"); 87 MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 88 MODULE_FIRMWARE("radeon/SUMO_pfp.bin"); 89 MODULE_FIRMWARE("radeon/SUMO_me.bin"); 90 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); 91 MODULE_FIRMWARE("radeon/SUMO2_me.bin"); 92 93 static const u32 crtc_offsets[2] = 94 { 95 0, 96 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL 97 }; 98 99 int r600_debugfs_mc_info_init(struct radeon_device *rdev); 100 101 /* r600,rv610,rv630,rv620,rv635,rv670 */ 102 static void r600_gpu_init(struct radeon_device *rdev); 103 void r600_irq_disable(struct radeon_device *rdev); 104 static void r600_pcie_gen2_enable(struct radeon_device *rdev); 105 106 /* 107 * Indirect registers accessor 108 */ 109 u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg) 110 { 111 u32 r; 112 113 spin_lock(&rdev->rcu_idx_lock); 114 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 115 r = RREG32(R600_RCU_DATA); 116 spin_unlock(&rdev->rcu_idx_lock); 117 return r; 118 } 119 120 void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v) 121 { 122 spin_lock(&rdev->rcu_idx_lock); 123 WREG32(R600_RCU_INDEX, ((reg) & 0x1fff)); 124 WREG32(R600_RCU_DATA, (v)); 125 spin_unlock(&rdev->rcu_idx_lock); 126 } 127 128 u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg) 129 { 130 u32 r; 131 132 spin_lock(&rdev->uvd_idx_lock); 133 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 134 r = RREG32(R600_UVD_CTX_DATA); 135 spin_unlock(&rdev->uvd_idx_lock); 136 return r; 137 } 138 139 void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v) 140 { 141 spin_lock(&rdev->uvd_idx_lock); 142 WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff)); 143 WREG32(R600_UVD_CTX_DATA, (v)); 144 spin_unlock(&rdev->uvd_idx_lock); 145 } 146 147 /** 148 * r600_get_allowed_info_register - fetch the register for the info ioctl 149 * 150 * @rdev: radeon_device pointer 151 * @reg: register offset in bytes 152 * @val: register value 153 * 154 * Returns 0 for success or -EINVAL for an invalid register 155 * 156 */ 157 int r600_get_allowed_info_register(struct radeon_device *rdev, 158 u32 reg, u32 *val) 159 { 160 switch (reg) { 161 case GRBM_STATUS: 162 case GRBM_STATUS2: 163 case R_000E50_SRBM_STATUS: 164 case DMA_STATUS_REG: 165 case UVD_STATUS: 166 *val = RREG32(reg); 167 return 0; 168 default: 169 return -EINVAL; 170 } 171 } 172 173 /** 174 * r600_get_xclk - get the xclk 175 * 176 * @rdev: radeon_device pointer 177 * 178 * Returns the reference clock used by the gfx engine 179 * (r6xx, IGPs, APUs). 
/**
 * r600_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int r600_get_allowed_info_register(struct radeon_device *rdev,
				   u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case R_000E50_SRBM_STATUS:
	case DMA_STATUS_REG:
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}

int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK,
		 ~(UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}

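/**
 * dce3_program_fmt - set up the FMT block ahead of the scanout pipe
 * @encoder: encoder being programmed
 *
 * Programs truncation or spatial dithering in the FMT block so the pipe
 * output matches the bpc of the attached monitor; LVDS (handled by atom)
 * and the analog DACs are skipped below.
 */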
void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	case 10:
	default:
		/* not needed */
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}

void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}

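/*
 * rs780_pm_init_profile - build the dpms on/off profile table
 *
 * The power state and clock mode indices chosen for each profile depend
 * on how many power states the vbios exposes (2, 3, or more).
 */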
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}

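/*
 * r600_pm_init_profile - build the dpms on/off profile table (r6xx/r7xx)
 *
 * On CHIP_R600 every profile maps to the default power state; the other
 * families pick battery/performance states via radeon_pm_get_type_index().
 */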
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}

void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}

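/*
 * r600_hpd_set_polarity - program the HPD interrupt polarity
 *
 * The polarity is set opposite to the current sense state so that the
 * next interrupt fires on a plug/unplug transition.
 */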
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}

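/*
 * r600_hpd_init - enable the hotplug detect pins and their interrupts
 */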
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS; it breaks the
			 * aux dp channel on imac and helps (but does not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
			/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}

/*
 * R600 PCIE GART
 */
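/*
 * r600_pcie_gart_tlb_flush - flush the GART TLB after page table updates;
 * the HDP cache is flushed first so the updates have actually hit vram.
 */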
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.ptr;
		u32 tmp;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}

static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

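/*
 * r600_pcie_gart_disable - clear the VM contexts, drop the L1/L2 TLB
 * enables and unpin the page table from vram.
 */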
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}

static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}

static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

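/*
 * The RS780/RS880 MC registers are reached through the MC_INDEX/MC_DATA
 * indirect pair; the index is cleared again after each access.
 */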
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock(&rdev->mc_idx_lock);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock(&rdev->mc_idx_lock);
}

static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same offset as in the CPU (PCI) address space,
 * as some GPUs seem to have issues when we reprogram it to a different
 * address space.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then place VRAM adjacent to the AGP aperture; we
 * need them to be contiguous from the GPU's point of view so that we can
 * catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case we limit VRAM or GTT.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on AGP platforms.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}

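/*
 * r600_mc_init - probe the VRAM width and size, place the VRAM and GTT
 * apertures, and enable the K8 direct (fastfb) mapping on RS780/RS880
 * when it is usable.
 */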
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply disabled
				 * when sideport memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						 (unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}

int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;
	void *vram_scratch_ptr_ptr = &rdev->vram_scratch.ptr;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     0, NULL, NULL, &rdev->vram_scratch.robj);
		if (r) {
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
			   vram_scratch_ptr_ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}

void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}

static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		 RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		 RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		 RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		 RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		 RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		 RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		 RREG32(DMA_STATUS_REG));
}

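/*
 * r600_is_display_hung - an enabled CRTC whose H/V counter stops moving
 * across ten 100us samples is considered hung.
 */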
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}

static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
S_008020_SOFT_RESET_CB(1) | 1704 S_008020_SOFT_RESET_PA(1) | 1705 S_008020_SOFT_RESET_SC(1) | 1706 S_008020_SOFT_RESET_SPI(1) | 1707 S_008020_SOFT_RESET_SX(1) | 1708 S_008020_SOFT_RESET_SH(1) | 1709 S_008020_SOFT_RESET_TC(1) | 1710 S_008020_SOFT_RESET_TA(1) | 1711 S_008020_SOFT_RESET_VC(1) | 1712 S_008020_SOFT_RESET_VGT(1); 1713 else 1714 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) | 1715 S_008020_SOFT_RESET_DB(1) | 1716 S_008020_SOFT_RESET_CB(1) | 1717 S_008020_SOFT_RESET_PA(1) | 1718 S_008020_SOFT_RESET_SC(1) | 1719 S_008020_SOFT_RESET_SMX(1) | 1720 S_008020_SOFT_RESET_SPI(1) | 1721 S_008020_SOFT_RESET_SX(1) | 1722 S_008020_SOFT_RESET_SH(1) | 1723 S_008020_SOFT_RESET_TC(1) | 1724 S_008020_SOFT_RESET_TA(1) | 1725 S_008020_SOFT_RESET_VC(1) | 1726 S_008020_SOFT_RESET_VGT(1); 1727 } 1728 1729 if (reset_mask & RADEON_RESET_CP) { 1730 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) | 1731 S_008020_SOFT_RESET_VGT(1); 1732 1733 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1); 1734 } 1735 1736 if (reset_mask & RADEON_RESET_DMA) { 1737 if (rdev->family >= CHIP_RV770) 1738 srbm_soft_reset |= RV770_SOFT_RESET_DMA; 1739 else 1740 srbm_soft_reset |= SOFT_RESET_DMA; 1741 } 1742 1743 if (reset_mask & RADEON_RESET_RLC) 1744 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1); 1745 1746 if (reset_mask & RADEON_RESET_SEM) 1747 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1); 1748 1749 if (reset_mask & RADEON_RESET_IH) 1750 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1); 1751 1752 if (reset_mask & RADEON_RESET_GRBM) 1753 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1); 1754 1755 if (!(rdev->flags & RADEON_IS_IGP)) { 1756 if (reset_mask & RADEON_RESET_MC) 1757 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1); 1758 } 1759 1760 if (reset_mask & RADEON_RESET_VMC) 1761 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1); 1762 1763 if (grbm_soft_reset) { 1764 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1765 tmp |= grbm_soft_reset; 1766 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 1767 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1768 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1769 1770 udelay(50); 1771 1772 tmp &= ~grbm_soft_reset; 1773 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1774 tmp = RREG32(R_008020_GRBM_SOFT_RESET); 1775 } 1776 1777 if (srbm_soft_reset) { 1778 tmp = RREG32(SRBM_SOFT_RESET); 1779 tmp |= srbm_soft_reset; 1780 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 1781 WREG32(SRBM_SOFT_RESET, tmp); 1782 tmp = RREG32(SRBM_SOFT_RESET); 1783 1784 udelay(50); 1785 1786 tmp &= ~srbm_soft_reset; 1787 WREG32(SRBM_SOFT_RESET, tmp); 1788 tmp = RREG32(SRBM_SOFT_RESET); 1789 } 1790 1791 /* Wait a little for things to settle down */ 1792 mdelay(1); 1793 1794 rv515_mc_resume(rdev, &save); 1795 udelay(50); 1796 1797 r600_print_gpu_status_regs(rdev); 1798 } 1799 1800 static void r600_gpu_pci_config_reset(struct radeon_device *rdev) 1801 { 1802 struct rv515_mc_save save; 1803 u32 tmp, i; 1804 1805 dev_info(rdev->dev, "GPU pci config reset\n"); 1806 1807 /* disable dpm? 
 */

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	/* Disable DMA */
	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);

	mdelay(50);

	/* set mclk/sclk to bypass */
	if (rdev->family >= CHIP_RV770)
		rv770_set_clk_bypass_mode(rdev);
	/* disable BM */
	pci_clear_master(rdev->pdev);
	/* disable mem access */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	/* BIF reset workaround. Not sure if this is needed on 6xx */
	tmp = RREG32(BUS_CNTL);
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
	WREG32(BUS_CNTL, tmp);

	tmp = RREG32(BIF_SCRATCH0);

	/* reset */
	radeon_pci_config_reset(rdev);
	mdelay(1);

	/* BIF reset workaround. Not sure if this is needed on 6xx */
	tmp = SOFT_RESET_BIF;
	WREG32(SRBM_SOFT_RESET, tmp);
	mdelay(1);
	WREG32(SRBM_SOFT_RESET, 0);

	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
		udelay(1);
	}
}

int r600_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	/* try soft reset */
	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	/* try pci config reset */
	if (reset_mask && radeon_hard_reset)
		r600_gpu_pci_config_reset(rdev);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
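 * A lockup is only reported when r600_gpu_check_soft_reset() flags the
 * GFX, compute or CP blocks; otherwise the ring's lockup tracker is
 * simply refreshed.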
1895 */ 1896 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1897 { 1898 u32 reset_mask = r600_gpu_check_soft_reset(rdev); 1899 1900 if (!(reset_mask & (RADEON_RESET_GFX | 1901 RADEON_RESET_COMPUTE | 1902 RADEON_RESET_CP))) { 1903 radeon_ring_lockup_update(rdev, ring); 1904 return false; 1905 } 1906 return radeon_ring_test_lockup(rdev, ring); 1907 } 1908 1909 u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1910 u32 tiling_pipe_num, 1911 u32 max_rb_num, 1912 u32 total_max_rb_num, 1913 u32 disabled_rb_mask) 1914 { 1915 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1916 u32 pipe_rb_ratio, pipe_rb_remain, tmp; 1917 u32 data = 0, mask = 1 << (max_rb_num - 1); 1918 unsigned i, j; 1919 1920 /* mask out the RBs that don't exist on that asic */ 1921 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); 1922 /* make sure at least one RB is available */ 1923 if ((tmp & 0xff) != 0xff) 1924 disabled_rb_mask = tmp; 1925 1926 rendering_pipe_num = 1 << tiling_pipe_num; 1927 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); 1928 BUG_ON(rendering_pipe_num < req_rb_num); 1929 1930 pipe_rb_ratio = rendering_pipe_num / req_rb_num; 1931 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num; 1932 1933 if (rdev->family <= CHIP_RV740) { 1934 /* r6xx/r7xx */ 1935 rb_num_width = 2; 1936 } else { 1937 /* eg+ */ 1938 rb_num_width = 4; 1939 } 1940 1941 for (i = 0; i < max_rb_num; i++) { 1942 if (!(mask & disabled_rb_mask)) { 1943 for (j = 0; j < pipe_rb_ratio; j++) { 1944 data <<= rb_num_width; 1945 data |= max_rb_num - i - 1; 1946 } 1947 if (pipe_rb_remain) { 1948 data <<= rb_num_width; 1949 data |= max_rb_num - i - 1; 1950 pipe_rb_remain--; 1951 } 1952 } 1953 mask >>= 1; 1954 } 1955 1956 return data; 1957 } 1958 1959 int r600_count_pipe_bits(uint32_t val) 1960 { 1961 return hweight32(val); 1962 } 1963 1964 static void r600_gpu_init(struct radeon_device *rdev) 1965 { 1966 u32 tiling_config; 1967 u32 ramcfg; 1968 u32 cc_gc_shader_pipe_config; 1969 u32 tmp; 1970 int i, j; 1971 u32 sq_config; 1972 u32 sq_gpr_resource_mgmt_1 = 0; 1973 u32 sq_gpr_resource_mgmt_2 = 0; 1974 u32 sq_thread_resource_mgmt = 0; 1975 u32 sq_stack_resource_mgmt_1 = 0; 1976 u32 sq_stack_resource_mgmt_2 = 0; 1977 u32 disabled_rb_mask; 1978 1979 rdev->config.r600.tiling_group_size = 256; 1980 switch (rdev->family) { 1981 case CHIP_R600: 1982 rdev->config.r600.max_pipes = 4; 1983 rdev->config.r600.max_tile_pipes = 8; 1984 rdev->config.r600.max_simds = 4; 1985 rdev->config.r600.max_backends = 4; 1986 rdev->config.r600.max_gprs = 256; 1987 rdev->config.r600.max_threads = 192; 1988 rdev->config.r600.max_stack_entries = 256; 1989 rdev->config.r600.max_hw_contexts = 8; 1990 rdev->config.r600.max_gs_threads = 16; 1991 rdev->config.r600.sx_max_export_size = 128; 1992 rdev->config.r600.sx_max_export_pos_size = 16; 1993 rdev->config.r600.sx_max_export_smx_size = 128; 1994 rdev->config.r600.sq_num_cf_insts = 2; 1995 break; 1996 case CHIP_RV630: 1997 case CHIP_RV635: 1998 rdev->config.r600.max_pipes = 2; 1999 rdev->config.r600.max_tile_pipes = 2; 2000 rdev->config.r600.max_simds = 3; 2001 rdev->config.r600.max_backends = 1; 2002 rdev->config.r600.max_gprs = 128; 2003 rdev->config.r600.max_threads = 192; 2004 rdev->config.r600.max_stack_entries = 128; 2005 rdev->config.r600.max_hw_contexts = 8; 2006 rdev->config.r600.max_gs_threads = 4; 2007 rdev->config.r600.sx_max_export_size = 128; 2008 rdev->config.r600.sx_max_export_pos_size = 16; 2009 rdev->config.r600.sx_max_export_smx_size = 
128; 2010 rdev->config.r600.sq_num_cf_insts = 2; 2011 break; 2012 case CHIP_RV610: 2013 case CHIP_RV620: 2014 case CHIP_RS780: 2015 case CHIP_RS880: 2016 rdev->config.r600.max_pipes = 1; 2017 rdev->config.r600.max_tile_pipes = 1; 2018 rdev->config.r600.max_simds = 2; 2019 rdev->config.r600.max_backends = 1; 2020 rdev->config.r600.max_gprs = 128; 2021 rdev->config.r600.max_threads = 192; 2022 rdev->config.r600.max_stack_entries = 128; 2023 rdev->config.r600.max_hw_contexts = 4; 2024 rdev->config.r600.max_gs_threads = 4; 2025 rdev->config.r600.sx_max_export_size = 128; 2026 rdev->config.r600.sx_max_export_pos_size = 16; 2027 rdev->config.r600.sx_max_export_smx_size = 128; 2028 rdev->config.r600.sq_num_cf_insts = 1; 2029 break; 2030 case CHIP_RV670: 2031 rdev->config.r600.max_pipes = 4; 2032 rdev->config.r600.max_tile_pipes = 4; 2033 rdev->config.r600.max_simds = 4; 2034 rdev->config.r600.max_backends = 4; 2035 rdev->config.r600.max_gprs = 192; 2036 rdev->config.r600.max_threads = 192; 2037 rdev->config.r600.max_stack_entries = 256; 2038 rdev->config.r600.max_hw_contexts = 8; 2039 rdev->config.r600.max_gs_threads = 16; 2040 rdev->config.r600.sx_max_export_size = 128; 2041 rdev->config.r600.sx_max_export_pos_size = 16; 2042 rdev->config.r600.sx_max_export_smx_size = 128; 2043 rdev->config.r600.sq_num_cf_insts = 2; 2044 break; 2045 default: 2046 break; 2047 } 2048 2049 /* Initialize HDP */ 2050 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 2051 WREG32((0x2c14 + j), 0x00000000); 2052 WREG32((0x2c18 + j), 0x00000000); 2053 WREG32((0x2c1c + j), 0x00000000); 2054 WREG32((0x2c20 + j), 0x00000000); 2055 WREG32((0x2c24 + j), 0x00000000); 2056 } 2057 2058 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 2059 2060 /* Setup tiling */ 2061 tiling_config = 0; 2062 ramcfg = RREG32(RAMCFG); 2063 switch (rdev->config.r600.max_tile_pipes) { 2064 case 1: 2065 tiling_config |= PIPE_TILING(0); 2066 break; 2067 case 2: 2068 tiling_config |= PIPE_TILING(1); 2069 break; 2070 case 4: 2071 tiling_config |= PIPE_TILING(2); 2072 break; 2073 case 8: 2074 tiling_config |= PIPE_TILING(3); 2075 break; 2076 default: 2077 break; 2078 } 2079 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; 2080 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 2081 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 2082 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 2083 2084 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 2085 if (tmp > 3) { 2086 tiling_config |= ROW_TILING(3); 2087 tiling_config |= SAMPLE_SPLIT(3); 2088 } else { 2089 tiling_config |= ROW_TILING(tmp); 2090 tiling_config |= SAMPLE_SPLIT(tmp); 2091 } 2092 tiling_config |= BANK_SWAPS(1); 2093 2094 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; 2095 tmp = rdev->config.r600.max_simds - 2096 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 2097 rdev->config.r600.active_simds = tmp; 2098 2099 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 2100 tmp = 0; 2101 for (i = 0; i < rdev->config.r600.max_backends; i++) 2102 tmp |= (1 << i); 2103 /* if all the backends are disabled, fix it up here */ 2104 if ((disabled_rb_mask & tmp) == tmp) { 2105 for (i = 0; i < rdev->config.r600.max_backends; i++) 2106 disabled_rb_mask &= ~(1 << i); 2107 } 2108 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 2109 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, 2110 
R6XX_MAX_BACKENDS, disabled_rb_mask); 2111 tiling_config |= tmp << 16; 2112 rdev->config.r600.backend_map = tmp; 2113 2114 rdev->config.r600.tile_config = tiling_config; 2115 WREG32(GB_TILING_CONFIG, tiling_config); 2116 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 2117 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 2118 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); 2119 2120 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 2121 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 2122 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 2123 2124 /* Setup some CP states */ 2125 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b))); 2126 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40))); 2127 2128 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT | 2129 SYNC_WALKER | SYNC_ALIGNER)); 2130 /* Setup various GPU states */ 2131 if (rdev->family == CHIP_RV670) 2132 WREG32(ARB_GDEC_RD_CNTL, 0x00000021); 2133 2134 tmp = RREG32(SX_DEBUG_1); 2135 tmp |= SMX_EVENT_RELEASE; 2136 if ((rdev->family > CHIP_R600)) 2137 tmp |= ENABLE_NEW_SMX_ADDRESS; 2138 WREG32(SX_DEBUG_1, tmp); 2139 2140 if (((rdev->family) == CHIP_R600) || 2141 ((rdev->family) == CHIP_RV630) || 2142 ((rdev->family) == CHIP_RV610) || 2143 ((rdev->family) == CHIP_RV620) || 2144 ((rdev->family) == CHIP_RS780) || 2145 ((rdev->family) == CHIP_RS880)) { 2146 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); 2147 } else { 2148 WREG32(DB_DEBUG, 0); 2149 } 2150 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) | 2151 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4))); 2152 2153 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 2154 WREG32(VGT_NUM_INSTANCES, 0); 2155 2156 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); 2157 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0)); 2158 2159 tmp = RREG32(SQ_MS_FIFO_SIZES); 2160 if (((rdev->family) == CHIP_RV610) || 2161 ((rdev->family) == CHIP_RV620) || 2162 ((rdev->family) == CHIP_RS780) || 2163 ((rdev->family) == CHIP_RS880)) { 2164 tmp = (CACHE_FIFO_SIZE(0xa) | 2165 FETCH_FIFO_HIWATER(0xa) | 2166 DONE_FIFO_HIWATER(0xe0) | 2167 ALU_UPDATE_FIFO_HIWATER(0x8)); 2168 } else if (((rdev->family) == CHIP_R600) || 2169 ((rdev->family) == CHIP_RV630)) { 2170 tmp &= ~DONE_FIFO_HIWATER(0xff); 2171 tmp |= DONE_FIFO_HIWATER(0x4); 2172 } 2173 WREG32(SQ_MS_FIFO_SIZES, tmp); 2174 2175 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT 2176 * should be adjusted as needed by the 2D/3D drivers. 
This just sets default values 2177 */ 2178 sq_config = RREG32(SQ_CONFIG); 2179 sq_config &= ~(PS_PRIO(3) | 2180 VS_PRIO(3) | 2181 GS_PRIO(3) | 2182 ES_PRIO(3)); 2183 sq_config |= (DX9_CONSTS | 2184 VC_ENABLE | 2185 PS_PRIO(0) | 2186 VS_PRIO(1) | 2187 GS_PRIO(2) | 2188 ES_PRIO(3)); 2189 2190 if ((rdev->family) == CHIP_R600) { 2191 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) | 2192 NUM_VS_GPRS(124) | 2193 NUM_CLAUSE_TEMP_GPRS(4)); 2194 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) | 2195 NUM_ES_GPRS(0)); 2196 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) | 2197 NUM_VS_THREADS(48) | 2198 NUM_GS_THREADS(4) | 2199 NUM_ES_THREADS(4)); 2200 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) | 2201 NUM_VS_STACK_ENTRIES(128)); 2202 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) | 2203 NUM_ES_STACK_ENTRIES(0)); 2204 } else if (((rdev->family) == CHIP_RV610) || 2205 ((rdev->family) == CHIP_RV620) || 2206 ((rdev->family) == CHIP_RS780) || 2207 ((rdev->family) == CHIP_RS880)) { 2208 /* no vertex cache */ 2209 sq_config &= ~VC_ENABLE; 2210 2211 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2212 NUM_VS_GPRS(44) | 2213 NUM_CLAUSE_TEMP_GPRS(2)); 2214 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 2215 NUM_ES_GPRS(17)); 2216 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2217 NUM_VS_THREADS(78) | 2218 NUM_GS_THREADS(4) | 2219 NUM_ES_THREADS(31)); 2220 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 2221 NUM_VS_STACK_ENTRIES(40)); 2222 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 2223 NUM_ES_STACK_ENTRIES(16)); 2224 } else if (((rdev->family) == CHIP_RV630) || 2225 ((rdev->family) == CHIP_RV635)) { 2226 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2227 NUM_VS_GPRS(44) | 2228 NUM_CLAUSE_TEMP_GPRS(2)); 2229 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) | 2230 NUM_ES_GPRS(18)); 2231 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2232 NUM_VS_THREADS(78) | 2233 NUM_GS_THREADS(4) | 2234 NUM_ES_THREADS(31)); 2235 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 2236 NUM_VS_STACK_ENTRIES(40)); 2237 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 2238 NUM_ES_STACK_ENTRIES(16)); 2239 } else if ((rdev->family) == CHIP_RV670) { 2240 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 2241 NUM_VS_GPRS(44) | 2242 NUM_CLAUSE_TEMP_GPRS(2)); 2243 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 2244 NUM_ES_GPRS(17)); 2245 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 2246 NUM_VS_THREADS(78) | 2247 NUM_GS_THREADS(4) | 2248 NUM_ES_THREADS(31)); 2249 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) | 2250 NUM_VS_STACK_ENTRIES(64)); 2251 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) | 2252 NUM_ES_STACK_ENTRIES(64)); 2253 } 2254 2255 WREG32(SQ_CONFIG, sq_config); 2256 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 2257 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 2258 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 2259 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 2260 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 2261 2262 if (((rdev->family) == CHIP_RV610) || 2263 ((rdev->family) == CHIP_RV620) || 2264 ((rdev->family) == CHIP_RS780) || 2265 ((rdev->family) == CHIP_RS880)) { 2266 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); 2267 } else { 2268 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); 2269 } 2270 2271 /* More default values. 
2D/3D driver should adjust as needed */ 2272 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) | 2273 S1_X(0x4) | S1_Y(0xc))); 2274 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) | 2275 S1_X(0x2) | S1_Y(0x2) | 2276 S2_X(0xa) | S2_Y(0x6) | 2277 S3_X(0x6) | S3_Y(0xa))); 2278 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) | 2279 S1_X(0x4) | S1_Y(0xc) | 2280 S2_X(0x1) | S2_Y(0x6) | 2281 S3_X(0xa) | S3_Y(0xe))); 2282 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) | 2283 S5_X(0x0) | S5_Y(0x0) | 2284 S6_X(0xb) | S6_Y(0x4) | 2285 S7_X(0x7) | S7_Y(0x8))); 2286 2287 WREG32(VGT_STRMOUT_EN, 0); 2288 tmp = rdev->config.r600.max_pipes * 16; 2289 switch (rdev->family) { 2290 case CHIP_RV610: 2291 case CHIP_RV620: 2292 case CHIP_RS780: 2293 case CHIP_RS880: 2294 tmp += 32; 2295 break; 2296 case CHIP_RV670: 2297 tmp += 128; 2298 break; 2299 default: 2300 break; 2301 } 2302 if (tmp > 256) { 2303 tmp = 256; 2304 } 2305 WREG32(VGT_ES_PER_GS, 128); 2306 WREG32(VGT_GS_PER_ES, tmp); 2307 WREG32(VGT_GS_PER_VS, 2); 2308 WREG32(VGT_GS_VERTEX_REUSE, 16); 2309 2310 /* more default values. 2D/3D driver should adjust as needed */ 2311 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 2312 WREG32(VGT_STRMOUT_EN, 0); 2313 WREG32(SX_MISC, 0); 2314 WREG32(PA_SC_MODE_CNTL, 0); 2315 WREG32(PA_SC_AA_CONFIG, 0); 2316 WREG32(PA_SC_LINE_STIPPLE, 0); 2317 WREG32(SPI_INPUT_Z, 0); 2318 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); 2319 WREG32(CB_COLOR7_FRAG, 0); 2320 2321 /* Clear render buffer base addresses */ 2322 WREG32(CB_COLOR0_BASE, 0); 2323 WREG32(CB_COLOR1_BASE, 0); 2324 WREG32(CB_COLOR2_BASE, 0); 2325 WREG32(CB_COLOR3_BASE, 0); 2326 WREG32(CB_COLOR4_BASE, 0); 2327 WREG32(CB_COLOR5_BASE, 0); 2328 WREG32(CB_COLOR6_BASE, 0); 2329 WREG32(CB_COLOR7_BASE, 0); 2330 WREG32(CB_COLOR7_FRAG, 0); 2331 2332 switch (rdev->family) { 2333 case CHIP_RV610: 2334 case CHIP_RV620: 2335 case CHIP_RS780: 2336 case CHIP_RS880: 2337 tmp = TC_L2_SIZE(8); 2338 break; 2339 case CHIP_RV630: 2340 case CHIP_RV635: 2341 tmp = TC_L2_SIZE(4); 2342 break; 2343 case CHIP_R600: 2344 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT; 2345 break; 2346 default: 2347 tmp = TC_L2_SIZE(0); 2348 break; 2349 } 2350 WREG32(TC_CNTL, tmp); 2351 2352 tmp = RREG32(HDP_HOST_PATH_CNTL); 2353 WREG32(HDP_HOST_PATH_CNTL, tmp); 2354 2355 tmp = RREG32(ARB_POP); 2356 tmp |= ENABLE_TC128; 2357 WREG32(ARB_POP, tmp); 2358 2359 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 2360 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 2361 NUM_CLIP_SEQ(3))); 2362 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); 2363 WREG32(VC_ENHANCE, 0); 2364 } 2365 2366 2367 /* 2368 * Indirect registers accessor 2369 */ 2370 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 2371 { 2372 u32 r; 2373 2374 spin_lock(&rdev->pciep_idx_lock); 2375 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2376 (void)RREG32(PCIE_PORT_INDEX); 2377 r = RREG32(PCIE_PORT_DATA); 2378 spin_unlock(&rdev->pciep_idx_lock); 2379 return r; 2380 } 2381 2382 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 2383 { 2384 spin_lock(&rdev->pciep_idx_lock); 2385 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 2386 (void)RREG32(PCIE_PORT_INDEX); 2387 WREG32(PCIE_PORT_DATA, (v)); 2388 (void)RREG32(PCIE_PORT_DATA); 2389 spin_unlock(&rdev->pciep_idx_lock); 2390 } 2391 2392 /* 2393 * CP & Ring 2394 */ 2395 void r600_cp_stop(struct radeon_device *rdev) 2396 { 2397 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) 2398 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2399 WREG32(R_0086D8_CP_ME_CNTL, 
S_0086D8_CP_ME_HALT(1)); 2400 WREG32(SCRATCH_UMSK, 0); 2401 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2402 } 2403 2404 int r600_init_microcode(struct radeon_device *rdev) 2405 { 2406 const char *chip_name; 2407 const char *rlc_chip_name; 2408 const char *smc_chip_name = "RV770"; 2409 size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0; 2410 char fw_name[30]; 2411 int err; 2412 2413 DRM_DEBUG("\n"); 2414 2415 switch (rdev->family) { 2416 case CHIP_R600: 2417 chip_name = "R600"; 2418 rlc_chip_name = "R600"; 2419 break; 2420 case CHIP_RV610: 2421 chip_name = "RV610"; 2422 rlc_chip_name = "R600"; 2423 break; 2424 case CHIP_RV630: 2425 chip_name = "RV630"; 2426 rlc_chip_name = "R600"; 2427 break; 2428 case CHIP_RV620: 2429 chip_name = "RV620"; 2430 rlc_chip_name = "R600"; 2431 break; 2432 case CHIP_RV635: 2433 chip_name = "RV635"; 2434 rlc_chip_name = "R600"; 2435 break; 2436 case CHIP_RV670: 2437 chip_name = "RV670"; 2438 rlc_chip_name = "R600"; 2439 break; 2440 case CHIP_RS780: 2441 case CHIP_RS880: 2442 chip_name = "RS780"; 2443 rlc_chip_name = "R600"; 2444 break; 2445 case CHIP_RV770: 2446 chip_name = "RV770"; 2447 rlc_chip_name = "R700"; 2448 smc_chip_name = "RV770"; 2449 smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4); 2450 break; 2451 case CHIP_RV730: 2452 chip_name = "RV730"; 2453 rlc_chip_name = "R700"; 2454 smc_chip_name = "RV730"; 2455 smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4); 2456 break; 2457 case CHIP_RV710: 2458 chip_name = "RV710"; 2459 rlc_chip_name = "R700"; 2460 smc_chip_name = "RV710"; 2461 smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4); 2462 break; 2463 case CHIP_RV740: 2464 chip_name = "RV730"; 2465 rlc_chip_name = "R700"; 2466 smc_chip_name = "RV740"; 2467 smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4); 2468 break; 2469 case CHIP_CEDAR: 2470 chip_name = "CEDAR"; 2471 rlc_chip_name = "CEDAR"; 2472 smc_chip_name = "CEDAR"; 2473 smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4); 2474 break; 2475 case CHIP_REDWOOD: 2476 chip_name = "REDWOOD"; 2477 rlc_chip_name = "REDWOOD"; 2478 smc_chip_name = "REDWOOD"; 2479 smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4); 2480 break; 2481 case CHIP_JUNIPER: 2482 chip_name = "JUNIPER"; 2483 rlc_chip_name = "JUNIPER"; 2484 smc_chip_name = "JUNIPER"; 2485 smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4); 2486 break; 2487 case CHIP_CYPRESS: 2488 case CHIP_HEMLOCK: 2489 chip_name = "CYPRESS"; 2490 rlc_chip_name = "CYPRESS"; 2491 smc_chip_name = "CYPRESS"; 2492 smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4); 2493 break; 2494 case CHIP_PALM: 2495 chip_name = "PALM"; 2496 rlc_chip_name = "SUMO"; 2497 break; 2498 case CHIP_SUMO: 2499 chip_name = "SUMO"; 2500 rlc_chip_name = "SUMO"; 2501 break; 2502 case CHIP_SUMO2: 2503 chip_name = "SUMO2"; 2504 rlc_chip_name = "SUMO"; 2505 break; 2506 default: BUG(); 2507 } 2508 2509 if (rdev->family >= CHIP_CEDAR) { 2510 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 2511 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 2512 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 2513 } else if (rdev->family >= CHIP_RV770) { 2514 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 2515 me_req_size = R700_PM4_UCODE_SIZE * 4; 2516 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 2517 } else { 2518 pfp_req_size = R600_PFP_UCODE_SIZE * 4; 2519 me_req_size = R600_PM4_UCODE_SIZE * 12; 2520 rlc_req_size = R600_RLC_UCODE_SIZE * 4; 2521 } 2522 2523 DRM_INFO("Loading %s Microcode\n", chip_name); 2524 2525 ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name); 2526 err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev); 
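	/* Each image must match its expected size exactly; a mismatch below
	 * most likely means the firmware images are out of sync with the
	 * driver, so it is treated as fatal. */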
	if (err)
		goto out;
	if (rdev->pfp_fw->datasize != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->datasize, fw_name);
		err = -EINVAL;
		goto out;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->datasize != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->datasize, fw_name);
		err = -EINVAL;
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->datasize != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->datasize, fw_name);
		err = -EINVAL;
	}

	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", smc_chip_name);

		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			printk(KERN_ERR
			       "smc: error loading firmware \"%s\"\n",
			       fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->datasize != smc_req_size) {
			printk(KERN_ERR
			       "smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->datasize, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}

u32 r600_gfx_get_rptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(R600_CP_RB_RPTR);

	return rptr;
}

u32 r600_gfx_get_wptr(struct radeon_device *rdev,
		      struct radeon_ring *ring)
{
	u32 wptr;

	wptr = RREG32(R600_CP_RB_WPTR);

	return wptr;
}

void r600_gfx_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(R600_CP_RB_WPTR, ring->wptr);
	(void)RREG32(R600_CP_RB_WPTR);
}

/**
 * r600_fini_microcode - drop the firmware image references
 *
 * @rdev: radeon_device pointer
 *
 * Drop the pfp, me, rlc and smc firmware image references.
 * Called at driver shutdown.
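 * release_firmware() accepts a NULL pointer, so this is safe even if
 * some of the images were never loaded.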
2633 */ 2634 void r600_fini_microcode(struct radeon_device *rdev) 2635 { 2636 release_firmware(rdev->pfp_fw); 2637 rdev->pfp_fw = NULL; 2638 release_firmware(rdev->me_fw); 2639 rdev->me_fw = NULL; 2640 release_firmware(rdev->rlc_fw); 2641 rdev->rlc_fw = NULL; 2642 release_firmware(rdev->smc_fw); 2643 rdev->smc_fw = NULL; 2644 } 2645 2646 static int r600_cp_load_microcode(struct radeon_device *rdev) 2647 { 2648 const __be32 *fw_data; 2649 int i; 2650 2651 if (!rdev->me_fw || !rdev->pfp_fw) 2652 return -EINVAL; 2653 2654 r600_cp_stop(rdev); 2655 2656 WREG32(CP_RB_CNTL, 2657 #ifdef __BIG_ENDIAN 2658 BUF_SWAP_32BIT | 2659 #endif 2660 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2661 2662 /* Reset cp */ 2663 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2664 RREG32(GRBM_SOFT_RESET); 2665 mdelay(15); 2666 WREG32(GRBM_SOFT_RESET, 0); 2667 2668 WREG32(CP_ME_RAM_WADDR, 0); 2669 2670 fw_data = (const __be32 *)rdev->me_fw->data; 2671 WREG32(CP_ME_RAM_WADDR, 0); 2672 for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++) 2673 WREG32(CP_ME_RAM_DATA, 2674 be32_to_cpup(fw_data++)); 2675 2676 fw_data = (const __be32 *)rdev->pfp_fw->data; 2677 WREG32(CP_PFP_UCODE_ADDR, 0); 2678 for (i = 0; i < R600_PFP_UCODE_SIZE; i++) 2679 WREG32(CP_PFP_UCODE_DATA, 2680 be32_to_cpup(fw_data++)); 2681 2682 WREG32(CP_PFP_UCODE_ADDR, 0); 2683 WREG32(CP_ME_RAM_WADDR, 0); 2684 WREG32(CP_ME_RAM_RADDR, 0); 2685 return 0; 2686 } 2687 2688 int r600_cp_start(struct radeon_device *rdev) 2689 { 2690 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2691 int r; 2692 uint32_t cp_me; 2693 2694 r = radeon_ring_lock(rdev, ring, 7); 2695 if (r) { 2696 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2697 return r; 2698 } 2699 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2700 radeon_ring_write(ring, 0x1); 2701 if (rdev->family >= CHIP_RV770) { 2702 radeon_ring_write(ring, 0x0); 2703 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1); 2704 } else { 2705 radeon_ring_write(ring, 0x3); 2706 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1); 2707 } 2708 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2709 radeon_ring_write(ring, 0); 2710 radeon_ring_write(ring, 0); 2711 radeon_ring_unlock_commit(rdev, ring, false); 2712 2713 cp_me = 0xff; 2714 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2715 return 0; 2716 } 2717 2718 int r600_cp_resume(struct radeon_device *rdev) 2719 { 2720 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2721 u32 tmp; 2722 u32 rb_bufsz; 2723 int r; 2724 2725 /* Reset cp */ 2726 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2727 RREG32(GRBM_SOFT_RESET); 2728 mdelay(15); 2729 WREG32(GRBM_SOFT_RESET, 0); 2730 2731 /* Set ring buffer size */ 2732 rb_bufsz = order_base_2(ring->ring_size / 8); 2733 tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2734 #ifdef __BIG_ENDIAN 2735 tmp |= BUF_SWAP_32BIT; 2736 #endif 2737 WREG32(CP_RB_CNTL, tmp); 2738 WREG32(CP_SEM_WAIT_TIMER, 0x0); 2739 2740 /* Set the write pointer delay */ 2741 WREG32(CP_RB_WPTR_DELAY, 0); 2742 2743 /* Initialize the ring buffer's read and write pointers */ 2744 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2745 WREG32(CP_RB_RPTR_WR, 0); 2746 ring->wptr = 0; 2747 WREG32(CP_RB_WPTR, ring->wptr); 2748 2749 /* set the wb address whether it's enabled or not */ 2750 WREG32(CP_RB_RPTR_ADDR, 2751 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 2752 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2753 WREG32(SCRATCH_ADDR, 
((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2754 2755 if (rdev->wb.enabled) 2756 WREG32(SCRATCH_UMSK, 0xff); 2757 else { 2758 tmp |= RB_NO_UPDATE; 2759 WREG32(SCRATCH_UMSK, 0); 2760 } 2761 2762 mdelay(1); 2763 WREG32(CP_RB_CNTL, tmp); 2764 2765 WREG32(CP_RB_BASE, ring->gpu_addr >> 8); 2766 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2767 2768 r600_cp_start(rdev); 2769 ring->ready = true; 2770 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 2771 if (r) { 2772 ring->ready = false; 2773 return r; 2774 } 2775 2776 if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX) 2777 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 2778 2779 return 0; 2780 } 2781 2782 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) 2783 { 2784 u32 rb_bufsz; 2785 int r; 2786 2787 /* Align ring size */ 2788 rb_bufsz = order_base_2(ring_size / 8); 2789 ring_size = (1 << (rb_bufsz + 1)) * 4; 2790 ring->ring_size = ring_size; 2791 ring->align_mask = 16 - 1; 2792 2793 if (radeon_ring_supports_scratch_reg(rdev, ring)) { 2794 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 2795 if (r) { 2796 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 2797 ring->rptr_save_reg = 0; 2798 } 2799 } 2800 } 2801 2802 void r600_cp_fini(struct radeon_device *rdev) 2803 { 2804 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2805 r600_cp_stop(rdev); 2806 radeon_ring_fini(rdev, ring); 2807 radeon_scratch_free(rdev, ring->rptr_save_reg); 2808 } 2809 2810 /* 2811 * GPU scratch registers helpers function. 2812 */ 2813 void r600_scratch_init(struct radeon_device *rdev) 2814 { 2815 int i; 2816 2817 rdev->scratch.num_reg = 7; 2818 rdev->scratch.reg_base = SCRATCH_REG0; 2819 for (i = 0; i < rdev->scratch.num_reg; i++) { 2820 rdev->scratch.free[i] = true; 2821 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 2822 } 2823 } 2824 2825 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 2826 { 2827 uint32_t scratch; 2828 uint32_t tmp = 0; 2829 unsigned i; 2830 int r; 2831 2832 r = radeon_scratch_get(rdev, &scratch); 2833 if (r) { 2834 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 2835 return r; 2836 } 2837 WREG32(scratch, 0xCAFEDEAD); 2838 r = radeon_ring_lock(rdev, ring, 3); 2839 if (r) { 2840 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); 2841 radeon_scratch_free(rdev, scratch); 2842 return r; 2843 } 2844 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2845 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2846 radeon_ring_write(ring, 0xDEADBEEF); 2847 radeon_ring_unlock_commit(rdev, ring, false); 2848 for (i = 0; i < rdev->usec_timeout; i++) { 2849 tmp = RREG32(scratch); 2850 if (tmp == 0xDEADBEEF) 2851 break; 2852 DRM_UDELAY(1); 2853 } 2854 if (i < rdev->usec_timeout) { 2855 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); 2856 } else { 2857 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n", 2858 ring->idx, scratch, tmp); 2859 r = -EINVAL; 2860 } 2861 radeon_scratch_free(rdev, scratch); 2862 return r; 2863 } 2864 2865 /* 2866 * CP fences/semaphores 2867 */ 2868 2869 void r600_fence_ring_emit(struct radeon_device *rdev, 2870 struct radeon_fence *fence) 2871 { 2872 struct radeon_ring *ring = &rdev->ring[fence->ring]; 2873 u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA | 2874 PACKET3_SH_ACTION_ENA; 2875 2876 if (rdev->family >= CHIP_RV770) 2877 cp_coher_cntl 
		|= PACKET3_FULL_CACHE_ENA;

	if (rdev->wb.use_event) {
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, cp_coher_cntl);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(ring, lower_32_bits(addr));
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(ring, fence->seq);
		radeon_ring_write(ring, 0);
	} else {
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, cp_coher_cntl);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(ring, RB_INT_STAT);
	}
}

/**
 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring buffer object
 * @semaphore: radeon semaphore object
 * @emit_wait: Is this a semaphore wait?
 *
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
 * from running ahead of semaphore waits.
 */
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);

	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
		/* Prevent the PFP from running ahead of the semaphore wait */
		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		radeon_ring_write(ring, 0x0);
	}

	return true;
}

/**
 * r600_copy_cpdma - copy pages using the CP DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the CP DMA engine (r6xx+).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
				     uint64_t src_offset, uint64_t dst_offset,
				     unsigned num_gpu_pages,
				     struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.blit_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes, tmp;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		tmp = upper_32_bits(src_offset) & 0xff;
		if (size_in_bytes == 0)
			tmp |= PACKET3_CP_DMA_CP_SYNC;
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, tmp);
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, cur_size_in_bytes);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

static int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	r600_mc_program(rdev);

	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	if (rdev->has_uvd) {
		r = uvd_v1_0_resume(rdev);
		if (!r) {
			r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
			if (r) {
				dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
			}
		}
		if (r)
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     RADEON_CP_PACKET2);
	if (r)
		return r;

	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	if (rdev->has_uvd) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		if (ring->ring_size) {
			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
					     RADEON_CP_PACKET2);
			if (!r)
				r = uvd_v1_0_init(rdev);
			if (r)
				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
		}
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
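	 * Resume therefore just re-posts the card via atom_asic_init()
	 * below instead of doing a full asic reset first.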
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	radeon_audio_fini(rdev);
	r600_cp_stop(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);

	return 0;
}

/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does little more than call the
 * ASIC-specific functions. This should also allow us to remove a bunch
 * of callbacks like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	if (rdev->has_uvd) {
		r = radeon_uvd_init(rdev);
		if (!r) {
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
			r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
		}
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		/* A failed startup is not fatal: modesetting can keep running
		 * without acceleration, so tear the acceleration blocks back
		 * down and still return 0 below. */
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	radeon_audio_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r600_fini_microcode(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}


/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	u32 next_rptr;

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		goto free_scratch;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		goto free_ib;
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		goto free_ib;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
free_ib:
	radeon_ib_free(rdev, &ib);
free_scratch:
	radeon_scratch_free(rdev, scratch);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
 * much the same as the CP ring buffer, but in reverse.
Rather than the CPU 3420 * writing to the ring and the GPU consuming, the GPU writes to the ring 3421 * and host consumes. As the host irq handler processes interrupts, it 3422 * increments the rptr. When the rptr catches up with the wptr, all the 3423 * current interrupts have been processed. 3424 */ 3425 3426 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) 3427 { 3428 u32 rb_bufsz; 3429 3430 /* Align ring size */ 3431 rb_bufsz = order_base_2(ring_size / 4); 3432 ring_size = (1 << rb_bufsz) * 4; 3433 rdev->ih.ring_size = ring_size; 3434 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3435 rdev->ih.rptr = 0; 3436 } 3437 3438 int r600_ih_ring_alloc(struct radeon_device *rdev) 3439 { 3440 int r; 3441 void *ring_ptr = &rdev->ih.ring; 3442 3443 /* Allocate ring buffer */ 3444 if (rdev->ih.ring_obj == NULL) { 3445 r = radeon_bo_create(rdev, rdev->ih.ring_size, 3446 PAGE_SIZE, true, 3447 RADEON_GEM_DOMAIN_GTT, 0, 3448 NULL, NULL, &rdev->ih.ring_obj); 3449 if (r) { 3450 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 3451 return r; 3452 } 3453 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3454 if (unlikely(r != 0)) 3455 return r; 3456 r = radeon_bo_pin(rdev->ih.ring_obj, 3457 RADEON_GEM_DOMAIN_GTT, 3458 (u64 *)&rdev->ih.gpu_addr); 3459 if (r) { 3460 radeon_bo_unreserve(rdev->ih.ring_obj); 3461 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); 3462 return r; 3463 } 3464 r = radeon_bo_kmap(rdev->ih.ring_obj, 3465 ring_ptr); 3466 radeon_bo_unreserve(rdev->ih.ring_obj); 3467 if (r) { 3468 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); 3469 return r; 3470 } 3471 } 3472 return 0; 3473 } 3474 3475 void r600_ih_ring_fini(struct radeon_device *rdev) 3476 { 3477 int r; 3478 if (rdev->ih.ring_obj) { 3479 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3480 if (likely(r == 0)) { 3481 radeon_bo_kunmap(rdev->ih.ring_obj); 3482 radeon_bo_unpin(rdev->ih.ring_obj); 3483 radeon_bo_unreserve(rdev->ih.ring_obj); 3484 } 3485 radeon_bo_unref(&rdev->ih.ring_obj); 3486 rdev->ih.ring = NULL; 3487 rdev->ih.ring_obj = NULL; 3488 } 3489 } 3490 3491 void r600_rlc_stop(struct radeon_device *rdev) 3492 { 3493 3494 if ((rdev->family >= CHIP_RV770) && 3495 (rdev->family <= CHIP_RV740)) { 3496 /* r7xx asics need to soft reset RLC before halting */ 3497 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 3498 RREG32(SRBM_SOFT_RESET); 3499 mdelay(15); 3500 WREG32(SRBM_SOFT_RESET, 0); 3501 RREG32(SRBM_SOFT_RESET); 3502 } 3503 3504 WREG32(RLC_CNTL, 0); 3505 } 3506 3507 static void r600_rlc_start(struct radeon_device *rdev) 3508 { 3509 WREG32(RLC_CNTL, RLC_ENABLE); 3510 } 3511 3512 static int r600_rlc_resume(struct radeon_device *rdev) 3513 { 3514 u32 i; 3515 const __be32 *fw_data; 3516 3517 if (!rdev->rlc_fw) 3518 return -EINVAL; 3519 3520 r600_rlc_stop(rdev); 3521 3522 WREG32(RLC_HB_CNTL, 0); 3523 3524 WREG32(RLC_HB_BASE, 0); 3525 WREG32(RLC_HB_RPTR, 0); 3526 WREG32(RLC_HB_WPTR, 0); 3527 WREG32(RLC_HB_WPTR_LSB_ADDR, 0); 3528 WREG32(RLC_HB_WPTR_MSB_ADDR, 0); 3529 WREG32(RLC_MC_CNTL, 0); 3530 WREG32(RLC_UCODE_CNTL, 0); 3531 3532 fw_data = (const __be32 *)rdev->rlc_fw->data; 3533 if (rdev->family >= CHIP_RV770) { 3534 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 3535 WREG32(RLC_UCODE_ADDR, i); 3536 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3537 } 3538 } else { 3539 for (i = 0; i < R600_RLC_UCODE_SIZE; i++) { 3540 WREG32(RLC_UCODE_ADDR, i); 3541 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3542 } 3543 } 3544 WREG32(RLC_UCODE_ADDR, 0); 3545 3546 r600_rlc_start(rdev); 3547 
3548 return 0; 3549 } 3550 3551 static void r600_enable_interrupts(struct radeon_device *rdev) 3552 { 3553 u32 ih_cntl = RREG32(IH_CNTL); 3554 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3555 3556 ih_cntl |= ENABLE_INTR; 3557 ih_rb_cntl |= IH_RB_ENABLE; 3558 WREG32(IH_CNTL, ih_cntl); 3559 WREG32(IH_RB_CNTL, ih_rb_cntl); 3560 rdev->ih.enabled = true; 3561 } 3562 3563 void r600_disable_interrupts(struct radeon_device *rdev) 3564 { 3565 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3566 u32 ih_cntl = RREG32(IH_CNTL); 3567 3568 ih_rb_cntl &= ~IH_RB_ENABLE; 3569 ih_cntl &= ~ENABLE_INTR; 3570 WREG32(IH_RB_CNTL, ih_rb_cntl); 3571 WREG32(IH_CNTL, ih_cntl); 3572 /* set rptr, wptr to 0 */ 3573 WREG32(IH_RB_RPTR, 0); 3574 WREG32(IH_RB_WPTR, 0); 3575 rdev->ih.enabled = false; 3576 rdev->ih.rptr = 0; 3577 } 3578 3579 static void r600_disable_interrupt_state(struct radeon_device *rdev) 3580 { 3581 u32 tmp; 3582 3583 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3584 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 3585 WREG32(DMA_CNTL, tmp); 3586 WREG32(GRBM_INT_CNTL, 0); 3587 WREG32(DxMODE_INT_MASK, 0); 3588 WREG32(D1GRPH_INTERRUPT_CONTROL, 0); 3589 WREG32(D2GRPH_INTERRUPT_CONTROL, 0); 3590 if (ASIC_IS_DCE3(rdev)) { 3591 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); 3592 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); 3593 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3594 WREG32(DC_HPD1_INT_CONTROL, tmp); 3595 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3596 WREG32(DC_HPD2_INT_CONTROL, tmp); 3597 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3598 WREG32(DC_HPD3_INT_CONTROL, tmp); 3599 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3600 WREG32(DC_HPD4_INT_CONTROL, tmp); 3601 if (ASIC_IS_DCE32(rdev)) { 3602 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3603 WREG32(DC_HPD5_INT_CONTROL, tmp); 3604 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3605 WREG32(DC_HPD6_INT_CONTROL, tmp); 3606 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3607 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp); 3608 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3609 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp); 3610 } else { 3611 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3612 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3613 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3614 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); 3615 } 3616 } else { 3617 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 3618 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 3619 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3620 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3621 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3622 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 3623 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3624 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 3625 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3626 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3627 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3628 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); 3629 } 3630 } 3631 3632 int r600_irq_init(struct radeon_device *rdev) 3633 { 3634 int ret = 0; 3635 int rb_bufsz; 3636 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 3637 3638 /* allocate ring */ 3639 ret = r600_ih_ring_alloc(rdev); 
3640 if (ret) 3641 return ret; 3642 3643 /* disable irqs */ 3644 r600_disable_interrupts(rdev); 3645 3646 /* init rlc */ 3647 if (rdev->family >= CHIP_CEDAR) 3648 ret = evergreen_rlc_resume(rdev); 3649 else 3650 ret = r600_rlc_resume(rdev); 3651 if (ret) { 3652 r600_ih_ring_fini(rdev); 3653 return ret; 3654 } 3655 3656 /* setup interrupt control */ 3657 /* set dummy read address to ring address */ 3658 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); 3659 interrupt_cntl = RREG32(INTERRUPT_CNTL); 3660 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi 3661 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN 3662 */ 3663 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; 3664 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ 3665 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; 3666 WREG32(INTERRUPT_CNTL, interrupt_cntl); 3667 3668 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 3669 rb_bufsz = order_base_2(rdev->ih.ring_size / 4); 3670 3671 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 3672 IH_WPTR_OVERFLOW_CLEAR | 3673 (rb_bufsz << 1)); 3674 3675 if (rdev->wb.enabled) 3676 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; 3677 3678 /* set the writeback address whether it's enabled or not */ 3679 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); 3680 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); 3681 3682 WREG32(IH_RB_CNTL, ih_rb_cntl); 3683 3684 /* set rptr, wptr to 0 */ 3685 WREG32(IH_RB_RPTR, 0); 3686 WREG32(IH_RB_WPTR, 0); 3687 3688 /* Default settings for IH_CNTL (disabled at first) */ 3689 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); 3690 /* RPTR_REARM only works if msi's are enabled */ 3691 if (rdev->msi_enabled) 3692 ih_cntl |= RPTR_REARM; 3693 WREG32(IH_CNTL, ih_cntl); 3694 3695 /* force the active interrupt state to all disabled */ 3696 if (rdev->family >= CHIP_CEDAR) 3697 evergreen_disable_interrupt_state(rdev); 3698 else 3699 r600_disable_interrupt_state(rdev); 3700 3701 /* at this point everything should be setup correctly to enable master */ 3702 pci_set_master(rdev->pdev); 3703 3704 /* enable irqs */ 3705 r600_enable_interrupts(rdev); 3706 3707 return ret; 3708 } 3709 3710 void r600_irq_suspend(struct radeon_device *rdev) 3711 { 3712 r600_irq_disable(rdev); 3713 r600_rlc_stop(rdev); 3714 } 3715 3716 void r600_irq_fini(struct radeon_device *rdev) 3717 { 3718 r600_irq_suspend(rdev); 3719 r600_ih_ring_fini(rdev); 3720 } 3721 3722 int r600_irq_set(struct radeon_device *rdev) 3723 { 3724 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 3725 u32 mode_int = 0; 3726 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 3727 u32 grbm_int_cntl = 0; 3728 u32 hdmi0, hdmi1; 3729 u32 dma_cntl; 3730 u32 thermal_int = 0; 3731 3732 if (!rdev->irq.installed) { 3733 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3734 return -EINVAL; 3735 } 3736 /* don't enable anything if the ih is disabled */ 3737 if (!rdev->ih.enabled) { 3738 r600_disable_interrupts(rdev); 3739 /* force the active interrupt state to all disabled */ 3740 r600_disable_interrupt_state(rdev); 3741 return 0; 3742 } 3743 3744 if (ASIC_IS_DCE3(rdev)) { 3745 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 3746 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 3747 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 3748 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 3749 if (ASIC_IS_DCE32(rdev)) { 3750 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 3751 
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3752 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3753 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
3754 } else {
3755 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3756 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3757 }
3758 } else {
3759 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3760 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3761 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3762 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3763 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3764 }
3765
3766 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3767
3768 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
3769 thermal_int = RREG32(CG_THERMAL_INT) &
3770 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3771 } else if (rdev->family >= CHIP_RV770) {
3772 thermal_int = RREG32(RV770_CG_THERMAL_INT) &
3773 ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
3774 }
3775 if (rdev->irq.dpm_thermal) {
3776 DRM_DEBUG("dpm thermal\n");
3777 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
3778 }
3779
3780 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3781 DRM_DEBUG("r600_irq_set: sw int\n");
3782 cp_int_cntl |= RB_INT_ENABLE;
3783 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3784 }
3785
3786 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3787 DRM_DEBUG("r600_irq_set: sw int dma\n");
3788 dma_cntl |= TRAP_ENABLE;
3789 }
3790
3791 if (rdev->irq.crtc_vblank_int[0] ||
3792 atomic_read(&rdev->irq.pflip[0])) {
3793 DRM_DEBUG("r600_irq_set: vblank 0\n");
3794 mode_int |= D1MODE_VBLANK_INT_MASK;
3795 }
3796 if (rdev->irq.crtc_vblank_int[1] ||
3797 atomic_read(&rdev->irq.pflip[1])) {
3798 DRM_DEBUG("r600_irq_set: vblank 1\n");
3799 mode_int |= D2MODE_VBLANK_INT_MASK;
3800 }
3801 if (rdev->irq.hpd[0]) {
3802 DRM_DEBUG("r600_irq_set: hpd 1\n");
3803 hpd1 |= DC_HPDx_INT_EN;
3804 }
3805 if (rdev->irq.hpd[1]) {
3806 DRM_DEBUG("r600_irq_set: hpd 2\n");
3807 hpd2 |= DC_HPDx_INT_EN;
3808 }
3809 if (rdev->irq.hpd[2]) {
3810 DRM_DEBUG("r600_irq_set: hpd 3\n");
3811 hpd3 |= DC_HPDx_INT_EN;
3812 }
3813 if (rdev->irq.hpd[3]) {
3814 DRM_DEBUG("r600_irq_set: hpd 4\n");
3815 hpd4 |= DC_HPDx_INT_EN;
3816 }
3817 if (rdev->irq.hpd[4]) {
3818 DRM_DEBUG("r600_irq_set: hpd 5\n");
3819 hpd5 |= DC_HPDx_INT_EN;
3820 }
3821 if (rdev->irq.hpd[5]) {
3822 DRM_DEBUG("r600_irq_set: hpd 6\n");
3823 hpd6 |= DC_HPDx_INT_EN;
3824 }
3825 if (rdev->irq.afmt[0]) {
3826 DRM_DEBUG("r600_irq_set: hdmi 0\n");
3827 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3828 }
3829 if (rdev->irq.afmt[1]) {
3830 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3831 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
3832 }
3833
3834 WREG32(CP_INT_CNTL, cp_int_cntl);
3835 WREG32(DMA_CNTL, dma_cntl);
3836 WREG32(DxMODE_INT_MASK, mode_int);
3837 WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3838 WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
3839 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3840 if (ASIC_IS_DCE3(rdev)) {
3841 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3842 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3843 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3844 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3845 if (ASIC_IS_DCE32(rdev)) {
3846 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3847 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3848
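/* On DCE3.2 the HDMI audio trigger lives in the AFMT blocks, hence
 * the DCE3_HDMI_OFFSETx-relative writes below instead of the legacy
 * HDMI0/HDMI1 packet control registers.
 */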
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0); 3849 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1); 3850 } else { 3851 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 3852 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 3853 } 3854 } else { 3855 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); 3856 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 3857 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); 3858 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 3859 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 3860 } 3861 if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) { 3862 WREG32(CG_THERMAL_INT, thermal_int); 3863 } else if (rdev->family >= CHIP_RV770) { 3864 WREG32(RV770_CG_THERMAL_INT, thermal_int); 3865 } 3866 3867 /* posting read */ 3868 RREG32(R_000E50_SRBM_STATUS); 3869 3870 return 0; 3871 } 3872 3873 static void r600_irq_ack(struct radeon_device *rdev) 3874 { 3875 u32 tmp; 3876 3877 if (ASIC_IS_DCE3(rdev)) { 3878 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); 3879 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); 3880 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); 3881 if (ASIC_IS_DCE32(rdev)) { 3882 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0); 3883 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1); 3884 } else { 3885 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 3886 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS); 3887 } 3888 } else { 3889 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); 3890 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 3891 rdev->irq.stat_regs.r600.disp_int_cont2 = 0; 3892 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 3893 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS); 3894 } 3895 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); 3896 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); 3897 3898 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) 3899 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 3900 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) 3901 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 3902 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) 3903 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3904 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) 3905 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3906 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) 3907 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3908 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) 3909 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3910 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { 3911 if (ASIC_IS_DCE3(rdev)) { 3912 tmp = RREG32(DC_HPD1_INT_CONTROL); 3913 tmp |= DC_HPDx_INT_ACK; 3914 WREG32(DC_HPD1_INT_CONTROL, tmp); 3915 } else { 3916 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); 3917 tmp |= DC_HPDx_INT_ACK; 3918 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3919 } 3920 } 3921 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { 3922 if (ASIC_IS_DCE3(rdev)) { 3923 tmp = RREG32(DC_HPD2_INT_CONTROL); 3924 tmp |= DC_HPDx_INT_ACK; 3925 WREG32(DC_HPD2_INT_CONTROL, tmp); 3926 } else { 3927 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); 3928 tmp |= DC_HPDx_INT_ACK; 3929 
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3930 }
3931 }
3932 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
3933 if (ASIC_IS_DCE3(rdev)) {
3934 tmp = RREG32(DC_HPD3_INT_CONTROL);
3935 tmp |= DC_HPDx_INT_ACK;
3936 WREG32(DC_HPD3_INT_CONTROL, tmp);
3937 } else {
3938 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3939 tmp |= DC_HPDx_INT_ACK;
3940 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3941 }
3942 }
3943 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
3944 tmp = RREG32(DC_HPD4_INT_CONTROL);
3945 tmp |= DC_HPDx_INT_ACK;
3946 WREG32(DC_HPD4_INT_CONTROL, tmp);
3947 }
3948 if (ASIC_IS_DCE32(rdev)) {
3949 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
3950 tmp = RREG32(DC_HPD5_INT_CONTROL);
3951 tmp |= DC_HPDx_INT_ACK;
3952 WREG32(DC_HPD5_INT_CONTROL, tmp);
3953 }
3954 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
3955 tmp = RREG32(DC_HPD6_INT_CONTROL);
3956 tmp |= DC_HPDx_INT_ACK;
3957 WREG32(DC_HPD6_INT_CONTROL, tmp);
3958 }
3959 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
3960 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
3961 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3962 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
3963 }
3964 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
3965 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
3966 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
3967 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
3968 }
3969 } else {
3970 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
3971 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
3972 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3973 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
3974 }
3975 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
3976 if (ASIC_IS_DCE3(rdev)) {
3977 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
3978 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3979 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
3980 } else {
3981 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
3982 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
3983 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
3984 }
3985 }
3986 }
3987 }
3988
3989 void r600_irq_disable(struct radeon_device *rdev)
3990 {
3991 r600_disable_interrupts(rdev);
3992 /* Wait and acknowledge irq */
3993 mdelay(1);
3994 r600_irq_ack(rdev);
3995 r600_disable_interrupt_state(rdev);
3996 }
3997
3998 static u32 r600_get_ih_wptr(struct radeon_device *rdev)
3999 {
4000 u32 wptr, tmp;
4001
4002 if (rdev->wb.enabled)
4003 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
4004 else
4005 wptr = RREG32(IH_RB_WPTR);
4006
4007 if (wptr & RB_OVERFLOW) {
4008 wptr &= ~RB_OVERFLOW;
4009 /* When a ring buffer overflow happens, start parsing interrupts
4010 * from the last vector that was not overwritten (wptr + 16). Hopefully
4011 * this should allow us to catch up.
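 * Each IV ring entry is 16 bytes (128 bits), which is why the skip
 * is wptr + 16 and why the processing loop below advances rptr in
 * 16-byte steps.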
4012 */ 4013 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", 4014 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask); 4015 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 4016 tmp = RREG32(IH_RB_CNTL); 4017 tmp |= IH_WPTR_OVERFLOW_CLEAR; 4018 WREG32(IH_RB_CNTL, tmp); 4019 } 4020 return (wptr & rdev->ih.ptr_mask); 4021 } 4022 4023 /* r600 IV Ring 4024 * Each IV ring entry is 128 bits: 4025 * [7:0] - interrupt source id 4026 * [31:8] - reserved 4027 * [59:32] - interrupt source data 4028 * [127:60] - reserved 4029 * 4030 * The basic interrupt vector entries 4031 * are decoded as follows: 4032 * src_id src_data description 4033 * 1 0 D1 Vblank 4034 * 1 1 D1 Vline 4035 * 5 0 D2 Vblank 4036 * 5 1 D2 Vline 4037 * 19 0 FP Hot plug detection A 4038 * 19 1 FP Hot plug detection B 4039 * 19 2 DAC A auto-detection 4040 * 19 3 DAC B auto-detection 4041 * 21 4 HDMI block A 4042 * 21 5 HDMI block B 4043 * 176 - CP_INT RB 4044 * 177 - CP_INT IB1 4045 * 178 - CP_INT IB2 4046 * 181 - EOP Interrupt 4047 * 233 - GUI Idle 4048 * 4049 * Note, these are based on r600 and may need to be 4050 * adjusted or added to on newer asics 4051 */ 4052 4053 irqreturn_t r600_irq_process(struct radeon_device *rdev) 4054 { 4055 u32 wptr; 4056 u32 rptr; 4057 u32 src_id, src_data; 4058 u32 ring_index; 4059 bool queue_hotplug = false; 4060 bool queue_hdmi = false; 4061 bool queue_thermal = false; 4062 4063 if (!rdev->ih.enabled || rdev->shutdown) 4064 return IRQ_NONE; 4065 4066 /* No MSIs, need a dummy read to flush PCI DMAs */ 4067 if (!rdev->msi_enabled) 4068 RREG32(IH_RB_WPTR); 4069 4070 wptr = r600_get_ih_wptr(rdev); 4071 4072 restart_ih: 4073 /* is somebody else already processing irqs? */ 4074 if (atomic_xchg(&rdev->ih.lock, 1)) 4075 return IRQ_NONE; 4076 4077 rptr = rdev->ih.rptr; 4078 DRM_DEBUG_VBLANK("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 4079 4080 /* Order reading of wptr vs. reading of IH ring data */ 4081 rmb(); 4082 4083 /* display interrupts */ 4084 r600_irq_ack(rdev); 4085 4086 while (rptr != wptr) { 4087 /* wptr/rptr are in bytes! 
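 * The ring is mapped as an array of 32-bit dwords, so the byte
 * offset is divided by 4 below; src_id sits in dword 0 and src_data
 * in dword 1 of each 16-byte vector.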
*/
4088 ring_index = rptr / 4;
4089 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
4090 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
4091
4092 switch (src_id) {
4093 case 1: /* D1 vblank/vline */
4094 switch (src_data) {
4095 case 0: /* D1 vblank */
4096 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
4097 DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
4098
4099 if (rdev->irq.crtc_vblank_int[0]) {
4100 drm_handle_vblank(rdev->ddev, 0);
4101 rdev->pm.vblank_sync = true;
4102 wake_up(&rdev->irq.vblank_queue);
4103 }
4104 if (atomic_read(&rdev->irq.pflip[0]))
4105 radeon_crtc_handle_vblank(rdev, 0);
4106 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
4107 DRM_DEBUG_VBLANK("IH: D1 vblank\n");
4108
4109 break;
4110 case 1: /* D1 vline */
4111 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
4112 DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
4113
4114 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
4115 DRM_DEBUG_VBLANK("IH: D1 vline\n");
4116
4117 break;
4118 default:
4119 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4120 break;
4121 }
4122 break;
4123 case 5: /* D2 vblank/vline */
4124 switch (src_data) {
4125 case 0: /* D2 vblank */
4126 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
4127 DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
4128
4129 if (rdev->irq.crtc_vblank_int[1]) {
4130 drm_handle_vblank(rdev->ddev, 1);
4131 rdev->pm.vblank_sync = true;
4132 wake_up(&rdev->irq.vblank_queue);
4133 }
4134 if (atomic_read(&rdev->irq.pflip[1]))
4135 radeon_crtc_handle_vblank(rdev, 1);
4136 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
4137 DRM_DEBUG_VBLANK("IH: D2 vblank\n");
4138
4139 break;
4140 case 1: /* D2 vline */
4141 if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
4142 DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
4143
4144 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
4145 DRM_DEBUG_VBLANK("IH: D2 vline\n");
4146
4147 break;
4148 default:
4149 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
4150 break;
4151 }
4152 break;
4153 case 9: /* D1 pflip */
4154 DRM_DEBUG_VBLANK("IH: D1 flip\n");
4155 if (radeon_use_pflipirq > 0)
4156 radeon_crtc_handle_flip(rdev, 0);
4157 break;
4158 case 11: /* D2 pflip */
4159 DRM_DEBUG_VBLANK("IH: D2 flip\n");
4160 if (radeon_use_pflipirq > 0)
4161 radeon_crtc_handle_flip(rdev, 1);
4162 break;
4163 case 19: /* HPD/DAC hotplug */
4164 switch (src_data) {
4165 case 0:
4166 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
4167 DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
4168
4169 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
4170 queue_hotplug = true;
4171 DRM_DEBUG("IH: HPD1\n");
4172 break;
4173 case 1:
4174 if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
4175 DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
4176
4177 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
4178 queue_hotplug = true;
4179 DRM_DEBUG("IH: HPD2\n");
4180 break;
4181 case 4:
4182 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
4183 DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
4184
4185 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
4186 queue_hotplug = true;
4187 DRM_DEBUG("IH: HPD3\n");
4188 break;
4189 case 5:
4190 if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
4191 DRM_DEBUG("IH: HPD4 - IH event w/o
asserted irq bit?\n"); 4192 4193 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; 4194 queue_hotplug = true; 4195 DRM_DEBUG("IH: HPD4\n"); 4196 break; 4197 case 10: 4198 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT)) 4199 DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n"); 4200 4201 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; 4202 queue_hotplug = true; 4203 DRM_DEBUG("IH: HPD5\n"); 4204 break; 4205 case 12: 4206 if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT)) 4207 DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n"); 4208 4209 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; 4210 queue_hotplug = true; 4211 DRM_DEBUG("IH: HPD6\n"); 4212 4213 break; 4214 default: 4215 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4216 break; 4217 } 4218 break; 4219 case 21: /* hdmi */ 4220 switch (src_data) { 4221 case 4: 4222 if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG)) 4223 DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n"); 4224 4225 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; 4226 queue_hdmi = true; 4227 DRM_DEBUG("IH: HDMI0\n"); 4228 4229 break; 4230 case 5: 4231 if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG)) 4232 DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n"); 4233 4234 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; 4235 queue_hdmi = true; 4236 DRM_DEBUG("IH: HDMI1\n"); 4237 4238 break; 4239 default: 4240 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 4241 break; 4242 } 4243 break; 4244 case 124: /* UVD */ 4245 DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); 4246 radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); 4247 break; 4248 case 176: /* CP_INT in ring buffer */ 4249 case 177: /* CP_INT in IB1 */ 4250 case 178: /* CP_INT in IB2 */ 4251 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); 4252 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 4253 break; 4254 case 181: /* CP EOP event */ 4255 DRM_DEBUG("IH: CP EOP\n"); 4256 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 4257 break; 4258 case 224: /* DMA trap event */ 4259 DRM_DEBUG("IH: DMA trap\n"); 4260 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 4261 break; 4262 case 230: /* thermal low to high */ 4263 DRM_DEBUG("IH: thermal low to high\n"); 4264 rdev->pm.dpm.thermal.high_to_low = false; 4265 queue_thermal = true; 4266 break; 4267 case 231: /* thermal high to low */ 4268 DRM_DEBUG("IH: thermal high to low\n"); 4269 rdev->pm.dpm.thermal.high_to_low = true; 4270 queue_thermal = true; 4271 break; 4272 case 233: /* GUI IDLE */ 4273 DRM_DEBUG("IH: GUI idle\n"); 4274 break; 4275 default: 4276 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4277 break; 4278 } 4279 4280 /* wptr/rptr are in bytes! 
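 * Advance one 16-byte vector, wrap through ptr_mask, and write the
 * new rptr back so the IH can reuse the slot.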
*/
4281 rptr += 16;
4282 rptr &= rdev->ih.ptr_mask;
4283 WREG32(IH_RB_RPTR, rptr);
4284 }
4285 if (queue_hotplug)
4286 schedule_delayed_work(&rdev->hotplug_work, 0);
4287 if (queue_hdmi)
4288 schedule_work(&rdev->audio_work);
4289 if (queue_thermal && rdev->pm.dpm_enabled)
4290 schedule_work(&rdev->pm.dpm.thermal.work);
4291 rdev->ih.rptr = rptr;
4292 atomic_set(&rdev->ih.lock, 0);
4293
4294 /* make sure wptr hasn't changed while processing */
4295 wptr = r600_get_ih_wptr(rdev);
4296 if (wptr != rptr)
4297 goto restart_ih;
4298
4299 return IRQ_HANDLED;
4300 }
4301
4302 /*
4303 * Debugfs info
4304 */
4305 #if defined(CONFIG_DEBUG_FS)
4306
4307 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
4308 {
4309 struct drm_info_node *node = (struct drm_info_node *) m->private;
4310 struct drm_device *dev = node->minor->dev;
4311 struct radeon_device *rdev = dev->dev_private;
4312
4313 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
4314 DREG32_SYS(m, rdev, VM_L2_STATUS);
4315 return 0;
4316 }
4317
4318 static struct drm_info_list r600_mc_info_list[] = {
4319 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
4320 };
4321 #endif
4322
4323 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
4324 {
4325 #if defined(CONFIG_DEBUG_FS)
4326 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
4327 #else
4328 return 0;
4329 #endif
4330 }
4331
4332 /**
4333 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
4334 * @rdev: radeon_device pointer
4335 *
4336 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
4337 * through the ring buffer. This leads to corruption in rendering, see
4338 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
4339 * directly perform the HDP flush by writing the register through MMIO.
4340 */
4341 void r600_mmio_hdp_flush(struct radeon_device *rdev)
4342 {
4343 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
4344 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
4345 * This seems to cause problems on some AGP cards. Just use the old
4346 * method for them.
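 * (The readl() of the vram scratch page below supplies the fb read
 * that completes the flush on those parts.)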
4347 */ 4348 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 4349 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { 4350 volatile void __iomem *ptr = (volatile void *)rdev->vram_scratch.ptr; 4351 u32 tmp; 4352 4353 WREG32(HDP_DEBUG1, 0); 4354 tmp = readl((volatile void __iomem *)ptr); 4355 } else 4356 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 4357 } 4358 4359 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) 4360 { 4361 u32 link_width_cntl, mask; 4362 4363 if (rdev->flags & RADEON_IS_IGP) 4364 return; 4365 4366 if (!(rdev->flags & RADEON_IS_PCIE)) 4367 return; 4368 4369 /* x2 cards have a special sequence */ 4370 if (ASIC_IS_X2(rdev)) 4371 return; 4372 4373 radeon_gui_idle(rdev); 4374 4375 switch (lanes) { 4376 case 0: 4377 mask = RADEON_PCIE_LC_LINK_WIDTH_X0; 4378 break; 4379 case 1: 4380 mask = RADEON_PCIE_LC_LINK_WIDTH_X1; 4381 break; 4382 case 2: 4383 mask = RADEON_PCIE_LC_LINK_WIDTH_X2; 4384 break; 4385 case 4: 4386 mask = RADEON_PCIE_LC_LINK_WIDTH_X4; 4387 break; 4388 case 8: 4389 mask = RADEON_PCIE_LC_LINK_WIDTH_X8; 4390 break; 4391 case 12: 4392 /* not actually supported */ 4393 mask = RADEON_PCIE_LC_LINK_WIDTH_X12; 4394 break; 4395 case 16: 4396 mask = RADEON_PCIE_LC_LINK_WIDTH_X16; 4397 break; 4398 default: 4399 DRM_ERROR("invalid pcie lane request: %d\n", lanes); 4400 return; 4401 } 4402 4403 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4404 link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK; 4405 link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT; 4406 link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW | 4407 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); 4408 4409 WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4410 } 4411 4412 int r600_get_pcie_lanes(struct radeon_device *rdev) 4413 { 4414 u32 link_width_cntl; 4415 4416 if (rdev->flags & RADEON_IS_IGP) 4417 return 0; 4418 4419 if (!(rdev->flags & RADEON_IS_PCIE)) 4420 return 0; 4421 4422 /* x2 cards have a special sequence */ 4423 if (ASIC_IS_X2(rdev)) 4424 return 0; 4425 4426 radeon_gui_idle(rdev); 4427 4428 link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4429 4430 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { 4431 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4432 return 1; 4433 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4434 return 2; 4435 case RADEON_PCIE_LC_LINK_WIDTH_X4: 4436 return 4; 4437 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4438 return 8; 4439 case RADEON_PCIE_LC_LINK_WIDTH_X12: 4440 /* not actually supported */ 4441 return 12; 4442 case RADEON_PCIE_LC_LINK_WIDTH_X0: 4443 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4444 default: 4445 return 16; 4446 } 4447 } 4448 4449 static void r600_pcie_gen2_enable(struct radeon_device *rdev) 4450 { 4451 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 4452 u16 link_cntl2; 4453 u32 mask; 4454 4455 if (radeon_pcie_gen2 == 0) 4456 return; 4457 4458 if (rdev->flags & RADEON_IS_IGP) 4459 return; 4460 4461 if (!(rdev->flags & RADEON_IS_PCIE)) 4462 return; 4463 4464 /* x2 cards have a special sequence */ 4465 if (ASIC_IS_X2(rdev)) 4466 return; 4467 4468 /* only RV6xx+ chips are supported */ 4469 if (rdev->family <= CHIP_R600) 4470 return; 4471 4472 #ifdef __DragonFly__ 4473 if (drm_pcie_get_speed_cap_mask(rdev->ddev, &mask) != 0) 4474 return; 4475 #endif 4476 4477 if (!(mask & DRM_PCIE_SPEED_50)) 4478 return; 4479 4480 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4481 if (speed_cntl & LC_CURRENT_DATA_RATE) { 4482 DRM_INFO("PCIE 
gen 2 link speeds already enabled\n"); 4483 return; 4484 } 4485 4486 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 4487 4488 /* 55 nm r6xx asics */ 4489 if ((rdev->family == CHIP_RV670) || 4490 (rdev->family == CHIP_RV620) || 4491 (rdev->family == CHIP_RV635)) { 4492 /* advertise upconfig capability */ 4493 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4494 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4495 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4496 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4497 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { 4498 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; 4499 link_width_cntl &= ~(LC_LINK_WIDTH_MASK | 4500 LC_RECONFIG_ARC_MISSING_ESCAPE); 4501 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; 4502 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4503 } else { 4504 link_width_cntl |= LC_UPCONFIGURE_DIS; 4505 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4506 } 4507 } 4508 4509 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4510 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && 4511 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 4512 4513 /* 55 nm r6xx asics */ 4514 if ((rdev->family == CHIP_RV670) || 4515 (rdev->family == CHIP_RV620) || 4516 (rdev->family == CHIP_RV635)) { 4517 WREG32(MM_CFGREGS_CNTL, 0x8); 4518 link_cntl2 = RREG32(0x4088); 4519 WREG32(MM_CFGREGS_CNTL, 0); 4520 /* not supported yet */ 4521 if (link_cntl2 & SELECTABLE_DEEMPHASIS) 4522 return; 4523 } 4524 4525 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; 4526 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); 4527 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; 4528 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; 4529 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; 4530 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4531 4532 tmp = RREG32(0x541c); 4533 WREG32(0x541c, tmp | 0x8); 4534 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); 4535 link_cntl2 = RREG16(0x4088); 4536 link_cntl2 &= ~TARGET_LINK_SPEED_MASK; 4537 link_cntl2 |= 0x2; 4538 WREG16(0x4088, link_cntl2); 4539 WREG32(MM_CFGREGS_CNTL, 0); 4540 4541 if ((rdev->family == CHIP_RV670) || 4542 (rdev->family == CHIP_RV620) || 4543 (rdev->family == CHIP_RV635)) { 4544 training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL); 4545 training_cntl &= ~LC_POINT_7_PLUS_EN; 4546 WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl); 4547 } else { 4548 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4549 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 4550 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4551 } 4552 4553 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); 4554 speed_cntl |= LC_GEN2_EN_STRAP; 4555 WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl); 4556 4557 } else { 4558 link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL); 4559 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 4560 if (1) 4561 link_width_cntl |= LC_UPCONFIGURE_DIS; 4562 else 4563 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4564 WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4565 } 4566 } 4567 4568 /** 4569 * r600_get_gpu_clock_counter - return GPU clock counter snapshot 4570 * 4571 * @rdev: radeon_device pointer 4572 * 4573 * Fetches a GPU clock counter snapshot (R6xx-cayman). 4574 * Returns the 64 bit clock counter snapshot. 
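 * gpu_clock_mutex serializes the capture write and the two 32-bit
 * reads so the LSB/MSB halves below belong to the same snapshot.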
4575 */ 4576 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev) 4577 { 4578 uint64_t clock; 4579 4580 mutex_lock(&rdev->gpu_clock_mutex); 4581 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); 4582 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | 4583 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 4584 mutex_unlock(&rdev->gpu_clock_mutex); 4585 return clock; 4586 } 4587
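
/* Worked example (illustrative only, not part of the driver): for a
 * 64 KB ring passed to r600_ih_ring_init(),
 *
 *   rb_bufsz  = order_base_2(65536 / 4) = 14
 *   ring_size = (1 << 14) * 4           = 65536 bytes
 *   ptr_mask  = ring_size - 1           = 0xffff
 *
 * which holds 65536 / 16 = 4096 interrupt vectors before wrapping,
 * and r600_irq_init() encodes the same rb_bufsz into IH_RB_CNTL as
 * (rb_bufsz << 1).
 */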