1 /* $OpenBSD: r600.c,v 1.19 2015/04/06 14:10:59 jsg Exp $ */ 2 /* 3 * Copyright 2008 Advanced Micro Devices, Inc. 4 * Copyright 2008 Red Hat Inc. 5 * Copyright 2009 Jerome Glisse. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the "Software"), 9 * to deal in the Software without restriction, including without limitation 10 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * and/or sell copies of the Software, and to permit persons to whom the 12 * Software is furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 23 * OTHER DEALINGS IN THE SOFTWARE. 24 * 25 * Authors: Dave Airlie 26 * Alex Deucher 27 * Jerome Glisse 28 */ 29 #include <dev/pci/drm/drmP.h> 30 #include <dev/pci/drm/radeon_drm.h> 31 #include "radeon.h" 32 #include "radeon_asic.h" 33 #include "radeon_mode.h" 34 #include "r600d.h" 35 #include "atom.h" 36 #include "avivod.h" 37 38 #define PFP_UCODE_SIZE 576 39 #define PM4_UCODE_SIZE 1792 40 #define RLC_UCODE_SIZE 768 41 #define R700_PFP_UCODE_SIZE 848 42 #define R700_PM4_UCODE_SIZE 1360 43 #define R700_RLC_UCODE_SIZE 1024 44 #define EVERGREEN_PFP_UCODE_SIZE 1120 45 #define EVERGREEN_PM4_UCODE_SIZE 1376 46 #define EVERGREEN_RLC_UCODE_SIZE 768 47 #define CAYMAN_RLC_UCODE_SIZE 1024 48 #define ARUBA_RLC_UCODE_SIZE 1536 49 50 /* Firmware Names */ 51 MODULE_FIRMWARE("radeon/R600_pfp.bin"); 52 MODULE_FIRMWARE("radeon/R600_me.bin"); 53 MODULE_FIRMWARE("radeon/RV610_pfp.bin"); 54 MODULE_FIRMWARE("radeon/RV610_me.bin"); 55 MODULE_FIRMWARE("radeon/RV630_pfp.bin"); 56 MODULE_FIRMWARE("radeon/RV630_me.bin"); 57 MODULE_FIRMWARE("radeon/RV620_pfp.bin"); 58 MODULE_FIRMWARE("radeon/RV620_me.bin"); 59 MODULE_FIRMWARE("radeon/RV635_pfp.bin"); 60 MODULE_FIRMWARE("radeon/RV635_me.bin"); 61 MODULE_FIRMWARE("radeon/RV670_pfp.bin"); 62 MODULE_FIRMWARE("radeon/RV670_me.bin"); 63 MODULE_FIRMWARE("radeon/RS780_pfp.bin"); 64 MODULE_FIRMWARE("radeon/RS780_me.bin"); 65 MODULE_FIRMWARE("radeon/RV770_pfp.bin"); 66 MODULE_FIRMWARE("radeon/RV770_me.bin"); 67 MODULE_FIRMWARE("radeon/RV730_pfp.bin"); 68 MODULE_FIRMWARE("radeon/RV730_me.bin"); 69 MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 70 MODULE_FIRMWARE("radeon/RV710_me.bin"); 71 MODULE_FIRMWARE("radeon/R600_rlc.bin"); 72 MODULE_FIRMWARE("radeon/R700_rlc.bin"); 73 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin"); 74 MODULE_FIRMWARE("radeon/CEDAR_me.bin"); 75 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin"); 76 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin"); 77 MODULE_FIRMWARE("radeon/REDWOOD_me.bin"); 78 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin"); 79 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin"); 80 MODULE_FIRMWARE("radeon/JUNIPER_me.bin"); 81 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin"); 82 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin"); 83 MODULE_FIRMWARE("radeon/CYPRESS_me.bin"); 84 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin"); 85 MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 86 MODULE_FIRMWARE("radeon/PALM_me.bin"); 87 MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 88 MODULE_FIRMWARE("radeon/SUMO_pfp.bin"); 89 MODULE_FIRMWARE("radeon/SUMO_me.bin"); 90 MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); 91 MODULE_FIRMWARE("radeon/SUMO2_me.bin"); 92 93 int r600_debugfs_mc_info_init(struct radeon_device *rdev); 94 95 /* r600,rv610,rv630,rv620,rv635,rv670 */ 96 int r600_mc_wait_for_idle(struct radeon_device *rdev); 97 static void r600_gpu_init(struct radeon_device *rdev); 98 void r600_fini(struct radeon_device *rdev); 99 void r600_irq_disable(struct radeon_device *rdev); 100 static void r600_pcie_gen2_enable(struct radeon_device *rdev); 101 102 /* get temperature in millidegrees */ 103 int rv6xx_get_temp(struct radeon_device *rdev) 104 { 105 u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> 106 ASIC_T_SHIFT; 107 int actual_temp = temp & 0xff; 108 109 if (temp & 0x100) 110 actual_temp -= 256; 111 112 return actual_temp * 1000; 113 } 114 115 void r600_pm_get_dynpm_state(struct radeon_device *rdev) 116 { 117 int i; 118 119 rdev->pm.dynpm_can_upclock = true; 120 rdev->pm.dynpm_can_downclock = true; 121 122 /* power state array is low to high, default is first */ 123 if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) { 124 int min_power_state_index = 0; 125 126 if (rdev->pm.num_power_states > 2) 127 min_power_state_index = 1; 128 129 switch (rdev->pm.dynpm_planned_action) { 130 case DYNPM_ACTION_MINIMUM: 131 rdev->pm.requested_power_state_index = min_power_state_index; 132 rdev->pm.requested_clock_mode_index = 0; 133 rdev->pm.dynpm_can_downclock = false; 134 break; 135 case DYNPM_ACTION_DOWNCLOCK: 136 if (rdev->pm.current_power_state_index == min_power_state_index) { 137 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 138 rdev->pm.dynpm_can_downclock = false; 139 } else { 140 if (rdev->pm.active_crtc_count > 1) { 141 for (i = 0; i < rdev->pm.num_power_states; i++) { 142 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 143 continue; 144 else if (i >= rdev->pm.current_power_state_index) { 145 rdev->pm.requested_power_state_index = 146 rdev->pm.current_power_state_index; 147 break; 148 } else { 149 rdev->pm.requested_power_state_index = i; 150 break; 151 } 152 } 153 } else { 154 if (rdev->pm.current_power_state_index == 0) 155 rdev->pm.requested_power_state_index = 156 rdev->pm.num_power_states - 1; 157 else 158 rdev->pm.requested_power_state_index = 159 rdev->pm.current_power_state_index - 1; 160 } 161 } 162 rdev->pm.requested_clock_mode_index = 0; 163 /* don't use the power state if crtcs are active and no display flag is set */ 164 if ((rdev->pm.active_crtc_count > 0) && 165 (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 166 clock_info[rdev->pm.requested_clock_mode_index].flags & 167 RADEON_PM_MODE_NO_DISPLAY)) { 168 rdev->pm.requested_power_state_index++; 169 } 170 break; 171 case DYNPM_ACTION_UPCLOCK: 172 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) { 173 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index; 174 rdev->pm.dynpm_can_upclock = false; 175 } else { 176 if (rdev->pm.active_crtc_count > 1) { 177 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) { 178 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 179 continue; 180 else if (i <= rdev->pm.current_power_state_index) { 181 rdev->pm.requested_power_state_index = 182 rdev->pm.current_power_state_index; 183 break; 184 } else { 185 rdev->pm.requested_power_state_index = i; 186 break; 187 } 188 } 189 } else 190 rdev->pm.requested_power_state_index = 191 rdev->pm.current_power_state_index + 1; 192 } 193 rdev->pm.requested_clock_mode_index = 0; 194 break; 195 case DYNPM_ACTION_DEFAULT: 196 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 197 rdev->pm.requested_clock_mode_index = 0; 198 rdev->pm.dynpm_can_upclock = false; 199 break; 200 case DYNPM_ACTION_NONE: 201 default: 202 DRM_ERROR("Requested mode for not defined action\n"); 203 return; 204 } 205 } else { 206 /* XXX select a power state based on AC/DC, single/dualhead, etc. */ 207 /* for now just select the first power state and switch between clock modes */ 208 /* power state array is low to high, default is first (0) */ 209 if (rdev->pm.active_crtc_count > 1) { 210 rdev->pm.requested_power_state_index = -1; 211 /* start at 1 as we don't want the default mode */ 212 for (i = 1; i < rdev->pm.num_power_states; i++) { 213 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 214 continue; 215 else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) || 216 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) { 217 rdev->pm.requested_power_state_index = i; 218 break; 219 } 220 } 221 /* if nothing selected, grab the default state. */ 222 if (rdev->pm.requested_power_state_index == -1) 223 rdev->pm.requested_power_state_index = 0; 224 } else 225 rdev->pm.requested_power_state_index = 1; 226 227 switch (rdev->pm.dynpm_planned_action) { 228 case DYNPM_ACTION_MINIMUM: 229 rdev->pm.requested_clock_mode_index = 0; 230 rdev->pm.dynpm_can_downclock = false; 231 break; 232 case DYNPM_ACTION_DOWNCLOCK: 233 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { 234 if (rdev->pm.current_clock_mode_index == 0) { 235 rdev->pm.requested_clock_mode_index = 0; 236 rdev->pm.dynpm_can_downclock = false; 237 } else 238 rdev->pm.requested_clock_mode_index = 239 rdev->pm.current_clock_mode_index - 1; 240 } else { 241 rdev->pm.requested_clock_mode_index = 0; 242 rdev->pm.dynpm_can_downclock = false; 243 } 244 /* don't use the power state if crtcs are active and no display flag is set */ 245 if ((rdev->pm.active_crtc_count > 0) && 246 (rdev->pm.power_state[rdev->pm.requested_power_state_index]. 247 clock_info[rdev->pm.requested_clock_mode_index].flags & 248 RADEON_PM_MODE_NO_DISPLAY)) { 249 rdev->pm.requested_clock_mode_index++; 250 } 251 break; 252 case DYNPM_ACTION_UPCLOCK: 253 if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) { 254 if (rdev->pm.current_clock_mode_index == 255 (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) { 256 rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index; 257 rdev->pm.dynpm_can_upclock = false; 258 } else 259 rdev->pm.requested_clock_mode_index = 260 rdev->pm.current_clock_mode_index + 1; 261 } else { 262 rdev->pm.requested_clock_mode_index = 263 rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1; 264 rdev->pm.dynpm_can_upclock = false; 265 } 266 break; 267 case DYNPM_ACTION_DEFAULT: 268 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 269 rdev->pm.requested_clock_mode_index = 0; 270 rdev->pm.dynpm_can_upclock = false; 271 break; 272 case DYNPM_ACTION_NONE: 273 default: 274 DRM_ERROR("Requested mode for not defined action\n"); 275 return; 276 } 277 } 278 279 DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", 280 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 281 clock_info[rdev->pm.requested_clock_mode_index].sclk, 282 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 283 clock_info[rdev->pm.requested_clock_mode_index].mclk, 284 rdev->pm.power_state[rdev->pm.requested_power_state_index]. 285 pcie_lanes); 286 } 287 288 void rs780_pm_init_profile(struct radeon_device *rdev) 289 { 290 if (rdev->pm.num_power_states == 2) { 291 /* default */ 292 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 293 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 294 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 295 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 296 /* low sh */ 297 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0; 298 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0; 299 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 300 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 301 /* mid sh */ 302 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0; 303 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0; 304 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 305 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 306 /* high sh */ 307 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0; 308 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; 309 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 310 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 311 /* low mh */ 312 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0; 313 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; 314 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 315 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 316 /* mid mh */ 317 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0; 318 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; 319 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 320 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 321 /* high mh */ 322 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0; 323 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1; 324 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 325 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 326 } else if (rdev->pm.num_power_states == 3) { 327 /* default */ 328 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 329 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 330 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 331 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 332 /* low sh */ 333 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; 334 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; 335 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 336 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 337 /* mid sh */ 338 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; 339 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; 340 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 341 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 342 /* high sh */ 343 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; 344 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2; 345 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 346 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 347 /* low mh */ 348 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1; 349 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1; 350 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 351 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 352 /* mid mh */ 353 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1; 354 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1; 355 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 356 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 357 /* high mh */ 358 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1; 359 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; 360 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 361 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 362 } else { 363 /* default */ 364 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 365 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 366 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 367 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 368 /* low sh */ 369 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2; 370 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2; 371 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 372 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 373 /* mid sh */ 374 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2; 375 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2; 376 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 377 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 378 /* high sh */ 379 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2; 380 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3; 381 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 382 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 383 /* low mh */ 384 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; 385 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0; 386 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 387 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 388 /* mid mh */ 389 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; 390 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0; 391 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 392 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 393 /* high mh */ 394 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; 395 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3; 396 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 397 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 398 } 399 } 400 401 void r600_pm_init_profile(struct radeon_device *rdev) 402 { 403 int idx; 404 405 if (rdev->family == CHIP_R600) { 406 /* XXX */ 407 /* default */ 408 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 409 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 410 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 411 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0; 412 /* low sh */ 413 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 414 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 415 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 416 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 417 /* mid sh */ 418 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 419 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 420 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 421 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0; 422 /* high sh */ 423 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 424 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 425 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 426 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0; 427 /* low mh */ 428 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 429 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 430 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 431 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 432 /* mid mh */ 433 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 434 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 435 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 436 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0; 437 /* high mh */ 438 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 439 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 440 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 441 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0; 442 } else { 443 if (rdev->pm.num_power_states < 4) { 444 /* default */ 445 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 446 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 447 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 448 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 449 /* low sh */ 450 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1; 451 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1; 452 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 453 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 454 /* mid sh */ 455 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1; 456 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1; 457 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 458 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; 459 /* high sh */ 460 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1; 461 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1; 462 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 463 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 464 /* low mh */ 465 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2; 466 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2; 467 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 468 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 469 /* low mh */ 470 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2; 471 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2; 472 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 473 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; 474 /* high mh */ 475 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2; 476 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2; 477 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 478 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 479 } else { 480 /* default */ 481 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index; 482 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index; 483 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 484 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 485 /* low sh */ 486 if (rdev->flags & RADEON_IS_MOBILITY) 487 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 488 else 489 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 490 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx; 491 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx; 492 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 493 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 494 /* mid sh */ 495 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx; 496 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx; 497 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0; 498 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1; 499 /* high sh */ 500 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 501 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx; 502 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx; 503 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 504 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 505 /* low mh */ 506 if (rdev->flags & RADEON_IS_MOBILITY) 507 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 508 else 509 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 510 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx; 511 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx; 512 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 513 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 514 /* mid mh */ 515 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx; 516 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx; 517 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0; 518 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1; 519 /* high mh */ 520 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 521 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx; 522 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx; 523 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 524 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 525 } 526 } 527 } 528 529 void r600_pm_misc(struct radeon_device *rdev) 530 { 531 int req_ps_idx = rdev->pm.requested_power_state_index; 532 int req_cm_idx = rdev->pm.requested_clock_mode_index; 533 struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx]; 534 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 535 536 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { 537 /* 0xff01 is a flag rather then an actual voltage */ 538 if (voltage->voltage == 0xff01) 539 return; 540 if (voltage->voltage != rdev->pm.current_vddc) { 541 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 542 rdev->pm.current_vddc = voltage->voltage; 543 DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); 544 } 545 } 546 } 547 548 bool r600_gui_idle(struct radeon_device *rdev) 549 { 550 if (RREG32(GRBM_STATUS) & GUI_ACTIVE) 551 return false; 552 else 553 return true; 554 } 555 556 /* hpd for digital panel detect/disconnect */ 557 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 558 { 559 bool connected = false; 560 561 if (ASIC_IS_DCE3(rdev)) { 562 switch (hpd) { 563 case RADEON_HPD_1: 564 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) 565 connected = true; 566 break; 567 case RADEON_HPD_2: 568 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) 569 connected = true; 570 break; 571 case RADEON_HPD_3: 572 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) 573 connected = true; 574 break; 575 case RADEON_HPD_4: 576 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) 577 connected = true; 578 break; 579 /* DCE 3.2 */ 580 case RADEON_HPD_5: 581 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) 582 connected = true; 583 break; 584 case RADEON_HPD_6: 585 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) 586 connected = true; 587 break; 588 default: 589 break; 590 } 591 } else { 592 switch (hpd) { 593 case RADEON_HPD_1: 594 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 595 connected = true; 596 break; 597 case RADEON_HPD_2: 598 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 599 connected = true; 600 break; 601 case RADEON_HPD_3: 602 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) 603 connected = true; 604 break; 605 default: 606 break; 607 } 608 } 609 return connected; 610 } 611 612 void r600_hpd_set_polarity(struct radeon_device *rdev, 613 enum radeon_hpd_id hpd) 614 { 615 u32 tmp; 616 bool connected = r600_hpd_sense(rdev, hpd); 617 618 if (ASIC_IS_DCE3(rdev)) { 619 switch (hpd) { 620 case RADEON_HPD_1: 621 tmp = RREG32(DC_HPD1_INT_CONTROL); 622 if (connected) 623 tmp &= ~DC_HPDx_INT_POLARITY; 624 else 625 tmp |= DC_HPDx_INT_POLARITY; 626 WREG32(DC_HPD1_INT_CONTROL, tmp); 627 break; 628 case RADEON_HPD_2: 629 tmp = RREG32(DC_HPD2_INT_CONTROL); 630 if (connected) 631 tmp &= ~DC_HPDx_INT_POLARITY; 632 else 633 tmp |= DC_HPDx_INT_POLARITY; 634 WREG32(DC_HPD2_INT_CONTROL, tmp); 635 break; 636 case RADEON_HPD_3: 637 tmp = RREG32(DC_HPD3_INT_CONTROL); 638 if (connected) 639 tmp &= ~DC_HPDx_INT_POLARITY; 640 else 641 tmp |= DC_HPDx_INT_POLARITY; 642 WREG32(DC_HPD3_INT_CONTROL, tmp); 643 break; 644 case RADEON_HPD_4: 645 tmp = RREG32(DC_HPD4_INT_CONTROL); 646 if (connected) 647 tmp &= ~DC_HPDx_INT_POLARITY; 648 else 649 tmp |= DC_HPDx_INT_POLARITY; 650 WREG32(DC_HPD4_INT_CONTROL, tmp); 651 break; 652 case RADEON_HPD_5: 653 tmp = RREG32(DC_HPD5_INT_CONTROL); 654 if (connected) 655 tmp &= ~DC_HPDx_INT_POLARITY; 656 else 657 tmp |= DC_HPDx_INT_POLARITY; 658 WREG32(DC_HPD5_INT_CONTROL, tmp); 659 break; 660 /* DCE 3.2 */ 661 case RADEON_HPD_6: 662 tmp = RREG32(DC_HPD6_INT_CONTROL); 663 if (connected) 664 tmp &= ~DC_HPDx_INT_POLARITY; 665 else 666 tmp |= DC_HPDx_INT_POLARITY; 667 WREG32(DC_HPD6_INT_CONTROL, tmp); 668 break; 669 default: 670 break; 671 } 672 } else { 673 switch (hpd) { 674 case RADEON_HPD_1: 675 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); 676 if (connected) 677 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 678 else 679 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 680 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 681 break; 682 case RADEON_HPD_2: 683 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); 684 if (connected) 685 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 686 else 687 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 688 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 689 break; 690 case RADEON_HPD_3: 691 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); 692 if (connected) 693 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; 694 else 695 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; 696 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 697 break; 698 default: 699 break; 700 } 701 } 702 } 703 704 void r600_hpd_init(struct radeon_device *rdev) 705 { 706 struct drm_device *dev = rdev->ddev; 707 struct drm_connector *connector; 708 unsigned enable = 0; 709 710 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 711 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 712 713 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || 714 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 715 /* don't try to enable hpd on eDP or LVDS avoid breaking the 716 * aux dp channel on imac and help (but not completely fix) 717 * https://bugzilla.redhat.com/show_bug.cgi?id=726143 718 */ 719 continue; 720 } 721 if (ASIC_IS_DCE3(rdev)) { 722 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); 723 if (ASIC_IS_DCE32(rdev)) 724 tmp |= DC_HPDx_EN; 725 726 switch (radeon_connector->hpd.hpd) { 727 case RADEON_HPD_1: 728 WREG32(DC_HPD1_CONTROL, tmp); 729 break; 730 case RADEON_HPD_2: 731 WREG32(DC_HPD2_CONTROL, tmp); 732 break; 733 case RADEON_HPD_3: 734 WREG32(DC_HPD3_CONTROL, tmp); 735 break; 736 case RADEON_HPD_4: 737 WREG32(DC_HPD4_CONTROL, tmp); 738 break; 739 /* DCE 3.2 */ 740 case RADEON_HPD_5: 741 WREG32(DC_HPD5_CONTROL, tmp); 742 break; 743 case RADEON_HPD_6: 744 WREG32(DC_HPD6_CONTROL, tmp); 745 break; 746 default: 747 break; 748 } 749 } else { 750 switch (radeon_connector->hpd.hpd) { 751 case RADEON_HPD_1: 752 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); 753 break; 754 case RADEON_HPD_2: 755 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); 756 break; 757 case RADEON_HPD_3: 758 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); 759 break; 760 default: 761 break; 762 } 763 } 764 enable |= 1 << radeon_connector->hpd.hpd; 765 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); 766 } 767 radeon_irq_kms_enable_hpd(rdev, enable); 768 } 769 770 void r600_hpd_fini(struct radeon_device *rdev) 771 { 772 struct drm_device *dev = rdev->ddev; 773 struct drm_connector *connector; 774 unsigned disable = 0; 775 776 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 777 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 778 if (ASIC_IS_DCE3(rdev)) { 779 switch (radeon_connector->hpd.hpd) { 780 case RADEON_HPD_1: 781 WREG32(DC_HPD1_CONTROL, 0); 782 break; 783 case RADEON_HPD_2: 784 WREG32(DC_HPD2_CONTROL, 0); 785 break; 786 case RADEON_HPD_3: 787 WREG32(DC_HPD3_CONTROL, 0); 788 break; 789 case RADEON_HPD_4: 790 WREG32(DC_HPD4_CONTROL, 0); 791 break; 792 /* DCE 3.2 */ 793 case RADEON_HPD_5: 794 WREG32(DC_HPD5_CONTROL, 0); 795 break; 796 case RADEON_HPD_6: 797 WREG32(DC_HPD6_CONTROL, 0); 798 break; 799 default: 800 break; 801 } 802 } else { 803 switch (radeon_connector->hpd.hpd) { 804 case RADEON_HPD_1: 805 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); 806 break; 807 case RADEON_HPD_2: 808 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); 809 break; 810 case RADEON_HPD_3: 811 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); 812 break; 813 default: 814 break; 815 } 816 } 817 disable |= 1 << radeon_connector->hpd.hpd; 818 } 819 radeon_irq_kms_disable_hpd(rdev, disable); 820 } 821 822 /* 823 * R600 PCIE GART 824 */ 825 void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) 826 { 827 unsigned i; 828 u32 tmp; 829 830 /* flush hdp cache so updates hit vram */ 831 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 832 !(rdev->flags & RADEON_IS_AGP)) { 833 volatile uint32_t *ptr = rdev->gart.ptr; 834 u32 tmp; 835 836 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 837 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL 838 * This seems to cause problems on some AGP cards. Just use the old 839 * method for them. 840 */ 841 WREG32(HDP_DEBUG1, 0); 842 tmp = *ptr; 843 } else 844 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 845 846 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); 847 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); 848 WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); 849 for (i = 0; i < rdev->usec_timeout; i++) { 850 /* read MC_STATUS */ 851 tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE); 852 tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT; 853 if (tmp == 2) { 854 printk(KERN_WARNING "[drm] r600 flush TLB failed\n"); 855 return; 856 } 857 if (tmp) { 858 return; 859 } 860 udelay(1); 861 } 862 } 863 864 int r600_pcie_gart_init(struct radeon_device *rdev) 865 { 866 int r; 867 868 if (rdev->gart.robj) { 869 WARN(1, "R600 PCIE GART already initialized\n"); 870 return 0; 871 } 872 /* Initialize common gart structure */ 873 r = radeon_gart_init(rdev); 874 if (r) 875 return r; 876 rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; 877 return radeon_gart_table_vram_alloc(rdev); 878 } 879 880 static int r600_pcie_gart_enable(struct radeon_device *rdev) 881 { 882 u32 tmp; 883 int r, i; 884 885 if (rdev->gart.robj == NULL) { 886 dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); 887 return -EINVAL; 888 } 889 r = radeon_gart_table_vram_pin(rdev); 890 if (r) 891 return r; 892 radeon_gart_restore(rdev); 893 894 /* Setup L2 cache */ 895 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 896 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 897 EFFECTIVE_L2_QUEUE_SIZE(7)); 898 WREG32(VM_L2_CNTL2, 0); 899 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); 900 /* Setup TLB control */ 901 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | 902 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 903 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | 904 ENABLE_WAIT_L2_QUERY; 905 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); 906 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 907 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); 908 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 909 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); 910 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); 911 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); 912 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); 913 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); 914 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); 915 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); 916 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); 917 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 918 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 919 WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); 920 WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); 921 WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); 922 WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | 923 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 924 WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 925 (u32)(rdev->dummy_page.addr >> 12)); 926 for (i = 1; i < 7; i++) 927 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 928 929 r600_pcie_gart_tlb_flush(rdev); 930 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 931 (unsigned)(rdev->mc.gtt_size >> 20), 932 (unsigned long long)rdev->gart.table_addr); 933 rdev->gart.ready = true; 934 return 0; 935 } 936 937 static void r600_pcie_gart_disable(struct radeon_device *rdev) 938 { 939 u32 tmp; 940 int i; 941 942 /* Disable all tables */ 943 for (i = 0; i < 7; i++) 944 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 945 946 /* Disable L2 cache */ 947 WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | 948 EFFECTIVE_L2_QUEUE_SIZE(7)); 949 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); 950 /* Setup L1 TLB control */ 951 tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | 952 ENABLE_WAIT_L2_QUERY; 953 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); 954 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); 955 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); 956 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); 957 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); 958 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); 959 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); 960 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); 961 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp); 962 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp); 963 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); 964 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 965 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 966 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 967 radeon_gart_table_vram_unpin(rdev); 968 } 969 970 static void r600_pcie_gart_fini(struct radeon_device *rdev) 971 { 972 radeon_gart_fini(rdev); 973 r600_pcie_gart_disable(rdev); 974 radeon_gart_table_vram_free(rdev); 975 } 976 977 static void r600_agp_enable(struct radeon_device *rdev) 978 { 979 u32 tmp; 980 int i; 981 982 /* Setup L2 cache */ 983 WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | 984 ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | 985 EFFECTIVE_L2_QUEUE_SIZE(7)); 986 WREG32(VM_L2_CNTL2, 0); 987 WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); 988 /* Setup TLB control */ 989 tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | 990 SYSTEM_ACCESS_MODE_NOT_IN_SYS | 991 EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | 992 ENABLE_WAIT_L2_QUERY; 993 WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); 994 WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); 995 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); 996 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 997 WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); 998 WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); 999 WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); 1000 WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); 1001 WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); 1002 WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); 1003 WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); 1004 WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); 1005 WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1006 WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); 1007 for (i = 0; i < 7; i++) 1008 WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); 1009 } 1010 1011 int r600_mc_wait_for_idle(struct radeon_device *rdev) 1012 { 1013 unsigned i; 1014 u32 tmp; 1015 1016 for (i = 0; i < rdev->usec_timeout; i++) { 1017 /* read MC_STATUS */ 1018 tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00; 1019 if (!tmp) 1020 return 0; 1021 udelay(1); 1022 } 1023 return -1; 1024 } 1025 1026 static void r600_mc_program(struct radeon_device *rdev) 1027 { 1028 struct rv515_mc_save save; 1029 u32 tmp; 1030 int i, j; 1031 1032 /* Initialize HDP */ 1033 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1034 WREG32((0x2c14 + j), 0x00000000); 1035 WREG32((0x2c18 + j), 0x00000000); 1036 WREG32((0x2c1c + j), 0x00000000); 1037 WREG32((0x2c20 + j), 0x00000000); 1038 WREG32((0x2c24 + j), 0x00000000); 1039 } 1040 WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); 1041 1042 rv515_mc_stop(rdev, &save); 1043 if (r600_mc_wait_for_idle(rdev)) { 1044 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1045 } 1046 /* Lockout access through VGA aperture (doesn't exist before R600) */ 1047 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 1048 /* Update configuration */ 1049 if (rdev->flags & RADEON_IS_AGP) { 1050 if (rdev->mc.vram_start < rdev->mc.gtt_start) { 1051 /* VRAM before AGP */ 1052 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 1053 rdev->mc.vram_start >> 12); 1054 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1055 rdev->mc.gtt_end >> 12); 1056 } else { 1057 /* VRAM after AGP */ 1058 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 1059 rdev->mc.gtt_start >> 12); 1060 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1061 rdev->mc.vram_end >> 12); 1062 } 1063 } else { 1064 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); 1065 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); 1066 } 1067 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12); 1068 tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; 1069 tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); 1070 WREG32(MC_VM_FB_LOCATION, tmp); 1071 WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); 1072 WREG32(HDP_NONSURFACE_INFO, (2 << 7)); 1073 WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF); 1074 if (rdev->flags & RADEON_IS_AGP) { 1075 WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); 1076 WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); 1077 WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); 1078 } else { 1079 WREG32(MC_VM_AGP_BASE, 0); 1080 WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 1081 WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); 1082 } 1083 if (r600_mc_wait_for_idle(rdev)) { 1084 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1085 } 1086 rv515_mc_resume(rdev, &save); 1087 /* we need to own VRAM, so turn off the VGA renderer here 1088 * to stop it overwriting our objects */ 1089 rv515_vga_render_disable(rdev); 1090 } 1091 1092 /** 1093 * r600_vram_gtt_location - try to find VRAM & GTT location 1094 * @rdev: radeon device structure holding all necessary informations 1095 * @mc: memory controller structure holding memory informations 1096 * 1097 * Function will place try to place VRAM at same place as in CPU (PCI) 1098 * address space as some GPU seems to have issue when we reprogram at 1099 * different address space. 1100 * 1101 * If there is not enough space to fit the unvisible VRAM after the 1102 * aperture then we limit the VRAM size to the aperture. 1103 * 1104 * If we are using AGP then place VRAM adjacent to AGP aperture are we need 1105 * them to be in one from GPU point of view so that we can program GPU to 1106 * catch access outside them (weird GPU policy see ??). 1107 * 1108 * This function will never fails, worst case are limiting VRAM or GTT. 1109 * 1110 * Note: GTT start, end, size should be initialized before calling this 1111 * function on AGP platform. 1112 */ 1113 static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 1114 { 1115 u64 size_bf, size_af; 1116 1117 if (mc->mc_vram_size > 0xE0000000) { 1118 /* leave room for at least 512M GTT */ 1119 dev_warn(rdev->dev, "limiting VRAM\n"); 1120 mc->real_vram_size = 0xE0000000; 1121 mc->mc_vram_size = 0xE0000000; 1122 } 1123 if (rdev->flags & RADEON_IS_AGP) { 1124 size_bf = mc->gtt_start; 1125 size_af = 0xFFFFFFFF - mc->gtt_end; 1126 if (size_bf > size_af) { 1127 if (mc->mc_vram_size > size_bf) { 1128 dev_warn(rdev->dev, "limiting VRAM\n"); 1129 mc->real_vram_size = size_bf; 1130 mc->mc_vram_size = size_bf; 1131 } 1132 mc->vram_start = mc->gtt_start - mc->mc_vram_size; 1133 } else { 1134 if (mc->mc_vram_size > size_af) { 1135 dev_warn(rdev->dev, "limiting VRAM\n"); 1136 mc->real_vram_size = size_af; 1137 mc->mc_vram_size = size_af; 1138 } 1139 mc->vram_start = mc->gtt_end + 1; 1140 } 1141 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; 1142 dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", 1143 mc->mc_vram_size >> 20, mc->vram_start, 1144 mc->vram_end, mc->real_vram_size >> 20); 1145 } else { 1146 u64 base = 0; 1147 if (rdev->flags & RADEON_IS_IGP) { 1148 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; 1149 base <<= 24; 1150 } 1151 radeon_vram_location(rdev, &rdev->mc, base); 1152 rdev->mc.gtt_base_align = 0; 1153 radeon_gtt_location(rdev, mc); 1154 } 1155 } 1156 1157 static int r600_mc_init(struct radeon_device *rdev) 1158 { 1159 u32 tmp; 1160 int chansize, numchan; 1161 1162 /* Get VRAM informations */ 1163 rdev->mc.vram_is_ddr = true; 1164 tmp = RREG32(RAMCFG); 1165 if (tmp & CHANSIZE_OVERRIDE) { 1166 chansize = 16; 1167 } else if (tmp & CHANSIZE_MASK) { 1168 chansize = 64; 1169 } else { 1170 chansize = 32; 1171 } 1172 tmp = RREG32(CHMAP); 1173 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { 1174 case 0: 1175 default: 1176 numchan = 1; 1177 break; 1178 case 1: 1179 numchan = 2; 1180 break; 1181 case 2: 1182 numchan = 4; 1183 break; 1184 case 3: 1185 numchan = 8; 1186 break; 1187 } 1188 rdev->mc.vram_width = numchan * chansize; 1189 /* Could aper size report 0 ? */ 1190 rdev->mc.aper_base = rdev->fb_aper_offset; 1191 rdev->mc.aper_size = rdev->fb_aper_size; 1192 /* Setup GPU memory space */ 1193 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1195 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1196 r600_vram_gtt_location(rdev, &rdev->mc); 1197 1198 if (rdev->flags & RADEON_IS_IGP) { 1199 rs690_pm_info(rdev); 1200 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 1201 } 1202 radeon_update_bandwidth_info(rdev); 1203 return 0; 1204 } 1205 1206 int r600_vram_scratch_init(struct radeon_device *rdev) 1207 { 1208 int r; 1209 1210 if (rdev->vram_scratch.robj == NULL) { 1211 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, 1212 PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 1213 NULL, &rdev->vram_scratch.robj); 1214 if (r) { 1215 return r; 1216 } 1217 } 1218 1219 r = radeon_bo_reserve(rdev->vram_scratch.robj, false); 1220 if (unlikely(r != 0)) 1221 return r; 1222 r = radeon_bo_pin(rdev->vram_scratch.robj, 1223 RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr); 1224 if (r) { 1225 radeon_bo_unreserve(rdev->vram_scratch.robj); 1226 return r; 1227 } 1228 r = radeon_bo_kmap(rdev->vram_scratch.robj, 1229 (void **)&rdev->vram_scratch.ptr); 1230 if (r) 1231 radeon_bo_unpin(rdev->vram_scratch.robj); 1232 radeon_bo_unreserve(rdev->vram_scratch.robj); 1233 1234 return r; 1235 } 1236 1237 void r600_vram_scratch_fini(struct radeon_device *rdev) 1238 { 1239 int r; 1240 1241 if (rdev->vram_scratch.robj == NULL) { 1242 return; 1243 } 1244 r = radeon_bo_reserve(rdev->vram_scratch.robj, false); 1245 if (likely(r == 0)) { 1246 radeon_bo_kunmap(rdev->vram_scratch.robj); 1247 radeon_bo_unpin(rdev->vram_scratch.robj); 1248 radeon_bo_unreserve(rdev->vram_scratch.robj); 1249 } 1250 radeon_bo_unref(&rdev->vram_scratch.robj); 1251 } 1252 1253 /* We doesn't check that the GPU really needs a reset we simply do the 1254 * reset, it's up to the caller to determine if the GPU needs one. We 1255 * might add an helper function to check that. 1256 */ 1257 static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev) 1258 { 1259 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | 1260 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | 1261 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | 1262 S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) | 1263 S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) | 1264 S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) | 1265 S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) | 1266 S_008010_GUI_ACTIVE(1); 1267 u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) | 1268 S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) | 1269 S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) | 1270 S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) | 1271 S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) | 1272 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) | 1273 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | 1274 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); 1275 u32 tmp; 1276 1277 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1278 return; 1279 1280 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", 1281 RREG32(R_008010_GRBM_STATUS)); 1282 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", 1283 RREG32(R_008014_GRBM_STATUS2)); 1284 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", 1285 RREG32(R_000E50_SRBM_STATUS)); 1286 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1287 RREG32(CP_STALLED_STAT1)); 1288 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1289 RREG32(CP_STALLED_STAT2)); 1290 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1291 RREG32(CP_BUSY_STAT)); 1292 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1293 RREG32(CP_STAT)); 1294 1295 /* Disable CP parsing/prefetching */ 1296 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1297 1298 /* Check if any of the rendering block is busy and reset it */ 1299 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 1300 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 1301 tmp = S_008020_SOFT_RESET_CR(1) | 1302 S_008020_SOFT_RESET_DB(1) | 1303 S_008020_SOFT_RESET_CB(1) | 1304 S_008020_SOFT_RESET_PA(1) | 1305 S_008020_SOFT_RESET_SC(1) | 1306 S_008020_SOFT_RESET_SMX(1) | 1307 S_008020_SOFT_RESET_SPI(1) | 1308 S_008020_SOFT_RESET_SX(1) | 1309 S_008020_SOFT_RESET_SH(1) | 1310 S_008020_SOFT_RESET_TC(1) | 1311 S_008020_SOFT_RESET_TA(1) | 1312 S_008020_SOFT_RESET_VC(1) | 1313 S_008020_SOFT_RESET_VGT(1); 1314 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 1315 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1316 RREG32(R_008020_GRBM_SOFT_RESET); 1317 mdelay(15); 1318 WREG32(R_008020_GRBM_SOFT_RESET, 0); 1319 } 1320 /* Reset CP (we always reset CP) */ 1321 tmp = S_008020_SOFT_RESET_CP(1); 1322 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); 1323 WREG32(R_008020_GRBM_SOFT_RESET, tmp); 1324 RREG32(R_008020_GRBM_SOFT_RESET); 1325 mdelay(15); 1326 WREG32(R_008020_GRBM_SOFT_RESET, 0); 1327 1328 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", 1329 RREG32(R_008010_GRBM_STATUS)); 1330 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", 1331 RREG32(R_008014_GRBM_STATUS2)); 1332 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", 1333 RREG32(R_000E50_SRBM_STATUS)); 1334 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1335 RREG32(CP_STALLED_STAT1)); 1336 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1337 RREG32(CP_STALLED_STAT2)); 1338 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1339 RREG32(CP_BUSY_STAT)); 1340 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1341 RREG32(CP_STAT)); 1342 1343 } 1344 1345 static void r600_gpu_soft_reset_dma(struct radeon_device *rdev) 1346 { 1347 u32 tmp; 1348 1349 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 1350 return; 1351 1352 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1353 RREG32(DMA_STATUS_REG)); 1354 1355 /* Disable DMA */ 1356 tmp = RREG32(DMA_RB_CNTL); 1357 tmp &= ~DMA_RB_ENABLE; 1358 WREG32(DMA_RB_CNTL, tmp); 1359 1360 /* Reset dma */ 1361 if (rdev->family >= CHIP_RV770) 1362 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); 1363 else 1364 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); 1365 RREG32(SRBM_SOFT_RESET); 1366 udelay(50); 1367 WREG32(SRBM_SOFT_RESET, 0); 1368 1369 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1370 RREG32(DMA_STATUS_REG)); 1371 } 1372 1373 static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 1374 { 1375 struct rv515_mc_save save; 1376 1377 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1378 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE); 1379 1380 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 1381 reset_mask &= ~RADEON_RESET_DMA; 1382 1383 if (reset_mask == 0) 1384 return 0; 1385 1386 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 1387 1388 rv515_mc_stop(rdev, &save); 1389 if (r600_mc_wait_for_idle(rdev)) { 1390 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 1391 } 1392 1393 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) 1394 r600_gpu_soft_reset_gfx(rdev); 1395 1396 if (reset_mask & RADEON_RESET_DMA) 1397 r600_gpu_soft_reset_dma(rdev); 1398 1399 /* Wait a little for things to settle down */ 1400 mdelay(1); 1401 1402 rv515_mc_resume(rdev, &save); 1403 return 0; 1404 } 1405 1406 bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1407 { 1408 u32 srbm_status; 1409 u32 grbm_status; 1410 u32 grbm_status2; 1411 1412 srbm_status = RREG32(R_000E50_SRBM_STATUS); 1413 grbm_status = RREG32(R_008010_GRBM_STATUS); 1414 grbm_status2 = RREG32(R_008014_GRBM_STATUS2); 1415 if (!G_008010_GUI_ACTIVE(grbm_status)) { 1416 radeon_ring_lockup_update(ring); 1417 return false; 1418 } 1419 /* force CP activities */ 1420 radeon_ring_force_activity(rdev, ring); 1421 return radeon_ring_test_lockup(rdev, ring); 1422 } 1423 1424 /** 1425 * r600_dma_is_lockup - Check if the DMA engine is locked up 1426 * 1427 * @rdev: radeon_device pointer 1428 * @ring: radeon_ring structure holding ring information 1429 * 1430 * Check if the async DMA engine is locked up (r6xx-evergreen). 1431 * Returns true if the engine appears to be locked up, false if not. 1432 */ 1433 bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1434 { 1435 u32 dma_status_reg; 1436 1437 dma_status_reg = RREG32(DMA_STATUS_REG); 1438 if (dma_status_reg & DMA_IDLE) { 1439 radeon_ring_lockup_update(ring); 1440 return false; 1441 } 1442 /* force ring activities */ 1443 radeon_ring_force_activity(rdev, ring); 1444 return radeon_ring_test_lockup(rdev, ring); 1445 } 1446 1447 int r600_asic_reset(struct radeon_device *rdev) 1448 { 1449 return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX | 1450 RADEON_RESET_COMPUTE | 1451 RADEON_RESET_DMA)); 1452 } 1453 1454 u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1455 u32 tiling_pipe_num, 1456 u32 max_rb_num, 1457 u32 total_max_rb_num, 1458 u32 disabled_rb_mask) 1459 { 1460 u32 rendering_pipe_num, rb_num_width, req_rb_num; 1461 u32 pipe_rb_ratio, pipe_rb_remain, tmp; 1462 u32 data = 0, mask = 1 << (max_rb_num - 1); 1463 unsigned i, j; 1464 1465 /* mask out the RBs that don't exist on that asic */ 1466 tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff); 1467 /* make sure at least one RB is available */ 1468 if ((tmp & 0xff) != 0xff) 1469 disabled_rb_mask = tmp; 1470 1471 rendering_pipe_num = 1 << tiling_pipe_num; 1472 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); 1473 BUG_ON(rendering_pipe_num < req_rb_num); 1474 1475 pipe_rb_ratio = rendering_pipe_num / req_rb_num; 1476 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num; 1477 1478 if (rdev->family <= CHIP_RV740) { 1479 /* r6xx/r7xx */ 1480 rb_num_width = 2; 1481 } else { 1482 /* eg+ */ 1483 rb_num_width = 4; 1484 } 1485 1486 for (i = 0; i < max_rb_num; i++) { 1487 if (!(mask & disabled_rb_mask)) { 1488 for (j = 0; j < pipe_rb_ratio; j++) { 1489 data <<= rb_num_width; 1490 data |= max_rb_num - i - 1; 1491 } 1492 if (pipe_rb_remain) { 1493 data <<= rb_num_width; 1494 data |= max_rb_num - i - 1; 1495 pipe_rb_remain--; 1496 } 1497 } 1498 mask >>= 1; 1499 } 1500 1501 return data; 1502 } 1503 1504 int r600_count_pipe_bits(uint32_t val) 1505 { 1506 return hweight32(val); 1507 } 1508 1509 static void r600_gpu_init(struct radeon_device *rdev) 1510 { 1511 u32 tiling_config; 1512 u32 ramcfg; 1513 u32 cc_rb_backend_disable; 1514 u32 cc_gc_shader_pipe_config; 1515 u32 tmp; 1516 int i, j; 1517 u32 sq_config; 1518 u32 sq_gpr_resource_mgmt_1 = 0; 1519 u32 sq_gpr_resource_mgmt_2 = 0; 1520 u32 sq_thread_resource_mgmt = 0; 1521 u32 sq_stack_resource_mgmt_1 = 0; 1522 u32 sq_stack_resource_mgmt_2 = 0; 1523 u32 disabled_rb_mask; 1524 1525 rdev->config.r600.tiling_group_size = 256; 1526 switch (rdev->family) { 1527 case CHIP_R600: 1528 rdev->config.r600.max_pipes = 4; 1529 rdev->config.r600.max_tile_pipes = 8; 1530 rdev->config.r600.max_simds = 4; 1531 rdev->config.r600.max_backends = 4; 1532 rdev->config.r600.max_gprs = 256; 1533 rdev->config.r600.max_threads = 192; 1534 rdev->config.r600.max_stack_entries = 256; 1535 rdev->config.r600.max_hw_contexts = 8; 1536 rdev->config.r600.max_gs_threads = 16; 1537 rdev->config.r600.sx_max_export_size = 128; 1538 rdev->config.r600.sx_max_export_pos_size = 16; 1539 rdev->config.r600.sx_max_export_smx_size = 128; 1540 rdev->config.r600.sq_num_cf_insts = 2; 1541 break; 1542 case CHIP_RV630: 1543 case CHIP_RV635: 1544 rdev->config.r600.max_pipes = 2; 1545 rdev->config.r600.max_tile_pipes = 2; 1546 rdev->config.r600.max_simds = 3; 1547 rdev->config.r600.max_backends = 1; 1548 rdev->config.r600.max_gprs = 128; 1549 rdev->config.r600.max_threads = 192; 1550 rdev->config.r600.max_stack_entries = 128; 1551 rdev->config.r600.max_hw_contexts = 8; 1552 rdev->config.r600.max_gs_threads = 4; 1553 rdev->config.r600.sx_max_export_size = 128; 1554 rdev->config.r600.sx_max_export_pos_size = 16; 1555 rdev->config.r600.sx_max_export_smx_size = 128; 1556 rdev->config.r600.sq_num_cf_insts = 2; 1557 break; 1558 case CHIP_RV610: 1559 case CHIP_RV620: 1560 case CHIP_RS780: 1561 case CHIP_RS880: 1562 rdev->config.r600.max_pipes = 1; 1563 rdev->config.r600.max_tile_pipes = 1; 1564 rdev->config.r600.max_simds = 2; 1565 rdev->config.r600.max_backends = 1; 1566 rdev->config.r600.max_gprs = 128; 1567 rdev->config.r600.max_threads = 192; 1568 rdev->config.r600.max_stack_entries = 128; 1569 rdev->config.r600.max_hw_contexts = 4; 1570 rdev->config.r600.max_gs_threads = 4; 1571 rdev->config.r600.sx_max_export_size = 128; 1572 rdev->config.r600.sx_max_export_pos_size = 16; 1573 rdev->config.r600.sx_max_export_smx_size = 128; 1574 rdev->config.r600.sq_num_cf_insts = 1; 1575 break; 1576 case CHIP_RV670: 1577 rdev->config.r600.max_pipes = 4; 1578 rdev->config.r600.max_tile_pipes = 4; 1579 rdev->config.r600.max_simds = 4; 1580 rdev->config.r600.max_backends = 4; 1581 rdev->config.r600.max_gprs = 192; 1582 rdev->config.r600.max_threads = 192; 1583 rdev->config.r600.max_stack_entries = 256; 1584 rdev->config.r600.max_hw_contexts = 8; 1585 rdev->config.r600.max_gs_threads = 16; 1586 rdev->config.r600.sx_max_export_size = 128; 1587 rdev->config.r600.sx_max_export_pos_size = 16; 1588 rdev->config.r600.sx_max_export_smx_size = 128; 1589 rdev->config.r600.sq_num_cf_insts = 2; 1590 break; 1591 default: 1592 break; 1593 } 1594 1595 /* Initialize HDP */ 1596 for (i = 0, j = 0; i < 32; i++, j += 0x18) { 1597 WREG32((0x2c14 + j), 0x00000000); 1598 WREG32((0x2c18 + j), 0x00000000); 1599 WREG32((0x2c1c + j), 0x00000000); 1600 WREG32((0x2c20 + j), 0x00000000); 1601 WREG32((0x2c24 + j), 0x00000000); 1602 } 1603 1604 WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); 1605 1606 /* Setup tiling */ 1607 tiling_config = 0; 1608 ramcfg = RREG32(RAMCFG); 1609 switch (rdev->config.r600.max_tile_pipes) { 1610 case 1: 1611 tiling_config |= PIPE_TILING(0); 1612 break; 1613 case 2: 1614 tiling_config |= PIPE_TILING(1); 1615 break; 1616 case 4: 1617 tiling_config |= PIPE_TILING(2); 1618 break; 1619 case 8: 1620 tiling_config |= PIPE_TILING(3); 1621 break; 1622 default: 1623 break; 1624 } 1625 rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; 1626 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1627 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1628 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 1629 1630 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 1631 if (tmp > 3) { 1632 tiling_config |= ROW_TILING(3); 1633 tiling_config |= SAMPLE_SPLIT(3); 1634 } else { 1635 tiling_config |= ROW_TILING(tmp); 1636 tiling_config |= SAMPLE_SPLIT(tmp); 1637 } 1638 tiling_config |= BANK_SWAPS(1); 1639 1640 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1641 tmp = R6XX_MAX_BACKENDS - 1642 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); 1643 if (tmp < rdev->config.r600.max_backends) { 1644 rdev->config.r600.max_backends = tmp; 1645 } 1646 1647 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; 1648 tmp = R6XX_MAX_PIPES - 1649 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); 1650 if (tmp < rdev->config.r600.max_pipes) { 1651 rdev->config.r600.max_pipes = tmp; 1652 } 1653 tmp = R6XX_MAX_SIMDS - 1654 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); 1655 if (tmp < rdev->config.r600.max_simds) { 1656 rdev->config.r600.max_simds = tmp; 1657 } 1658 1659 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; 1660 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; 1661 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, 1662 R6XX_MAX_BACKENDS, disabled_rb_mask); 1663 tiling_config |= tmp << 16; 1664 rdev->config.r600.backend_map = tmp; 1665 1666 rdev->config.r600.tile_config = tiling_config; 1667 WREG32(GB_TILING_CONFIG, tiling_config); 1668 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1669 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1670 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff); 1671 1672 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1673 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1674 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1675 1676 /* Setup some CP states */ 1677 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b))); 1678 WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40))); 1679 1680 WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT | 1681 SYNC_WALKER | SYNC_ALIGNER)); 1682 /* Setup various GPU states */ 1683 if (rdev->family == CHIP_RV670) 1684 WREG32(ARB_GDEC_RD_CNTL, 0x00000021); 1685 1686 tmp = RREG32(SX_DEBUG_1); 1687 tmp |= SMX_EVENT_RELEASE; 1688 if ((rdev->family > CHIP_R600)) 1689 tmp |= ENABLE_NEW_SMX_ADDRESS; 1690 WREG32(SX_DEBUG_1, tmp); 1691 1692 if (((rdev->family) == CHIP_R600) || 1693 ((rdev->family) == CHIP_RV630) || 1694 ((rdev->family) == CHIP_RV610) || 1695 ((rdev->family) == CHIP_RV620) || 1696 ((rdev->family) == CHIP_RS780) || 1697 ((rdev->family) == CHIP_RS880)) { 1698 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); 1699 } else { 1700 WREG32(DB_DEBUG, 0); 1701 } 1702 WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) | 1703 DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4))); 1704 1705 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 1706 WREG32(VGT_NUM_INSTANCES, 0); 1707 1708 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); 1709 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0)); 1710 1711 tmp = RREG32(SQ_MS_FIFO_SIZES); 1712 if (((rdev->family) == CHIP_RV610) || 1713 ((rdev->family) == CHIP_RV620) || 1714 ((rdev->family) == CHIP_RS780) || 1715 ((rdev->family) == CHIP_RS880)) { 1716 tmp = (CACHE_FIFO_SIZE(0xa) | 1717 FETCH_FIFO_HIWATER(0xa) | 1718 DONE_FIFO_HIWATER(0xe0) | 1719 ALU_UPDATE_FIFO_HIWATER(0x8)); 1720 } else if (((rdev->family) == CHIP_R600) || 1721 ((rdev->family) == CHIP_RV630)) { 1722 tmp &= ~DONE_FIFO_HIWATER(0xff); 1723 tmp |= DONE_FIFO_HIWATER(0x4); 1724 } 1725 WREG32(SQ_MS_FIFO_SIZES, tmp); 1726 1727 /* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT 1728 * should be adjusted as needed by the 2D/3D drivers. This just sets default values 1729 */ 1730 sq_config = RREG32(SQ_CONFIG); 1731 sq_config &= ~(PS_PRIO(3) | 1732 VS_PRIO(3) | 1733 GS_PRIO(3) | 1734 ES_PRIO(3)); 1735 sq_config |= (DX9_CONSTS | 1736 VC_ENABLE | 1737 PS_PRIO(0) | 1738 VS_PRIO(1) | 1739 GS_PRIO(2) | 1740 ES_PRIO(3)); 1741 1742 if ((rdev->family) == CHIP_R600) { 1743 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) | 1744 NUM_VS_GPRS(124) | 1745 NUM_CLAUSE_TEMP_GPRS(4)); 1746 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) | 1747 NUM_ES_GPRS(0)); 1748 sq_thread_resource_mgmt = (NUM_PS_THREADS(136) | 1749 NUM_VS_THREADS(48) | 1750 NUM_GS_THREADS(4) | 1751 NUM_ES_THREADS(4)); 1752 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) | 1753 NUM_VS_STACK_ENTRIES(128)); 1754 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) | 1755 NUM_ES_STACK_ENTRIES(0)); 1756 } else if (((rdev->family) == CHIP_RV610) || 1757 ((rdev->family) == CHIP_RV620) || 1758 ((rdev->family) == CHIP_RS780) || 1759 ((rdev->family) == CHIP_RS880)) { 1760 /* no vertex cache */ 1761 sq_config &= ~VC_ENABLE; 1762 1763 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 1764 NUM_VS_GPRS(44) | 1765 NUM_CLAUSE_TEMP_GPRS(2)); 1766 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 1767 NUM_ES_GPRS(17)); 1768 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 1769 NUM_VS_THREADS(78) | 1770 NUM_GS_THREADS(4) | 1771 NUM_ES_THREADS(31)); 1772 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 1773 NUM_VS_STACK_ENTRIES(40)); 1774 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 1775 NUM_ES_STACK_ENTRIES(16)); 1776 } else if (((rdev->family) == CHIP_RV630) || 1777 ((rdev->family) == CHIP_RV635)) { 1778 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 1779 NUM_VS_GPRS(44) | 1780 NUM_CLAUSE_TEMP_GPRS(2)); 1781 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) | 1782 NUM_ES_GPRS(18)); 1783 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 1784 NUM_VS_THREADS(78) | 1785 NUM_GS_THREADS(4) | 1786 NUM_ES_THREADS(31)); 1787 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) | 1788 NUM_VS_STACK_ENTRIES(40)); 1789 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) | 1790 NUM_ES_STACK_ENTRIES(16)); 1791 } else if ((rdev->family) == CHIP_RV670) { 1792 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) | 1793 NUM_VS_GPRS(44) | 1794 NUM_CLAUSE_TEMP_GPRS(2)); 1795 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) | 1796 NUM_ES_GPRS(17)); 1797 sq_thread_resource_mgmt = (NUM_PS_THREADS(79) | 1798 NUM_VS_THREADS(78) | 1799 NUM_GS_THREADS(4) | 1800 NUM_ES_THREADS(31)); 1801 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) | 1802 NUM_VS_STACK_ENTRIES(64)); 1803 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) | 1804 NUM_ES_STACK_ENTRIES(64)); 1805 } 1806 1807 WREG32(SQ_CONFIG, sq_config); 1808 WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1); 1809 WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2); 1810 WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt); 1811 WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1); 1812 WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2); 1813 1814 if (((rdev->family) == CHIP_RV610) || 1815 ((rdev->family) == CHIP_RV620) || 1816 ((rdev->family) == CHIP_RS780) || 1817 ((rdev->family) == CHIP_RS880)) { 1818 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); 1819 } else { 1820 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); 1821 } 1822 1823 /* More default values. 2D/3D driver should adjust as needed */ 1824 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) | 1825 S1_X(0x4) | S1_Y(0xc))); 1826 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) | 1827 S1_X(0x2) | S1_Y(0x2) | 1828 S2_X(0xa) | S2_Y(0x6) | 1829 S3_X(0x6) | S3_Y(0xa))); 1830 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) | 1831 S1_X(0x4) | S1_Y(0xc) | 1832 S2_X(0x1) | S2_Y(0x6) | 1833 S3_X(0xa) | S3_Y(0xe))); 1834 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) | 1835 S5_X(0x0) | S5_Y(0x0) | 1836 S6_X(0xb) | S6_Y(0x4) | 1837 S7_X(0x7) | S7_Y(0x8))); 1838 1839 WREG32(VGT_STRMOUT_EN, 0); 1840 tmp = rdev->config.r600.max_pipes * 16; 1841 switch (rdev->family) { 1842 case CHIP_RV610: 1843 case CHIP_RV620: 1844 case CHIP_RS780: 1845 case CHIP_RS880: 1846 tmp += 32; 1847 break; 1848 case CHIP_RV670: 1849 tmp += 128; 1850 break; 1851 default: 1852 break; 1853 } 1854 if (tmp > 256) { 1855 tmp = 256; 1856 } 1857 WREG32(VGT_ES_PER_GS, 128); 1858 WREG32(VGT_GS_PER_ES, tmp); 1859 WREG32(VGT_GS_PER_VS, 2); 1860 WREG32(VGT_GS_VERTEX_REUSE, 16); 1861 1862 /* more default values. 2D/3D driver should adjust as needed */ 1863 WREG32(PA_SC_LINE_STIPPLE_STATE, 0); 1864 WREG32(VGT_STRMOUT_EN, 0); 1865 WREG32(SX_MISC, 0); 1866 WREG32(PA_SC_MODE_CNTL, 0); 1867 WREG32(PA_SC_AA_CONFIG, 0); 1868 WREG32(PA_SC_LINE_STIPPLE, 0); 1869 WREG32(SPI_INPUT_Z, 0); 1870 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2)); 1871 WREG32(CB_COLOR7_FRAG, 0); 1872 1873 /* Clear render buffer base addresses */ 1874 WREG32(CB_COLOR0_BASE, 0); 1875 WREG32(CB_COLOR1_BASE, 0); 1876 WREG32(CB_COLOR2_BASE, 0); 1877 WREG32(CB_COLOR3_BASE, 0); 1878 WREG32(CB_COLOR4_BASE, 0); 1879 WREG32(CB_COLOR5_BASE, 0); 1880 WREG32(CB_COLOR6_BASE, 0); 1881 WREG32(CB_COLOR7_BASE, 0); 1882 WREG32(CB_COLOR7_FRAG, 0); 1883 1884 switch (rdev->family) { 1885 case CHIP_RV610: 1886 case CHIP_RV620: 1887 case CHIP_RS780: 1888 case CHIP_RS880: 1889 tmp = TC_L2_SIZE(8); 1890 break; 1891 case CHIP_RV630: 1892 case CHIP_RV635: 1893 tmp = TC_L2_SIZE(4); 1894 break; 1895 case CHIP_R600: 1896 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT; 1897 break; 1898 default: 1899 tmp = TC_L2_SIZE(0); 1900 break; 1901 } 1902 WREG32(TC_CNTL, tmp); 1903 1904 tmp = RREG32(HDP_HOST_PATH_CNTL); 1905 WREG32(HDP_HOST_PATH_CNTL, tmp); 1906 1907 tmp = RREG32(ARB_POP); 1908 tmp |= ENABLE_TC128; 1909 WREG32(ARB_POP, tmp); 1910 1911 WREG32(PA_SC_MULTI_CHIP_CNTL, 0); 1912 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 1913 NUM_CLIP_SEQ(3))); 1914 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); 1915 WREG32(VC_ENHANCE, 0); 1916 } 1917 1918 1919 /* 1920 * Indirect registers accessor 1921 */ 1922 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg) 1923 { 1924 u32 r; 1925 1926 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 1927 (void)RREG32(PCIE_PORT_INDEX); 1928 r = RREG32(PCIE_PORT_DATA); 1929 return r; 1930 } 1931 1932 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) 1933 { 1934 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff)); 1935 (void)RREG32(PCIE_PORT_INDEX); 1936 WREG32(PCIE_PORT_DATA, (v)); 1937 (void)RREG32(PCIE_PORT_DATA); 1938 } 1939 1940 /* 1941 * CP & Ring 1942 */ 1943 void r600_cp_stop(struct radeon_device *rdev) 1944 { 1945 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1946 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1947 WREG32(SCRATCH_UMSK, 0); 1948 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1949 } 1950 1951 int r600_init_microcode(struct radeon_device *rdev) 1952 { 1953 const char *chip_name; 1954 const char *rlc_chip_name; 1955 size_t pfp_req_size, me_req_size, rlc_req_size; 1956 char fw_name[30]; 1957 int err; 1958 1959 DRM_DEBUG("\n"); 1960 1961 #if 0 1962 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); 1963 err = IS_ERR(pdev); 1964 if (err) { 1965 printk(KERN_ERR "radeon_cp: Failed to register firmware\n"); 1966 return -EINVAL; 1967 } 1968 #endif 1969 1970 switch (rdev->family) { 1971 case CHIP_R600: 1972 chip_name = "r600"; 1973 rlc_chip_name = "r600"; 1974 break; 1975 case CHIP_RV610: 1976 chip_name = "rv610"; 1977 rlc_chip_name = "r600"; 1978 break; 1979 case CHIP_RV630: 1980 chip_name = "rv630"; 1981 rlc_chip_name = "r600"; 1982 break; 1983 case CHIP_RV620: 1984 chip_name = "rv620"; 1985 rlc_chip_name = "r600"; 1986 break; 1987 case CHIP_RV635: 1988 chip_name = "rv635"; 1989 rlc_chip_name = "r600"; 1990 break; 1991 case CHIP_RV670: 1992 chip_name = "rv670"; 1993 rlc_chip_name = "r600"; 1994 break; 1995 case CHIP_RS780: 1996 case CHIP_RS880: 1997 chip_name = "rs780"; 1998 rlc_chip_name = "r600"; 1999 break; 2000 case CHIP_RV770: 2001 chip_name = "rv770"; 2002 rlc_chip_name = "r700"; 2003 break; 2004 case CHIP_RV730: 2005 case CHIP_RV740: 2006 chip_name = "rv730"; 2007 rlc_chip_name = "r700"; 2008 break; 2009 case CHIP_RV710: 2010 chip_name = "rv710"; 2011 rlc_chip_name = "r700"; 2012 break; 2013 case CHIP_CEDAR: 2014 chip_name = "cedar"; 2015 rlc_chip_name = "cedar"; 2016 break; 2017 case CHIP_REDWOOD: 2018 chip_name = "redwood"; 2019 rlc_chip_name = "redwood"; 2020 break; 2021 case CHIP_JUNIPER: 2022 chip_name = "juniper"; 2023 rlc_chip_name = "juniper"; 2024 break; 2025 case CHIP_CYPRESS: 2026 case CHIP_HEMLOCK: 2027 chip_name = "cypress"; 2028 rlc_chip_name = "cypress"; 2029 break; 2030 case CHIP_PALM: 2031 chip_name = "palm"; 2032 rlc_chip_name = "sumo"; 2033 break; 2034 case CHIP_SUMO: 2035 chip_name = "sumo"; 2036 rlc_chip_name = "sumo"; 2037 break; 2038 case CHIP_SUMO2: 2039 chip_name = "sumo2"; 2040 rlc_chip_name = "sumo"; 2041 break; 2042 default: BUG(); 2043 } 2044 2045 if (rdev->family >= CHIP_CEDAR) { 2046 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4; 2047 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4; 2048 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4; 2049 } else if (rdev->family >= CHIP_RV770) { 2050 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 2051 me_req_size = R700_PM4_UCODE_SIZE * 4; 2052 rlc_req_size = R700_RLC_UCODE_SIZE * 4; 2053 } else { 2054 pfp_req_size = PFP_UCODE_SIZE * 4; 2055 me_req_size = PM4_UCODE_SIZE * 12; 2056 rlc_req_size = RLC_UCODE_SIZE * 4; 2057 } 2058 2059 DRM_INFO("Loading %s Microcode\n", chip_name); 2060 2061 snprintf(fw_name, sizeof(fw_name), "radeon-%s_pfp", chip_name); 2062 err = loadfirmware(fw_name, &rdev->pfp_fw, &rdev->pfp_fw_size); 2063 if (err) 2064 goto out; 2065 if (rdev->pfp_fw_size != pfp_req_size) { 2066 DRM_ERROR( 2067 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2068 rdev->pfp_fw_size, fw_name); 2069 err = -EINVAL; 2070 goto out; 2071 } 2072 2073 snprintf(fw_name, sizeof(fw_name), "radeon-%s_me", chip_name); 2074 err = loadfirmware(fw_name, &rdev->me_fw, &rdev->me_fw_size); 2075 if (err) 2076 goto out; 2077 if (rdev->me_fw_size != me_req_size) { 2078 DRM_ERROR( 2079 "r600_cp: Bogus length %zu in firmware \"%s\"\n", 2080 rdev->me_fw_size, fw_name); 2081 err = -EINVAL; 2082 } 2083 2084 snprintf(fw_name, sizeof(fw_name), "radeon-%s_rlc", rlc_chip_name); 2085 err = loadfirmware(fw_name, &rdev->rlc_fw, &rdev->rlc_fw_size); 2086 if (err) 2087 goto out; 2088 if (rdev->rlc_fw_size != rlc_req_size) { 2089 DRM_ERROR( 2090 "r600_rlc: Bogus length %zu in firmware \"%s\"\n", 2091 rdev->rlc_fw_size, fw_name); 2092 err = -EINVAL; 2093 } 2094 2095 out: 2096 if (err) { 2097 if (err != -EINVAL) 2098 printk(KERN_ERR 2099 "r600_cp: Failed to load firmware \"%s\"\n", 2100 fw_name); 2101 if (rdev->pfp_fw) { 2102 free(rdev->pfp_fw, M_DEVBUF, 0); 2103 rdev->pfp_fw = NULL; 2104 } 2105 if (rdev->me_fw) { 2106 free(rdev->me_fw, M_DEVBUF, 0); 2107 rdev->me_fw = NULL; 2108 } 2109 if (rdev->rlc_fw) { 2110 free(rdev->rlc_fw, M_DEVBUF, 0); 2111 rdev->rlc_fw = NULL; 2112 } 2113 } 2114 return err; 2115 } 2116 2117 static int r600_cp_load_microcode(struct radeon_device *rdev) 2118 { 2119 const __be32 *fw_data; 2120 int i; 2121 2122 if (!rdev->me_fw || !rdev->pfp_fw) 2123 return -EINVAL; 2124 2125 r600_cp_stop(rdev); 2126 2127 WREG32(CP_RB_CNTL, 2128 #ifdef __BIG_ENDIAN 2129 BUF_SWAP_32BIT | 2130 #endif 2131 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2132 2133 /* Reset cp */ 2134 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2135 RREG32(GRBM_SOFT_RESET); 2136 mdelay(15); 2137 WREG32(GRBM_SOFT_RESET, 0); 2138 2139 WREG32(CP_ME_RAM_WADDR, 0); 2140 2141 fw_data = (const __be32 *)rdev->me_fw; 2142 WREG32(CP_ME_RAM_WADDR, 0); 2143 for (i = 0; i < PM4_UCODE_SIZE * 3; i++) 2144 WREG32(CP_ME_RAM_DATA, 2145 be32_to_cpup(fw_data++)); 2146 2147 fw_data = (const __be32 *)rdev->pfp_fw; 2148 WREG32(CP_PFP_UCODE_ADDR, 0); 2149 for (i = 0; i < PFP_UCODE_SIZE; i++) 2150 WREG32(CP_PFP_UCODE_DATA, 2151 be32_to_cpup(fw_data++)); 2152 2153 WREG32(CP_PFP_UCODE_ADDR, 0); 2154 WREG32(CP_ME_RAM_WADDR, 0); 2155 WREG32(CP_ME_RAM_RADDR, 0); 2156 return 0; 2157 } 2158 2159 int r600_cp_start(struct radeon_device *rdev) 2160 { 2161 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2162 int r; 2163 uint32_t cp_me; 2164 2165 r = radeon_ring_lock(rdev, ring, 7); 2166 if (r) { 2167 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2168 return r; 2169 } 2170 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2171 radeon_ring_write(ring, 0x1); 2172 if (rdev->family >= CHIP_RV770) { 2173 radeon_ring_write(ring, 0x0); 2174 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1); 2175 } else { 2176 radeon_ring_write(ring, 0x3); 2177 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1); 2178 } 2179 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2180 radeon_ring_write(ring, 0); 2181 radeon_ring_write(ring, 0); 2182 radeon_ring_unlock_commit(rdev, ring); 2183 2184 cp_me = 0xff; 2185 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2186 return 0; 2187 } 2188 2189 int r600_cp_resume(struct radeon_device *rdev) 2190 { 2191 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2192 u32 tmp; 2193 u32 rb_bufsz; 2194 int r; 2195 2196 /* Reset cp */ 2197 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2198 RREG32(GRBM_SOFT_RESET); 2199 mdelay(15); 2200 WREG32(GRBM_SOFT_RESET, 0); 2201 2202 /* Set ring buffer size */ 2203 rb_bufsz = drm_order(ring->ring_size / 8); 2204 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2205 #ifdef __BIG_ENDIAN 2206 tmp |= BUF_SWAP_32BIT; 2207 #endif 2208 WREG32(CP_RB_CNTL, tmp); 2209 WREG32(CP_SEM_WAIT_TIMER, 0x0); 2210 2211 /* Set the write pointer delay */ 2212 WREG32(CP_RB_WPTR_DELAY, 0); 2213 2214 /* Initialize the ring buffer's read and write pointers */ 2215 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2216 WREG32(CP_RB_RPTR_WR, 0); 2217 ring->wptr = 0; 2218 WREG32(CP_RB_WPTR, ring->wptr); 2219 2220 /* set the wb address whether it's enabled or not */ 2221 WREG32(CP_RB_RPTR_ADDR, 2222 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 2223 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2224 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2225 2226 if (rdev->wb.enabled) 2227 WREG32(SCRATCH_UMSK, 0xff); 2228 else { 2229 tmp |= RB_NO_UPDATE; 2230 WREG32(SCRATCH_UMSK, 0); 2231 } 2232 2233 mdelay(1); 2234 WREG32(CP_RB_CNTL, tmp); 2235 2236 WREG32(CP_RB_BASE, ring->gpu_addr >> 8); 2237 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2238 2239 ring->rptr = RREG32(CP_RB_RPTR); 2240 2241 r600_cp_start(rdev); 2242 ring->ready = true; 2243 r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring); 2244 if (r) { 2245 ring->ready = false; 2246 return r; 2247 } 2248 return 0; 2249 } 2250 2251 void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) 2252 { 2253 u32 rb_bufsz; 2254 int r; 2255 2256 /* Align ring size */ 2257 rb_bufsz = drm_order(ring_size / 8); 2258 ring_size = (1 << (rb_bufsz + 1)) * 4; 2259 ring->ring_size = ring_size; 2260 ring->align_mask = 16 - 1; 2261 2262 if (radeon_ring_supports_scratch_reg(rdev, ring)) { 2263 r = radeon_scratch_get(rdev, &ring->rptr_save_reg); 2264 if (r) { 2265 DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); 2266 ring->rptr_save_reg = 0; 2267 } 2268 } 2269 } 2270 2271 void r600_cp_fini(struct radeon_device *rdev) 2272 { 2273 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2274 r600_cp_stop(rdev); 2275 radeon_ring_fini(rdev, ring); 2276 radeon_scratch_free(rdev, ring->rptr_save_reg); 2277 } 2278 2279 /* 2280 * DMA 2281 * Starting with R600, the GPU has an asynchronous 2282 * DMA engine. The programming model is very similar 2283 * to the 3D engine (ring buffer, IBs, etc.), but the 2284 * DMA controller has it's own packet format that is 2285 * different form the PM4 format used by the 3D engine. 2286 * It supports copying data, writing embedded data, 2287 * solid fills, and a number of other things. It also 2288 * has support for tiling/detiling of buffers. 2289 */ 2290 /** 2291 * r600_dma_stop - stop the async dma engine 2292 * 2293 * @rdev: radeon_device pointer 2294 * 2295 * Stop the async dma engine (r6xx-evergreen). 2296 */ 2297 void r600_dma_stop(struct radeon_device *rdev) 2298 { 2299 u32 rb_cntl = RREG32(DMA_RB_CNTL); 2300 2301 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 2302 2303 rb_cntl &= ~DMA_RB_ENABLE; 2304 WREG32(DMA_RB_CNTL, rb_cntl); 2305 2306 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; 2307 } 2308 2309 /** 2310 * r600_dma_resume - setup and start the async dma engine 2311 * 2312 * @rdev: radeon_device pointer 2313 * 2314 * Set up the DMA ring buffer and enable it. (r6xx-evergreen). 2315 * Returns 0 for success, error for failure. 2316 */ 2317 int r600_dma_resume(struct radeon_device *rdev) 2318 { 2319 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 2320 u32 rb_cntl, dma_cntl, ib_cntl; 2321 u32 rb_bufsz; 2322 int r; 2323 2324 /* Reset dma */ 2325 if (rdev->family >= CHIP_RV770) 2326 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); 2327 else 2328 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA); 2329 RREG32(SRBM_SOFT_RESET); 2330 udelay(50); 2331 WREG32(SRBM_SOFT_RESET, 0); 2332 2333 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0); 2334 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0); 2335 2336 /* Set ring buffer size in dwords */ 2337 rb_bufsz = drm_order(ring->ring_size / 4); 2338 rb_cntl = rb_bufsz << 1; 2339 #ifdef __BIG_ENDIAN 2340 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; 2341 #endif 2342 WREG32(DMA_RB_CNTL, rb_cntl); 2343 2344 /* Initialize the ring buffer's read and write pointers */ 2345 WREG32(DMA_RB_RPTR, 0); 2346 WREG32(DMA_RB_WPTR, 0); 2347 2348 /* set the wb address whether it's enabled or not */ 2349 WREG32(DMA_RB_RPTR_ADDR_HI, 2350 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF); 2351 WREG32(DMA_RB_RPTR_ADDR_LO, 2352 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC)); 2353 2354 if (rdev->wb.enabled) 2355 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; 2356 2357 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8); 2358 2359 /* enable DMA IBs */ 2360 ib_cntl = DMA_IB_ENABLE; 2361 #ifdef __BIG_ENDIAN 2362 ib_cntl |= DMA_IB_SWAP_ENABLE; 2363 #endif 2364 WREG32(DMA_IB_CNTL, ib_cntl); 2365 2366 dma_cntl = RREG32(DMA_CNTL); 2367 dma_cntl &= ~CTXEMPTY_INT_ENABLE; 2368 WREG32(DMA_CNTL, dma_cntl); 2369 2370 if (rdev->family >= CHIP_RV770) 2371 WREG32(DMA_MODE, 1); 2372 2373 ring->wptr = 0; 2374 WREG32(DMA_RB_WPTR, ring->wptr << 2); 2375 2376 ring->rptr = RREG32(DMA_RB_RPTR) >> 2; 2377 2378 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE); 2379 2380 ring->ready = true; 2381 2382 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring); 2383 if (r) { 2384 ring->ready = false; 2385 return r; 2386 } 2387 2388 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); 2389 2390 return 0; 2391 } 2392 2393 /** 2394 * r600_dma_fini - tear down the async dma engine 2395 * 2396 * @rdev: radeon_device pointer 2397 * 2398 * Stop the async dma engine and free the ring (r6xx-evergreen). 2399 */ 2400 void r600_dma_fini(struct radeon_device *rdev) 2401 { 2402 r600_dma_stop(rdev); 2403 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); 2404 } 2405 2406 /* 2407 * GPU scratch registers helpers function. 2408 */ 2409 void r600_scratch_init(struct radeon_device *rdev) 2410 { 2411 int i; 2412 2413 rdev->scratch.num_reg = 7; 2414 rdev->scratch.reg_base = SCRATCH_REG0; 2415 for (i = 0; i < rdev->scratch.num_reg; i++) { 2416 rdev->scratch.free[i] = true; 2417 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4); 2418 } 2419 } 2420 2421 int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) 2422 { 2423 uint32_t scratch; 2424 uint32_t tmp = 0; 2425 unsigned i; 2426 int r; 2427 2428 r = radeon_scratch_get(rdev, &scratch); 2429 if (r) { 2430 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); 2431 return r; 2432 } 2433 WREG32(scratch, 0xCAFEDEAD); 2434 r = radeon_ring_lock(rdev, ring, 3); 2435 if (r) { 2436 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r); 2437 radeon_scratch_free(rdev, scratch); 2438 return r; 2439 } 2440 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2441 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2442 radeon_ring_write(ring, 0xDEADBEEF); 2443 radeon_ring_unlock_commit(rdev, ring); 2444 for (i = 0; i < rdev->usec_timeout; i++) { 2445 tmp = RREG32(scratch); 2446 if (tmp == 0xDEADBEEF) 2447 break; 2448 DRM_UDELAY(1); 2449 } 2450 if (i < rdev->usec_timeout) { 2451 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); 2452 } else { 2453 DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n", 2454 ring->idx, scratch, tmp); 2455 r = -EINVAL; 2456 } 2457 radeon_scratch_free(rdev, scratch); 2458 return r; 2459 } 2460 2461 /** 2462 * r600_dma_ring_test - simple async dma engine test 2463 * 2464 * @rdev: radeon_device pointer 2465 * @ring: radeon_ring structure holding ring information 2466 * 2467 * Test the DMA engine by writing using it to write an 2468 * value to memory. (r6xx-SI). 2469 * Returns 0 for success, error for failure. 2470 */ 2471 int r600_dma_ring_test(struct radeon_device *rdev, 2472 struct radeon_ring *ring) 2473 { 2474 unsigned i; 2475 int r; 2476 volatile uint32_t *ptr = rdev->vram_scratch.ptr; 2477 u32 tmp; 2478 2479 if (!ptr) { 2480 DRM_ERROR("invalid vram scratch pointer\n"); 2481 return -EINVAL; 2482 } 2483 2484 tmp = 0xCAFEDEAD; 2485 *ptr = tmp; 2486 2487 r = radeon_ring_lock(rdev, ring, 4); 2488 if (r) { 2489 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r); 2490 return r; 2491 } 2492 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 2493 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 2494 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); 2495 radeon_ring_write(ring, 0xDEADBEEF); 2496 radeon_ring_unlock_commit(rdev, ring); 2497 2498 for (i = 0; i < rdev->usec_timeout; i++) { 2499 tmp = *ptr; 2500 if (tmp == 0xDEADBEEF) 2501 break; 2502 DRM_UDELAY(1); 2503 } 2504 2505 if (i < rdev->usec_timeout) { 2506 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); 2507 } else { 2508 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n", 2509 ring->idx, tmp); 2510 r = -EINVAL; 2511 } 2512 return r; 2513 } 2514 2515 /* 2516 * CP fences/semaphores 2517 */ 2518 2519 void r600_fence_ring_emit(struct radeon_device *rdev, 2520 struct radeon_fence *fence) 2521 { 2522 struct radeon_ring *ring = &rdev->ring[fence->ring]; 2523 u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA | 2524 PACKET3_SH_ACTION_ENA; 2525 2526 if (rdev->family >= CHIP_RV770) 2527 cp_coher_cntl |= PACKET3_FULL_CACHE_ENA; 2528 2529 if (rdev->wb.use_event) { 2530 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 2531 /* flush read cache over gart */ 2532 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2533 radeon_ring_write(ring, cp_coher_cntl); 2534 radeon_ring_write(ring, 0xFFFFFFFF); 2535 radeon_ring_write(ring, 0); 2536 radeon_ring_write(ring, 10); /* poll interval */ 2537 /* EVENT_WRITE_EOP - flush caches, send int */ 2538 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2539 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 2540 radeon_ring_write(ring, addr & 0xffffffff); 2541 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 2542 radeon_ring_write(ring, fence->seq); 2543 radeon_ring_write(ring, 0); 2544 } else { 2545 /* flush read cache over gart */ 2546 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2547 radeon_ring_write(ring, cp_coher_cntl); 2548 radeon_ring_write(ring, 0xFFFFFFFF); 2549 radeon_ring_write(ring, 0); 2550 radeon_ring_write(ring, 10); /* poll interval */ 2551 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); 2552 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); 2553 /* wait for 3D idle clean */ 2554 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2555 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2556 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); 2557 /* Emit fence sequence & fire IRQ */ 2558 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2559 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2560 radeon_ring_write(ring, fence->seq); 2561 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 2562 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0)); 2563 radeon_ring_write(ring, RB_INT_STAT); 2564 } 2565 } 2566 2567 void r600_semaphore_ring_emit(struct radeon_device *rdev, 2568 struct radeon_ring *ring, 2569 struct radeon_semaphore *semaphore, 2570 bool emit_wait) 2571 { 2572 uint64_t addr = semaphore->gpu_addr; 2573 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 2574 2575 if (rdev->family < CHIP_CAYMAN) 2576 sel |= PACKET3_SEM_WAIT_ON_SIGNAL; 2577 2578 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2579 radeon_ring_write(ring, addr & 0xffffffff); 2580 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2581 } 2582 2583 /* 2584 * DMA fences/semaphores 2585 */ 2586 2587 /** 2588 * r600_dma_fence_ring_emit - emit a fence on the DMA ring 2589 * 2590 * @rdev: radeon_device pointer 2591 * @fence: radeon fence object 2592 * 2593 * Add a DMA fence packet to the ring to write 2594 * the fence seq number and DMA trap packet to generate 2595 * an interrupt if needed (r6xx-r7xx). 2596 */ 2597 void r600_dma_fence_ring_emit(struct radeon_device *rdev, 2598 struct radeon_fence *fence) 2599 { 2600 struct radeon_ring *ring = &rdev->ring[fence->ring]; 2601 u64 addr = rdev->fence_drv[fence->ring].gpu_addr; 2602 2603 /* write the fence */ 2604 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0)); 2605 radeon_ring_write(ring, addr & 0xfffffffc); 2606 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff)); 2607 radeon_ring_write(ring, lower_32_bits(fence->seq)); 2608 /* generate an interrupt */ 2609 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0)); 2610 } 2611 2612 /** 2613 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring 2614 * 2615 * @rdev: radeon_device pointer 2616 * @ring: radeon_ring structure holding ring information 2617 * @semaphore: radeon semaphore object 2618 * @emit_wait: wait or signal semaphore 2619 * 2620 * Add a DMA semaphore packet to the ring wait on or signal 2621 * other rings (r6xx-SI). 2622 */ 2623 void r600_dma_semaphore_ring_emit(struct radeon_device *rdev, 2624 struct radeon_ring *ring, 2625 struct radeon_semaphore *semaphore, 2626 bool emit_wait) 2627 { 2628 u64 addr = semaphore->gpu_addr; 2629 u32 s = emit_wait ? 0 : 1; 2630 2631 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0)); 2632 radeon_ring_write(ring, addr & 0xfffffffc); 2633 radeon_ring_write(ring, upper_32_bits(addr) & 0xff); 2634 } 2635 2636 int r600_copy_blit(struct radeon_device *rdev, 2637 uint64_t src_offset, 2638 uint64_t dst_offset, 2639 unsigned num_gpu_pages, 2640 struct radeon_fence **fence) 2641 { 2642 struct radeon_semaphore *sem = NULL; 2643 struct radeon_sa_bo *vb = NULL; 2644 int r; 2645 2646 r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem); 2647 if (r) { 2648 return r; 2649 } 2650 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb); 2651 r600_blit_done_copy(rdev, fence, vb, sem); 2652 return 0; 2653 } 2654 2655 /** 2656 * r600_copy_dma - copy pages using the DMA engine 2657 * 2658 * @rdev: radeon_device pointer 2659 * @src_offset: src GPU address 2660 * @dst_offset: dst GPU address 2661 * @num_gpu_pages: number of GPU pages to xfer 2662 * @fence: radeon fence object 2663 * 2664 * Copy GPU paging using the DMA engine (r6xx). 2665 * Used by the radeon ttm implementation to move pages if 2666 * registered as the asic copy callback. 2667 */ 2668 int r600_copy_dma(struct radeon_device *rdev, 2669 uint64_t src_offset, uint64_t dst_offset, 2670 unsigned num_gpu_pages, 2671 struct radeon_fence **fence) 2672 { 2673 struct radeon_semaphore *sem = NULL; 2674 int ring_index = rdev->asic->copy.dma_ring_index; 2675 struct radeon_ring *ring = &rdev->ring[ring_index]; 2676 u32 size_in_dw, cur_size_in_dw; 2677 int i, num_loops; 2678 int r = 0; 2679 2680 r = radeon_semaphore_create(rdev, &sem); 2681 if (r) { 2682 DRM_ERROR("radeon: moving bo (%d).\n", r); 2683 return r; 2684 } 2685 2686 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 2687 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); 2688 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); 2689 if (r) { 2690 DRM_ERROR("radeon: moving bo (%d).\n", r); 2691 radeon_semaphore_free(rdev, &sem, NULL); 2692 return r; 2693 } 2694 2695 if (radeon_fence_need_sync(*fence, ring->idx)) { 2696 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring, 2697 ring->idx); 2698 radeon_fence_note_sync(*fence, ring->idx); 2699 } else { 2700 radeon_semaphore_free(rdev, &sem, NULL); 2701 } 2702 2703 for (i = 0; i < num_loops; i++) { 2704 cur_size_in_dw = size_in_dw; 2705 if (cur_size_in_dw > 0xFFFE) 2706 cur_size_in_dw = 0xFFFE; 2707 size_in_dw -= cur_size_in_dw; 2708 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw)); 2709 radeon_ring_write(ring, dst_offset & 0xfffffffc); 2710 radeon_ring_write(ring, src_offset & 0xfffffffc); 2711 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) | 2712 (upper_32_bits(src_offset) & 0xff))); 2713 src_offset += cur_size_in_dw * 4; 2714 dst_offset += cur_size_in_dw * 4; 2715 } 2716 2717 r = radeon_fence_emit(rdev, fence, ring->idx); 2718 if (r) { 2719 radeon_ring_unlock_undo(rdev, ring); 2720 return r; 2721 } 2722 2723 radeon_ring_unlock_commit(rdev, ring); 2724 radeon_semaphore_free(rdev, &sem, *fence); 2725 2726 return r; 2727 } 2728 2729 int r600_set_surface_reg(struct radeon_device *rdev, int reg, 2730 uint32_t tiling_flags, uint32_t pitch, 2731 uint32_t offset, uint32_t obj_size) 2732 { 2733 /* FIXME: implement */ 2734 return 0; 2735 } 2736 2737 void r600_clear_surface_reg(struct radeon_device *rdev, int reg) 2738 { 2739 /* FIXME: implement */ 2740 } 2741 2742 static int r600_startup(struct radeon_device *rdev) 2743 { 2744 struct radeon_ring *ring; 2745 int r; 2746 2747 /* enable pcie gen2 link */ 2748 r600_pcie_gen2_enable(rdev); 2749 2750 r600_mc_program(rdev); 2751 2752 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { 2753 r = r600_init_microcode(rdev); 2754 if (r) { 2755 DRM_ERROR("Failed to load firmware!\n"); 2756 return r; 2757 } 2758 } 2759 2760 r = r600_vram_scratch_init(rdev); 2761 if (r) 2762 return r; 2763 2764 if (rdev->flags & RADEON_IS_AGP) { 2765 r600_agp_enable(rdev); 2766 } else { 2767 r = r600_pcie_gart_enable(rdev); 2768 if (r) 2769 return r; 2770 } 2771 r600_gpu_init(rdev); 2772 r = r600_blit_init(rdev); 2773 if (r) { 2774 r600_blit_fini(rdev); 2775 rdev->asic->copy.copy = NULL; 2776 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 2777 } 2778 2779 /* allocate wb buffer */ 2780 r = radeon_wb_init(rdev); 2781 if (r) 2782 return r; 2783 2784 r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); 2785 if (r) { 2786 dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r); 2787 return r; 2788 } 2789 2790 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX); 2791 if (r) { 2792 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r); 2793 return r; 2794 } 2795 2796 /* Enable IRQ */ 2797 if (!rdev->irq.installed) { 2798 r = radeon_irq_kms_init(rdev); 2799 if (r) 2800 return r; 2801 } 2802 2803 r = r600_irq_init(rdev); 2804 if (r) { 2805 DRM_ERROR("radeon: IH init failed (%d).\n", r); 2806 radeon_irq_kms_fini(rdev); 2807 return r; 2808 } 2809 r600_irq_set(rdev); 2810 2811 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2812 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 2813 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 2814 0, 0xfffff, RADEON_CP_PACKET2); 2815 if (r) 2816 return r; 2817 2818 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX]; 2819 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET, 2820 DMA_RB_RPTR, DMA_RB_WPTR, 2821 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 2822 if (r) 2823 return r; 2824 2825 r = r600_cp_load_microcode(rdev); 2826 if (r) 2827 return r; 2828 r = r600_cp_resume(rdev); 2829 if (r) 2830 return r; 2831 2832 r = r600_dma_resume(rdev); 2833 if (r) 2834 return r; 2835 2836 r = radeon_ib_pool_init(rdev); 2837 if (r) { 2838 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 2839 return r; 2840 } 2841 2842 r = r600_audio_init(rdev); 2843 if (r) { 2844 DRM_ERROR("radeon: audio init failed\n"); 2845 return r; 2846 } 2847 2848 return 0; 2849 } 2850 2851 void r600_vga_set_state(struct radeon_device *rdev, bool state) 2852 { 2853 uint32_t temp; 2854 2855 temp = RREG32(CONFIG_CNTL); 2856 if (state == false) { 2857 temp &= ~(1<<0); 2858 temp |= (1<<1); 2859 } else { 2860 temp &= ~(1<<1); 2861 } 2862 WREG32(CONFIG_CNTL, temp); 2863 } 2864 2865 int r600_resume(struct radeon_device *rdev) 2866 { 2867 int r; 2868 2869 /* Do not reset GPU before posting, on r600 hw unlike on r500 hw, 2870 * posting will perform necessary task to bring back GPU into good 2871 * shape. 2872 */ 2873 /* post card */ 2874 atom_asic_init(rdev->mode_info.atom_context); 2875 2876 rdev->accel_working = true; 2877 r = r600_startup(rdev); 2878 if (r) { 2879 DRM_ERROR("r600 startup failed on resume\n"); 2880 rdev->accel_working = false; 2881 return r; 2882 } 2883 2884 return r; 2885 } 2886 2887 int r600_suspend(struct radeon_device *rdev) 2888 { 2889 r600_audio_fini(rdev); 2890 r600_cp_stop(rdev); 2891 r600_dma_stop(rdev); 2892 r600_irq_suspend(rdev); 2893 radeon_wb_disable(rdev); 2894 r600_pcie_gart_disable(rdev); 2895 2896 return 0; 2897 } 2898 2899 /* Plan is to move initialization in that function and use 2900 * helper function so that radeon_device_init pretty much 2901 * do nothing more than calling asic specific function. This 2902 * should also allow to remove a bunch of callback function 2903 * like vram_info. 2904 */ 2905 int r600_init(struct radeon_device *rdev) 2906 { 2907 int r; 2908 2909 if (r600_debugfs_mc_info_init(rdev)) { 2910 DRM_ERROR("Failed to register debugfs file for mc !\n"); 2911 } 2912 /* Read BIOS */ 2913 if (!radeon_get_bios(rdev)) { 2914 if (ASIC_IS_AVIVO(rdev)) 2915 return -EINVAL; 2916 } 2917 /* Must be an ATOMBIOS */ 2918 if (!rdev->is_atom_bios) { 2919 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); 2920 return -EINVAL; 2921 } 2922 r = radeon_atombios_init(rdev); 2923 if (r) 2924 return r; 2925 /* Post card if necessary */ 2926 if (!radeon_card_posted(rdev)) { 2927 if (!rdev->bios) { 2928 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); 2929 return -EINVAL; 2930 } 2931 DRM_INFO("GPU not posted. posting now...\n"); 2932 atom_asic_init(rdev->mode_info.atom_context); 2933 } 2934 /* Initialize scratch registers */ 2935 r600_scratch_init(rdev); 2936 /* Initialize surface registers */ 2937 radeon_surface_init(rdev); 2938 /* Initialize clocks */ 2939 radeon_get_clock_info(rdev->ddev); 2940 /* Fence driver */ 2941 r = radeon_fence_driver_init(rdev); 2942 if (r) 2943 return r; 2944 if (rdev->flags & RADEON_IS_AGP) { 2945 r = radeon_agp_init(rdev); 2946 if (r) 2947 radeon_agp_disable(rdev); 2948 } 2949 r = r600_mc_init(rdev); 2950 if (r) 2951 return r; 2952 /* Memory manager */ 2953 r = radeon_bo_init(rdev); 2954 if (r) 2955 return r; 2956 2957 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 2958 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 2959 2960 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL; 2961 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024); 2962 2963 rdev->ih.ring_obj = NULL; 2964 r600_ih_ring_init(rdev, 64 * 1024); 2965 2966 r = r600_pcie_gart_init(rdev); 2967 if (r) 2968 return r; 2969 2970 rdev->accel_working = true; 2971 r = r600_startup(rdev); 2972 if (r) { 2973 dev_err(rdev->dev, "disabling GPU acceleration\n"); 2974 r600_cp_fini(rdev); 2975 r600_dma_fini(rdev); 2976 r600_irq_fini(rdev); 2977 radeon_wb_fini(rdev); 2978 radeon_ib_pool_fini(rdev); 2979 radeon_irq_kms_fini(rdev); 2980 r600_pcie_gart_fini(rdev); 2981 rdev->accel_working = false; 2982 } 2983 2984 return 0; 2985 } 2986 2987 void r600_fini(struct radeon_device *rdev) 2988 { 2989 r600_audio_fini(rdev); 2990 r600_blit_fini(rdev); 2991 r600_cp_fini(rdev); 2992 r600_dma_fini(rdev); 2993 r600_irq_fini(rdev); 2994 radeon_wb_fini(rdev); 2995 radeon_ib_pool_fini(rdev); 2996 radeon_irq_kms_fini(rdev); 2997 r600_pcie_gart_fini(rdev); 2998 r600_vram_scratch_fini(rdev); 2999 radeon_agp_fini(rdev); 3000 radeon_gem_fini(rdev); 3001 radeon_fence_driver_fini(rdev); 3002 radeon_bo_fini(rdev); 3003 radeon_atombios_fini(rdev); 3004 kfree(rdev->bios); 3005 rdev->bios = NULL; 3006 } 3007 3008 3009 /* 3010 * CS stuff 3011 */ 3012 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3013 { 3014 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3015 u32 next_rptr; 3016 3017 if (ring->rptr_save_reg) { 3018 next_rptr = ring->wptr + 3 + 4; 3019 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3020 radeon_ring_write(ring, ((ring->rptr_save_reg - 3021 PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 3022 radeon_ring_write(ring, next_rptr); 3023 } else if (rdev->wb.enabled) { 3024 next_rptr = ring->wptr + 5 + 4; 3025 radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3)); 3026 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3027 radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18)); 3028 radeon_ring_write(ring, next_rptr); 3029 radeon_ring_write(ring, 0); 3030 } 3031 3032 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 3033 radeon_ring_write(ring, 3034 #ifdef __BIG_ENDIAN 3035 (2 << 0) | 3036 #endif 3037 (ib->gpu_addr & 0xFFFFFFFC)); 3038 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 3039 radeon_ring_write(ring, ib->length_dw); 3040 } 3041 3042 int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3043 { 3044 struct radeon_ib ib; 3045 uint32_t scratch; 3046 uint32_t tmp = 0; 3047 unsigned i; 3048 int r; 3049 3050 r = radeon_scratch_get(rdev, &scratch); 3051 if (r) { 3052 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); 3053 return r; 3054 } 3055 WREG32(scratch, 0xCAFEDEAD); 3056 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3057 if (r) { 3058 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3059 goto free_scratch; 3060 } 3061 ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1); 3062 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3063 ib.ptr[2] = 0xDEADBEEF; 3064 ib.length_dw = 3; 3065 r = radeon_ib_schedule(rdev, &ib, NULL); 3066 if (r) { 3067 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3068 goto free_ib; 3069 } 3070 r = radeon_fence_wait(ib.fence, false); 3071 if (r) { 3072 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3073 goto free_ib; 3074 } 3075 for (i = 0; i < rdev->usec_timeout; i++) { 3076 tmp = RREG32(scratch); 3077 if (tmp == 0xDEADBEEF) 3078 break; 3079 DRM_UDELAY(1); 3080 } 3081 if (i < rdev->usec_timeout) { 3082 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); 3083 } else { 3084 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", 3085 scratch, tmp); 3086 r = -EINVAL; 3087 } 3088 free_ib: 3089 radeon_ib_free(rdev, &ib); 3090 free_scratch: 3091 radeon_scratch_free(rdev, scratch); 3092 return r; 3093 } 3094 3095 /** 3096 * r600_dma_ib_test - test an IB on the DMA engine 3097 * 3098 * @rdev: radeon_device pointer 3099 * @ring: radeon_ring structure holding ring information 3100 * 3101 * Test a simple IB in the DMA ring (r6xx-SI). 3102 * Returns 0 on success, error on failure. 3103 */ 3104 int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) 3105 { 3106 struct radeon_ib ib; 3107 unsigned i; 3108 int r; 3109 volatile uint32_t *ptr = rdev->vram_scratch.ptr; 3110 u32 tmp = 0; 3111 3112 if (!ptr) { 3113 DRM_ERROR("invalid vram scratch pointer\n"); 3114 return -EINVAL; 3115 } 3116 3117 tmp = 0xCAFEDEAD; 3118 *ptr = tmp; 3119 3120 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 3121 if (r) { 3122 DRM_ERROR("radeon: failed to get ib (%d).\n", r); 3123 return r; 3124 } 3125 3126 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); 3127 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; 3128 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; 3129 ib.ptr[3] = 0xDEADBEEF; 3130 ib.length_dw = 4; 3131 3132 r = radeon_ib_schedule(rdev, &ib, NULL); 3133 if (r) { 3134 radeon_ib_free(rdev, &ib); 3135 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3136 return r; 3137 } 3138 r = radeon_fence_wait(ib.fence, false); 3139 if (r) { 3140 DRM_ERROR("radeon: fence wait failed (%d).\n", r); 3141 return r; 3142 } 3143 for (i = 0; i < rdev->usec_timeout; i++) { 3144 tmp = *ptr; 3145 if (tmp == 0xDEADBEEF) 3146 break; 3147 DRM_UDELAY(1); 3148 } 3149 if (i < rdev->usec_timeout) { 3150 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i); 3151 } else { 3152 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp); 3153 r = -EINVAL; 3154 } 3155 radeon_ib_free(rdev, &ib); 3156 return r; 3157 } 3158 3159 /** 3160 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine 3161 * 3162 * @rdev: radeon_device pointer 3163 * @ib: IB object to schedule 3164 * 3165 * Schedule an IB in the DMA ring (r6xx-r7xx). 3166 */ 3167 void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3168 { 3169 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3170 3171 if (rdev->wb.enabled) { 3172 u32 next_rptr = ring->wptr + 4; 3173 while ((next_rptr & 7) != 5) 3174 next_rptr++; 3175 next_rptr += 3; 3176 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 3177 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); 3178 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff); 3179 radeon_ring_write(ring, next_rptr); 3180 } 3181 3182 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. 3183 * Pad as necessary with NOPs. 3184 */ 3185 while ((ring->wptr & 7) != 5) 3186 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 3187 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0)); 3188 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); 3189 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF)); 3190 3191 } 3192 3193 /* 3194 * Interrupts 3195 * 3196 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty 3197 * the same as the CP ring buffer, but in reverse. Rather than the CPU 3198 * writing to the ring and the GPU consuming, the GPU writes to the ring 3199 * and host consumes. As the host irq handler processes interrupts, it 3200 * increments the rptr. When the rptr catches up with the wptr, all the 3201 * current interrupts have been processed. 3202 */ 3203 3204 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) 3205 { 3206 u32 rb_bufsz; 3207 3208 /* Align ring size */ 3209 rb_bufsz = drm_order(ring_size / 4); 3210 ring_size = (1 << rb_bufsz) * 4; 3211 rdev->ih.ring_size = ring_size; 3212 rdev->ih.ptr_mask = rdev->ih.ring_size - 1; 3213 rdev->ih.rptr = 0; 3214 } 3215 3216 int r600_ih_ring_alloc(struct radeon_device *rdev) 3217 { 3218 int r; 3219 3220 /* Allocate ring buffer */ 3221 if (rdev->ih.ring_obj == NULL) { 3222 r = radeon_bo_create(rdev, rdev->ih.ring_size, 3223 PAGE_SIZE, true, 3224 RADEON_GEM_DOMAIN_GTT, 3225 NULL, &rdev->ih.ring_obj); 3226 if (r) { 3227 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); 3228 return r; 3229 } 3230 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3231 if (unlikely(r != 0)) 3232 return r; 3233 r = radeon_bo_pin(rdev->ih.ring_obj, 3234 RADEON_GEM_DOMAIN_GTT, 3235 &rdev->ih.gpu_addr); 3236 if (r) { 3237 radeon_bo_unreserve(rdev->ih.ring_obj); 3238 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); 3239 return r; 3240 } 3241 r = radeon_bo_kmap(rdev->ih.ring_obj, 3242 (void **)&rdev->ih.ring); 3243 radeon_bo_unreserve(rdev->ih.ring_obj); 3244 if (r) { 3245 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); 3246 return r; 3247 } 3248 } 3249 return 0; 3250 } 3251 3252 void r600_ih_ring_fini(struct radeon_device *rdev) 3253 { 3254 int r; 3255 if (rdev->ih.ring_obj) { 3256 r = radeon_bo_reserve(rdev->ih.ring_obj, false); 3257 if (likely(r == 0)) { 3258 radeon_bo_kunmap(rdev->ih.ring_obj); 3259 radeon_bo_unpin(rdev->ih.ring_obj); 3260 radeon_bo_unreserve(rdev->ih.ring_obj); 3261 } 3262 radeon_bo_unref(&rdev->ih.ring_obj); 3263 rdev->ih.ring = NULL; 3264 rdev->ih.ring_obj = NULL; 3265 } 3266 } 3267 3268 void r600_rlc_stop(struct radeon_device *rdev) 3269 { 3270 3271 if ((rdev->family >= CHIP_RV770) && 3272 (rdev->family <= CHIP_RV740)) { 3273 /* r7xx asics need to soft reset RLC before halting */ 3274 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 3275 RREG32(SRBM_SOFT_RESET); 3276 mdelay(15); 3277 WREG32(SRBM_SOFT_RESET, 0); 3278 RREG32(SRBM_SOFT_RESET); 3279 } 3280 3281 WREG32(RLC_CNTL, 0); 3282 } 3283 3284 static void r600_rlc_start(struct radeon_device *rdev) 3285 { 3286 WREG32(RLC_CNTL, RLC_ENABLE); 3287 } 3288 3289 static int r600_rlc_init(struct radeon_device *rdev) 3290 { 3291 u32 i; 3292 const __be32 *fw_data; 3293 3294 if (!rdev->rlc_fw) 3295 return -EINVAL; 3296 3297 r600_rlc_stop(rdev); 3298 3299 WREG32(RLC_HB_CNTL, 0); 3300 3301 if (rdev->family == CHIP_ARUBA) { 3302 WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8); 3303 WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8); 3304 } 3305 if (rdev->family <= CHIP_CAYMAN) { 3306 WREG32(RLC_HB_BASE, 0); 3307 WREG32(RLC_HB_RPTR, 0); 3308 WREG32(RLC_HB_WPTR, 0); 3309 } 3310 if (rdev->family <= CHIP_CAICOS) { 3311 WREG32(RLC_HB_WPTR_LSB_ADDR, 0); 3312 WREG32(RLC_HB_WPTR_MSB_ADDR, 0); 3313 } 3314 WREG32(RLC_MC_CNTL, 0); 3315 WREG32(RLC_UCODE_CNTL, 0); 3316 3317 fw_data = (const __be32 *)rdev->rlc_fw; 3318 if (rdev->family >= CHIP_ARUBA) { 3319 for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) { 3320 WREG32(RLC_UCODE_ADDR, i); 3321 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3322 } 3323 } else if (rdev->family >= CHIP_CAYMAN) { 3324 for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) { 3325 WREG32(RLC_UCODE_ADDR, i); 3326 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3327 } 3328 } else if (rdev->family >= CHIP_CEDAR) { 3329 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) { 3330 WREG32(RLC_UCODE_ADDR, i); 3331 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3332 } 3333 } else if (rdev->family >= CHIP_RV770) { 3334 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { 3335 WREG32(RLC_UCODE_ADDR, i); 3336 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3337 } 3338 } else { 3339 for (i = 0; i < RLC_UCODE_SIZE; i++) { 3340 WREG32(RLC_UCODE_ADDR, i); 3341 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); 3342 } 3343 } 3344 WREG32(RLC_UCODE_ADDR, 0); 3345 3346 r600_rlc_start(rdev); 3347 3348 return 0; 3349 } 3350 3351 static void r600_enable_interrupts(struct radeon_device *rdev) 3352 { 3353 u32 ih_cntl = RREG32(IH_CNTL); 3354 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3355 3356 ih_cntl |= ENABLE_INTR; 3357 ih_rb_cntl |= IH_RB_ENABLE; 3358 WREG32(IH_CNTL, ih_cntl); 3359 WREG32(IH_RB_CNTL, ih_rb_cntl); 3360 rdev->ih.enabled = true; 3361 } 3362 3363 void r600_disable_interrupts(struct radeon_device *rdev) 3364 { 3365 u32 ih_rb_cntl = RREG32(IH_RB_CNTL); 3366 u32 ih_cntl = RREG32(IH_CNTL); 3367 3368 ih_rb_cntl &= ~IH_RB_ENABLE; 3369 ih_cntl &= ~ENABLE_INTR; 3370 WREG32(IH_RB_CNTL, ih_rb_cntl); 3371 WREG32(IH_CNTL, ih_cntl); 3372 /* set rptr, wptr to 0 */ 3373 WREG32(IH_RB_RPTR, 0); 3374 WREG32(IH_RB_WPTR, 0); 3375 rdev->ih.enabled = false; 3376 rdev->ih.rptr = 0; 3377 } 3378 3379 static void r600_disable_interrupt_state(struct radeon_device *rdev) 3380 { 3381 u32 tmp; 3382 3383 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3384 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 3385 WREG32(DMA_CNTL, tmp); 3386 WREG32(GRBM_INT_CNTL, 0); 3387 WREG32(DxMODE_INT_MASK, 0); 3388 WREG32(D1GRPH_INTERRUPT_CONTROL, 0); 3389 WREG32(D2GRPH_INTERRUPT_CONTROL, 0); 3390 if (ASIC_IS_DCE3(rdev)) { 3391 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); 3392 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); 3393 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3394 WREG32(DC_HPD1_INT_CONTROL, tmp); 3395 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3396 WREG32(DC_HPD2_INT_CONTROL, tmp); 3397 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3398 WREG32(DC_HPD3_INT_CONTROL, tmp); 3399 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3400 WREG32(DC_HPD4_INT_CONTROL, tmp); 3401 if (ASIC_IS_DCE32(rdev)) { 3402 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3403 WREG32(DC_HPD5_INT_CONTROL, tmp); 3404 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 3405 WREG32(DC_HPD6_INT_CONTROL, tmp); 3406 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3407 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp); 3408 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3409 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp); 3410 } else { 3411 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3412 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3413 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3414 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); 3415 } 3416 } else { 3417 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 3418 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 3419 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3420 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3421 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3422 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 3423 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 3424 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 3425 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3426 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3427 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3428 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); 3429 } 3430 } 3431 3432 int r600_irq_init(struct radeon_device *rdev) 3433 { 3434 int ret = 0; 3435 int rb_bufsz; 3436 u32 interrupt_cntl, ih_cntl, ih_rb_cntl; 3437 3438 /* allocate ring */ 3439 ret = r600_ih_ring_alloc(rdev); 3440 if (ret) 3441 return ret; 3442 3443 /* disable irqs */ 3444 r600_disable_interrupts(rdev); 3445 3446 /* init rlc */ 3447 ret = r600_rlc_init(rdev); 3448 if (ret) { 3449 r600_ih_ring_fini(rdev); 3450 return ret; 3451 } 3452 3453 /* setup interrupt control */ 3454 /* set dummy read address to ring address */ 3455 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); 3456 interrupt_cntl = RREG32(INTERRUPT_CNTL); 3457 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi 3458 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN 3459 */ 3460 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; 3461 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ 3462 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; 3463 WREG32(INTERRUPT_CNTL, interrupt_cntl); 3464 3465 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); 3466 rb_bufsz = drm_order(rdev->ih.ring_size / 4); 3467 3468 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | 3469 IH_WPTR_OVERFLOW_CLEAR | 3470 (rb_bufsz << 1)); 3471 3472 if (rdev->wb.enabled) 3473 ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE; 3474 3475 /* set the writeback address whether it's enabled or not */ 3476 WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC); 3477 WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF); 3478 3479 WREG32(IH_RB_CNTL, ih_rb_cntl); 3480 3481 /* set rptr, wptr to 0 */ 3482 WREG32(IH_RB_RPTR, 0); 3483 WREG32(IH_RB_WPTR, 0); 3484 3485 /* Default settings for IH_CNTL (disabled at first) */ 3486 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); 3487 /* RPTR_REARM only works if msi's are enabled */ 3488 if (rdev->msi_enabled) 3489 ih_cntl |= RPTR_REARM; 3490 WREG32(IH_CNTL, ih_cntl); 3491 3492 /* force the active interrupt state to all disabled */ 3493 if (rdev->family >= CHIP_CEDAR) 3494 evergreen_disable_interrupt_state(rdev); 3495 else 3496 r600_disable_interrupt_state(rdev); 3497 3498 /* at this point everything should be setup correctly to enable master */ 3499 #ifdef notyet 3500 pci_set_master(rdev->pdev); 3501 #endif 3502 3503 /* enable irqs */ 3504 r600_enable_interrupts(rdev); 3505 3506 return ret; 3507 } 3508 3509 void r600_irq_suspend(struct radeon_device *rdev) 3510 { 3511 r600_irq_disable(rdev); 3512 r600_rlc_stop(rdev); 3513 } 3514 3515 void r600_irq_fini(struct radeon_device *rdev) 3516 { 3517 r600_irq_suspend(rdev); 3518 r600_ih_ring_fini(rdev); 3519 } 3520 3521 int r600_irq_set(struct radeon_device *rdev) 3522 { 3523 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 3524 u32 mode_int = 0; 3525 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; 3526 u32 grbm_int_cntl = 0; 3527 u32 hdmi0, hdmi1; 3528 u32 d1grph = 0, d2grph = 0; 3529 u32 dma_cntl; 3530 3531 if (!rdev->irq.installed) { 3532 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3533 return -EINVAL; 3534 } 3535 /* don't enable anything if the ih is disabled */ 3536 if (!rdev->ih.enabled) { 3537 r600_disable_interrupts(rdev); 3538 /* force the active interrupt state to all disabled */ 3539 r600_disable_interrupt_state(rdev); 3540 return 0; 3541 } 3542 3543 if (ASIC_IS_DCE3(rdev)) { 3544 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 3545 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 3546 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 3547 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 3548 if (ASIC_IS_DCE32(rdev)) { 3549 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 3550 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 3551 hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 3552 hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 3553 } else { 3554 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3555 hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3556 } 3557 } else { 3558 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; 3559 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; 3560 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; 3561 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3562 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3563 } 3564 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE; 3565 3566 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 3567 DRM_DEBUG("r600_irq_set: sw int\n"); 3568 cp_int_cntl |= RB_INT_ENABLE; 3569 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 3570 } 3571 3572 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) { 3573 DRM_DEBUG("r600_irq_set: sw int dma\n"); 3574 dma_cntl |= TRAP_ENABLE; 3575 } 3576 3577 if (rdev->irq.crtc_vblank_int[0] || 3578 atomic_read(&rdev->irq.pflip[0])) { 3579 DRM_DEBUG("r600_irq_set: vblank 0\n"); 3580 mode_int |= D1MODE_VBLANK_INT_MASK; 3581 } 3582 if (rdev->irq.crtc_vblank_int[1] || 3583 atomic_read(&rdev->irq.pflip[1])) { 3584 DRM_DEBUG("r600_irq_set: vblank 1\n"); 3585 mode_int |= D2MODE_VBLANK_INT_MASK; 3586 } 3587 if (rdev->irq.hpd[0]) { 3588 DRM_DEBUG("r600_irq_set: hpd 1\n"); 3589 hpd1 |= DC_HPDx_INT_EN; 3590 } 3591 if (rdev->irq.hpd[1]) { 3592 DRM_DEBUG("r600_irq_set: hpd 2\n"); 3593 hpd2 |= DC_HPDx_INT_EN; 3594 } 3595 if (rdev->irq.hpd[2]) { 3596 DRM_DEBUG("r600_irq_set: hpd 3\n"); 3597 hpd3 |= DC_HPDx_INT_EN; 3598 } 3599 if (rdev->irq.hpd[3]) { 3600 DRM_DEBUG("r600_irq_set: hpd 4\n"); 3601 hpd4 |= DC_HPDx_INT_EN; 3602 } 3603 if (rdev->irq.hpd[4]) { 3604 DRM_DEBUG("r600_irq_set: hpd 5\n"); 3605 hpd5 |= DC_HPDx_INT_EN; 3606 } 3607 if (rdev->irq.hpd[5]) { 3608 DRM_DEBUG("r600_irq_set: hpd 6\n"); 3609 hpd6 |= DC_HPDx_INT_EN; 3610 } 3611 if (rdev->irq.afmt[0]) { 3612 DRM_DEBUG("r600_irq_set: hdmi 0\n"); 3613 hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK; 3614 } 3615 if (rdev->irq.afmt[1]) { 3616 DRM_DEBUG("r600_irq_set: hdmi 0\n"); 3617 hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK; 3618 } 3619 3620 WREG32(CP_INT_CNTL, cp_int_cntl); 3621 WREG32(DMA_CNTL, dma_cntl); 3622 WREG32(DxMODE_INT_MASK, mode_int); 3623 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); 3624 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); 3625 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3626 if (ASIC_IS_DCE3(rdev)) { 3627 WREG32(DC_HPD1_INT_CONTROL, hpd1); 3628 WREG32(DC_HPD2_INT_CONTROL, hpd2); 3629 WREG32(DC_HPD3_INT_CONTROL, hpd3); 3630 WREG32(DC_HPD4_INT_CONTROL, hpd4); 3631 if (ASIC_IS_DCE32(rdev)) { 3632 WREG32(DC_HPD5_INT_CONTROL, hpd5); 3633 WREG32(DC_HPD6_INT_CONTROL, hpd6); 3634 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0); 3635 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1); 3636 } else { 3637 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 3638 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 3639 } 3640 } else { 3641 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); 3642 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); 3643 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); 3644 WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0); 3645 WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1); 3646 } 3647 3648 return 0; 3649 } 3650 3651 static void r600_irq_ack(struct radeon_device *rdev) 3652 { 3653 u32 tmp; 3654 3655 if (ASIC_IS_DCE3(rdev)) { 3656 rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); 3657 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); 3658 rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); 3659 if (ASIC_IS_DCE32(rdev)) { 3660 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0); 3661 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1); 3662 } else { 3663 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 3664 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS); 3665 } 3666 } else { 3667 rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS); 3668 rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 3669 rdev->irq.stat_regs.r600.disp_int_cont2 = 0; 3670 rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS); 3671 rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS); 3672 } 3673 rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS); 3674 rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS); 3675 3676 if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED) 3677 WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 3678 if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED) 3679 WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR); 3680 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) 3681 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3682 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) 3683 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3684 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) 3685 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); 3686 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) 3687 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); 3688 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { 3689 if (ASIC_IS_DCE3(rdev)) { 3690 tmp = RREG32(DC_HPD1_INT_CONTROL); 3691 tmp |= DC_HPDx_INT_ACK; 3692 WREG32(DC_HPD1_INT_CONTROL, tmp); 3693 } else { 3694 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); 3695 tmp |= DC_HPDx_INT_ACK; 3696 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); 3697 } 3698 } 3699 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { 3700 if (ASIC_IS_DCE3(rdev)) { 3701 tmp = RREG32(DC_HPD2_INT_CONTROL); 3702 tmp |= DC_HPDx_INT_ACK; 3703 WREG32(DC_HPD2_INT_CONTROL, tmp); 3704 } else { 3705 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); 3706 tmp |= DC_HPDx_INT_ACK; 3707 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); 3708 } 3709 } 3710 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { 3711 if (ASIC_IS_DCE3(rdev)) { 3712 tmp = RREG32(DC_HPD3_INT_CONTROL); 3713 tmp |= DC_HPDx_INT_ACK; 3714 WREG32(DC_HPD3_INT_CONTROL, tmp); 3715 } else { 3716 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); 3717 tmp |= DC_HPDx_INT_ACK; 3718 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); 3719 } 3720 } 3721 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { 3722 tmp = RREG32(DC_HPD4_INT_CONTROL); 3723 tmp |= DC_HPDx_INT_ACK; 3724 WREG32(DC_HPD4_INT_CONTROL, tmp); 3725 } 3726 if (ASIC_IS_DCE32(rdev)) { 3727 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { 3728 tmp = RREG32(DC_HPD5_INT_CONTROL); 3729 tmp |= DC_HPDx_INT_ACK; 3730 WREG32(DC_HPD5_INT_CONTROL, tmp); 3731 } 3732 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { 3733 tmp = RREG32(DC_HPD5_INT_CONTROL); 3734 tmp |= DC_HPDx_INT_ACK; 3735 WREG32(DC_HPD6_INT_CONTROL, tmp); 3736 } 3737 if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) { 3738 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0); 3739 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 3740 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp); 3741 } 3742 if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) { 3743 tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1); 3744 tmp |= AFMT_AZ_FORMAT_WTRIG_ACK; 3745 WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp); 3746 } 3747 } else { 3748 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { 3749 tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL); 3750 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; 3751 WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp); 3752 } 3753 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { 3754 if (ASIC_IS_DCE3(rdev)) { 3755 tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL); 3756 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; 3757 WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp); 3758 } else { 3759 tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL); 3760 tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK; 3761 WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp); 3762 } 3763 } 3764 } 3765 } 3766 3767 void r600_irq_disable(struct radeon_device *rdev) 3768 { 3769 r600_disable_interrupts(rdev); 3770 /* Wait and acknowledge irq */ 3771 mdelay(1); 3772 r600_irq_ack(rdev); 3773 r600_disable_interrupt_state(rdev); 3774 } 3775 3776 static u32 r600_get_ih_wptr(struct radeon_device *rdev) 3777 { 3778 u32 wptr, tmp; 3779 3780 if (rdev->wb.enabled) 3781 wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]); 3782 else 3783 wptr = RREG32(IH_RB_WPTR); 3784 3785 if (wptr & RB_OVERFLOW) { 3786 /* When a ring buffer overflow happen start parsing interrupt 3787 * from the last not overwritten vector (wptr + 16). Hopefully 3788 * this should allow us to catchup. 3789 */ 3790 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n", 3791 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask); 3792 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask; 3793 tmp = RREG32(IH_RB_CNTL); 3794 tmp |= IH_WPTR_OVERFLOW_CLEAR; 3795 WREG32(IH_RB_CNTL, tmp); 3796 } 3797 return (wptr & rdev->ih.ptr_mask); 3798 } 3799 3800 /* r600 IV Ring 3801 * Each IV ring entry is 128 bits: 3802 * [7:0] - interrupt source id 3803 * [31:8] - reserved 3804 * [59:32] - interrupt source data 3805 * [127:60] - reserved 3806 * 3807 * The basic interrupt vector entries 3808 * are decoded as follows: 3809 * src_id src_data description 3810 * 1 0 D1 Vblank 3811 * 1 1 D1 Vline 3812 * 5 0 D2 Vblank 3813 * 5 1 D2 Vline 3814 * 19 0 FP Hot plug detection A 3815 * 19 1 FP Hot plug detection B 3816 * 19 2 DAC A auto-detection 3817 * 19 3 DAC B auto-detection 3818 * 21 4 HDMI block A 3819 * 21 5 HDMI block B 3820 * 176 - CP_INT RB 3821 * 177 - CP_INT IB1 3822 * 178 - CP_INT IB2 3823 * 181 - EOP Interrupt 3824 * 233 - GUI Idle 3825 * 3826 * Note, these are based on r600 and may need to be 3827 * adjusted or added to on newer asics 3828 */ 3829 3830 int r600_irq_process(struct radeon_device *rdev) 3831 { 3832 u32 wptr; 3833 u32 rptr; 3834 u32 src_id, src_data; 3835 u32 ring_index; 3836 bool queue_hotplug = false; 3837 bool queue_hdmi = false; 3838 3839 if (!rdev->ih.enabled || rdev->shutdown) 3840 return IRQ_NONE; 3841 3842 /* No MSIs, need a dummy read to flush PCI DMAs */ 3843 if (!rdev->msi_enabled) 3844 RREG32(IH_RB_WPTR); 3845 3846 wptr = r600_get_ih_wptr(rdev); 3847 3848 if (wptr == rdev->ih.rptr) 3849 return IRQ_NONE; 3850 restart_ih: 3851 /* is somebody else already processing irqs? */ 3852 if (atomic_xchg(&rdev->ih.lock, 1)) 3853 return IRQ_NONE; 3854 3855 rptr = rdev->ih.rptr; 3856 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 3857 3858 /* Order reading of wptr vs. reading of IH ring data */ 3859 rmb(); 3860 3861 /* display interrupts */ 3862 r600_irq_ack(rdev); 3863 3864 while (rptr != wptr) { 3865 /* wptr/rptr are in bytes! */ 3866 ring_index = rptr / 4; 3867 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; 3868 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; 3869 3870 switch (src_id) { 3871 case 1: /* D1 vblank/vline */ 3872 switch (src_data) { 3873 case 0: /* D1 vblank */ 3874 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) { 3875 if (rdev->irq.crtc_vblank_int[0]) { 3876 drm_handle_vblank(rdev->ddev, 0); 3877 rdev->pm.vblank_sync = true; 3878 wake_up(&rdev->irq.vblank_queue); 3879 } 3880 if (atomic_read(&rdev->irq.pflip[0])) 3881 radeon_crtc_handle_flip(rdev, 0); 3882 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT; 3883 DRM_DEBUG("IH: D1 vblank\n"); 3884 } 3885 break; 3886 case 1: /* D1 vline */ 3887 if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) { 3888 rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT; 3889 DRM_DEBUG("IH: D1 vline\n"); 3890 } 3891 break; 3892 default: 3893 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3894 break; 3895 } 3896 break; 3897 case 5: /* D2 vblank/vline */ 3898 switch (src_data) { 3899 case 0: /* D2 vblank */ 3900 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) { 3901 if (rdev->irq.crtc_vblank_int[1]) { 3902 drm_handle_vblank(rdev->ddev, 1); 3903 rdev->pm.vblank_sync = true; 3904 wake_up(&rdev->irq.vblank_queue); 3905 } 3906 if (atomic_read(&rdev->irq.pflip[1])) 3907 radeon_crtc_handle_flip(rdev, 1); 3908 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT; 3909 DRM_DEBUG("IH: D2 vblank\n"); 3910 } 3911 break; 3912 case 1: /* D1 vline */ 3913 if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) { 3914 rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT; 3915 DRM_DEBUG("IH: D2 vline\n"); 3916 } 3917 break; 3918 default: 3919 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3920 break; 3921 } 3922 break; 3923 case 19: /* HPD/DAC hotplug */ 3924 switch (src_data) { 3925 case 0: 3926 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) { 3927 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT; 3928 queue_hotplug = true; 3929 DRM_DEBUG("IH: HPD1\n"); 3930 } 3931 break; 3932 case 1: 3933 if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) { 3934 rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT; 3935 queue_hotplug = true; 3936 DRM_DEBUG("IH: HPD2\n"); 3937 } 3938 break; 3939 case 4: 3940 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) { 3941 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT; 3942 queue_hotplug = true; 3943 DRM_DEBUG("IH: HPD3\n"); 3944 } 3945 break; 3946 case 5: 3947 if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) { 3948 rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT; 3949 queue_hotplug = true; 3950 DRM_DEBUG("IH: HPD4\n"); 3951 } 3952 break; 3953 case 10: 3954 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) { 3955 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT; 3956 queue_hotplug = true; 3957 DRM_DEBUG("IH: HPD5\n"); 3958 } 3959 break; 3960 case 12: 3961 if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) { 3962 rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT; 3963 queue_hotplug = true; 3964 DRM_DEBUG("IH: HPD6\n"); 3965 } 3966 break; 3967 default: 3968 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3969 break; 3970 } 3971 break; 3972 case 21: /* hdmi */ 3973 switch (src_data) { 3974 case 4: 3975 if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) { 3976 rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG; 3977 queue_hdmi = true; 3978 DRM_DEBUG("IH: HDMI0\n"); 3979 } 3980 break; 3981 case 5: 3982 if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) { 3983 rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG; 3984 queue_hdmi = true; 3985 DRM_DEBUG("IH: HDMI1\n"); 3986 } 3987 break; 3988 default: 3989 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); 3990 break; 3991 } 3992 break; 3993 case 176: /* CP_INT in ring buffer */ 3994 case 177: /* CP_INT in IB1 */ 3995 case 178: /* CP_INT in IB2 */ 3996 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); 3997 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3998 break; 3999 case 181: /* CP EOP event */ 4000 DRM_DEBUG("IH: CP EOP\n"); 4001 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 4002 break; 4003 case 224: /* DMA trap event */ 4004 DRM_DEBUG("IH: DMA trap\n"); 4005 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX); 4006 break; 4007 case 233: /* GUI IDLE */ 4008 DRM_DEBUG("IH: GUI idle\n"); 4009 break; 4010 default: 4011 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 4012 break; 4013 } 4014 4015 /* wptr/rptr are in bytes! */ 4016 rptr += 16; 4017 rptr &= rdev->ih.ptr_mask; 4018 } 4019 if (queue_hotplug) 4020 task_add(systq, &rdev->hotplug_task); 4021 if (queue_hdmi) 4022 task_add(systq, &rdev->audio_task); 4023 rdev->ih.rptr = rptr; 4024 WREG32(IH_RB_RPTR, rdev->ih.rptr); 4025 atomic_set(&rdev->ih.lock, 0); 4026 4027 /* make sure wptr hasn't changed while processing */ 4028 wptr = r600_get_ih_wptr(rdev); 4029 if (wptr != rptr) 4030 goto restart_ih; 4031 4032 return IRQ_HANDLED; 4033 } 4034 4035 /* 4036 * Debugfs info 4037 */ 4038 #if defined(CONFIG_DEBUG_FS) 4039 4040 static int r600_debugfs_mc_info(struct seq_file *m, void *data) 4041 { 4042 struct drm_info_node *node = (struct drm_info_node *) m->private; 4043 struct drm_device *dev = node->minor->dev; 4044 struct radeon_device *rdev = dev->dev_private; 4045 4046 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS); 4047 DREG32_SYS(m, rdev, VM_L2_STATUS); 4048 return 0; 4049 } 4050 4051 static struct drm_info_list r600_mc_info_list[] = { 4052 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL}, 4053 }; 4054 #endif 4055 4056 int r600_debugfs_mc_info_init(struct radeon_device *rdev) 4057 { 4058 #if defined(CONFIG_DEBUG_FS) 4059 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list)); 4060 #else 4061 return 0; 4062 #endif 4063 } 4064 4065 /** 4066 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl 4067 * rdev: radeon device structure 4068 * bo: buffer object struct which userspace is waiting for idle 4069 * 4070 * Some R6XX/R7XX doesn't seems to take into account HDP flush performed 4071 * through ring buffer, this leads to corruption in rendering, see 4072 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we 4073 * directly perform HDP flush by writing register through MMIO. 4074 */ 4075 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) 4076 { 4077 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read 4078 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. 4079 * This seems to cause problems on some AGP cards. Just use the old 4080 * method for them. 4081 */ 4082 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && 4083 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { 4084 volatile uint32_t *ptr = rdev->vram_scratch.ptr; 4085 u32 tmp; 4086 4087 WREG32(HDP_DEBUG1, 0); 4088 tmp = *ptr; 4089 } else 4090 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); 4091 } 4092 4093 void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes) 4094 { 4095 u32 link_width_cntl, mask, target_reg; 4096 4097 if (rdev->flags & RADEON_IS_IGP) 4098 return; 4099 4100 if (!(rdev->flags & RADEON_IS_PCIE)) 4101 return; 4102 4103 /* x2 cards have a special sequence */ 4104 if (ASIC_IS_X2(rdev)) 4105 return; 4106 4107 /* FIXME wait for idle */ 4108 4109 switch (lanes) { 4110 case 0: 4111 mask = RADEON_PCIE_LC_LINK_WIDTH_X0; 4112 break; 4113 case 1: 4114 mask = RADEON_PCIE_LC_LINK_WIDTH_X1; 4115 break; 4116 case 2: 4117 mask = RADEON_PCIE_LC_LINK_WIDTH_X2; 4118 break; 4119 case 4: 4120 mask = RADEON_PCIE_LC_LINK_WIDTH_X4; 4121 break; 4122 case 8: 4123 mask = RADEON_PCIE_LC_LINK_WIDTH_X8; 4124 break; 4125 case 12: 4126 mask = RADEON_PCIE_LC_LINK_WIDTH_X12; 4127 break; 4128 case 16: 4129 default: 4130 mask = RADEON_PCIE_LC_LINK_WIDTH_X16; 4131 break; 4132 } 4133 4134 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4135 4136 if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == 4137 (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) 4138 return; 4139 4140 if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS) 4141 return; 4142 4143 link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | 4144 RADEON_PCIE_LC_RECONFIG_NOW | 4145 R600_PCIE_LC_RENEGOTIATE_EN | 4146 R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE); 4147 link_width_cntl |= mask; 4148 4149 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4150 4151 /* some northbridges can renegotiate the link rather than requiring 4152 * a complete re-config. 4153 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.) 4154 */ 4155 if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT) 4156 link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT; 4157 else 4158 link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE; 4159 4160 WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | 4161 RADEON_PCIE_LC_RECONFIG_NOW)); 4162 4163 if (rdev->family >= CHIP_RV770) 4164 target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX; 4165 else 4166 target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX; 4167 4168 /* wait for lane set to complete */ 4169 link_width_cntl = RREG32(target_reg); 4170 while (link_width_cntl == 0xffffffff) 4171 link_width_cntl = RREG32(target_reg); 4172 4173 } 4174 4175 int r600_get_pcie_lanes(struct radeon_device *rdev) 4176 { 4177 u32 link_width_cntl; 4178 4179 if (rdev->flags & RADEON_IS_IGP) 4180 return 0; 4181 4182 if (!(rdev->flags & RADEON_IS_PCIE)) 4183 return 0; 4184 4185 /* x2 cards have a special sequence */ 4186 if (ASIC_IS_X2(rdev)) 4187 return 0; 4188 4189 /* FIXME wait for idle */ 4190 4191 link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); 4192 4193 switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { 4194 case RADEON_PCIE_LC_LINK_WIDTH_X0: 4195 return 0; 4196 case RADEON_PCIE_LC_LINK_WIDTH_X1: 4197 return 1; 4198 case RADEON_PCIE_LC_LINK_WIDTH_X2: 4199 return 2; 4200 case RADEON_PCIE_LC_LINK_WIDTH_X4: 4201 return 4; 4202 case RADEON_PCIE_LC_LINK_WIDTH_X8: 4203 return 8; 4204 case RADEON_PCIE_LC_LINK_WIDTH_X16: 4205 default: 4206 return 16; 4207 } 4208 } 4209 4210 static void r600_pcie_gen2_enable(struct radeon_device *rdev) 4211 { 4212 u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp; 4213 u16 link_cntl2; 4214 u32 mask; 4215 int ret; 4216 4217 if (radeon_pcie_gen2 == 0) 4218 return; 4219 4220 if (rdev->flags & RADEON_IS_IGP) 4221 return; 4222 4223 if (!(rdev->flags & RADEON_IS_PCIE)) 4224 return; 4225 4226 /* x2 cards have a special sequence */ 4227 if (ASIC_IS_X2(rdev)) 4228 return; 4229 4230 /* only RV6xx+ chips are supported */ 4231 if (rdev->family <= CHIP_R600) 4232 return; 4233 4234 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask); 4235 if (ret != 0) 4236 return; 4237 4238 if (!(mask & DRM_PCIE_SPEED_50)) 4239 return; 4240 4241 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4242 if (speed_cntl & LC_CURRENT_DATA_RATE) { 4243 DRM_INFO("PCIE gen 2 link speeds already enabled\n"); 4244 return; 4245 } 4246 4247 DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n"); 4248 4249 /* 55 nm r6xx asics */ 4250 if ((rdev->family == CHIP_RV670) || 4251 (rdev->family == CHIP_RV620) || 4252 (rdev->family == CHIP_RV635)) { 4253 /* advertise upconfig capability */ 4254 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4255 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4256 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4257 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4258 if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) { 4259 lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT; 4260 link_width_cntl &= ~(LC_LINK_WIDTH_MASK | 4261 LC_RECONFIG_ARC_MISSING_ESCAPE); 4262 link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN; 4263 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4264 } else { 4265 link_width_cntl |= LC_UPCONFIGURE_DIS; 4266 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4267 } 4268 } 4269 4270 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4271 if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) && 4272 (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) { 4273 4274 /* 55 nm r6xx asics */ 4275 if ((rdev->family == CHIP_RV670) || 4276 (rdev->family == CHIP_RV620) || 4277 (rdev->family == CHIP_RV635)) { 4278 WREG32(MM_CFGREGS_CNTL, 0x8); 4279 link_cntl2 = RREG32(0x4088); 4280 WREG32(MM_CFGREGS_CNTL, 0); 4281 /* not supported yet */ 4282 if (link_cntl2 & SELECTABLE_DEEMPHASIS) 4283 return; 4284 } 4285 4286 speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK; 4287 speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT); 4288 speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK; 4289 speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE; 4290 speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE; 4291 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4292 4293 tmp = RREG32(0x541c); 4294 WREG32(0x541c, tmp | 0x8); 4295 WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN); 4296 link_cntl2 = RREG16(0x4088); 4297 link_cntl2 &= ~TARGET_LINK_SPEED_MASK; 4298 link_cntl2 |= 0x2; 4299 WREG16(0x4088, link_cntl2); 4300 WREG32(MM_CFGREGS_CNTL, 0); 4301 4302 if ((rdev->family == CHIP_RV670) || 4303 (rdev->family == CHIP_RV620) || 4304 (rdev->family == CHIP_RV635)) { 4305 training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL); 4306 training_cntl &= ~LC_POINT_7_PLUS_EN; 4307 WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl); 4308 } else { 4309 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4310 speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN; 4311 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4312 } 4313 4314 speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL); 4315 speed_cntl |= LC_GEN2_EN_STRAP; 4316 WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl); 4317 4318 } else { 4319 link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL); 4320 /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */ 4321 if (1) 4322 link_width_cntl |= LC_UPCONFIGURE_DIS; 4323 else 4324 link_width_cntl &= ~LC_UPCONFIGURE_DIS; 4325 WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); 4326 } 4327 } 4328 4329 /** 4330 * r600_get_gpu_clock - return GPU clock counter snapshot 4331 * 4332 * @rdev: radeon_device pointer 4333 * 4334 * Fetches a GPU clock counter snapshot (R6xx-cayman). 4335 * Returns the 64 bit clock counter snapshot. 4336 */ 4337 uint64_t r600_get_gpu_clock(struct radeon_device *rdev) 4338 { 4339 uint64_t clock; 4340 4341 mutex_lock(&rdev->gpu_clock_mutex); 4342 WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); 4343 clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | 4344 ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); 4345 mutex_unlock(&rdev->gpu_clock_mutex); 4346 return clock; 4347 } 4348