1 // SPDX-License-Identifier: MIT 2 3 /* 4 * Copyright © 2019 Intel Corporation 5 */ 6 7 #include <linux/seq_file.h> 8 #include <linux/string_helpers.h> 9 10 #include "i915_drv.h" 11 #include "i915_reg.h" 12 #include "intel_gt.h" 13 #include "intel_gt_clock_utils.h" 14 #include "intel_gt_debugfs.h" 15 #include "intel_gt_pm.h" 16 #include "intel_gt_pm_debugfs.h" 17 #include "intel_gt_regs.h" 18 #include "intel_llc.h" 19 #include "intel_mchbar_regs.h" 20 #include "intel_pcode.h" 21 #include "intel_rc6.h" 22 #include "intel_rps.h" 23 #include "intel_runtime_pm.h" 24 #include "intel_uncore.h" 25 #include "vlv_sideband.h" 26 27 #ifdef notyet 28 29 void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt) 30 { 31 atomic_inc(>->user_wakeref); 32 intel_gt_pm_get(gt); 33 if (GRAPHICS_VER(gt->i915) >= 6) 34 intel_uncore_forcewake_user_get(gt->uncore); 35 } 36 37 void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt) 38 { 39 if (GRAPHICS_VER(gt->i915) >= 6) 40 intel_uncore_forcewake_user_put(gt->uncore); 41 intel_gt_pm_put(gt); 42 atomic_dec(>->user_wakeref); 43 } 44 45 static int forcewake_user_open(struct inode *inode, struct file *file) 46 { 47 struct intel_gt *gt = inode->i_private; 48 49 intel_gt_pm_debugfs_forcewake_user_open(gt); 50 51 return 0; 52 } 53 54 static int forcewake_user_release(struct inode *inode, struct file *file) 55 { 56 struct intel_gt *gt = inode->i_private; 57 58 intel_gt_pm_debugfs_forcewake_user_release(gt); 59 60 return 0; 61 } 62 63 static const struct file_operations forcewake_user_fops = { 64 .owner = THIS_MODULE, 65 .open = forcewake_user_open, 66 .release = forcewake_user_release, 67 }; 68 69 static int fw_domains_show(struct seq_file *m, void *data) 70 { 71 struct intel_gt *gt = m->private; 72 struct intel_uncore *uncore = gt->uncore; 73 struct intel_uncore_forcewake_domain *fw_domain; 74 unsigned int tmp; 75 76 seq_printf(m, "user.bypass_count = %u\n", 77 uncore->user_forcewake_count); 78 79 for_each_fw_domain(fw_domain, uncore, tmp) 80 seq_printf(m, "%s.wake_count = %u\n", 81 intel_uncore_forcewake_domain_to_str(fw_domain->id), 82 READ_ONCE(fw_domain->wake_count)); 83 84 return 0; 85 } 86 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains); 87 88 static void print_rc6_res(struct seq_file *m, 89 const char *title, 90 const i915_reg_t reg) 91 { 92 struct intel_gt *gt = m->private; 93 intel_wakeref_t wakeref; 94 95 with_intel_runtime_pm(gt->uncore->rpm, wakeref) 96 seq_printf(m, "%s %u (%llu us)\n", title, 97 intel_uncore_read(gt->uncore, reg), 98 intel_rc6_residency_us(>->rc6, reg)); 99 } 100 101 static int vlv_drpc(struct seq_file *m) 102 { 103 struct intel_gt *gt = m->private; 104 struct intel_uncore *uncore = gt->uncore; 105 u32 rcctl1, pw_status, mt_fwake_req; 106 107 mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT); 108 pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS); 109 rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL); 110 111 seq_printf(m, "RC6 Enabled: %s\n", 112 str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE | 113 GEN6_RC_CTL_EI_MODE(1)))); 114 seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req); 115 seq_printf(m, "Render Power Well: %s\n", 116 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down"); 117 seq_printf(m, "Media Power Well: %s\n", 118 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down"); 119 120 print_rc6_res(m, "Render RC6 residency since boot:", GEN6_GT_GFX_RC6); 121 print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6); 122 123 return fw_domains_show(m, NULL); 124 } 125 126 static int gen6_drpc(struct seq_file *m) 127 { 128 struct intel_gt *gt = m->private; 129 struct drm_i915_private *i915 = gt->i915; 130 struct intel_uncore *uncore = gt->uncore; 131 u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0; 132 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; 133 134 mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT); 135 gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS); 136 137 rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL); 138 if (GRAPHICS_VER(i915) >= 9) { 139 gen9_powergate_enable = 140 intel_uncore_read(uncore, GEN9_PG_ENABLE); 141 gen9_powergate_status = 142 intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS); 143 } 144 145 if (GRAPHICS_VER(i915) <= 7) 146 snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); 147 148 seq_printf(m, "RC1e Enabled: %s\n", 149 str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); 150 seq_printf(m, "RC6 Enabled: %s\n", 151 str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE)); 152 if (GRAPHICS_VER(i915) >= 9) { 153 seq_printf(m, "Render Well Gating Enabled: %s\n", 154 str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE)); 155 seq_printf(m, "Media Well Gating Enabled: %s\n", 156 str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE)); 157 } 158 seq_printf(m, "Deep RC6 Enabled: %s\n", 159 str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE)); 160 seq_printf(m, "Deepest RC6 Enabled: %s\n", 161 str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE)); 162 seq_puts(m, "Current RC state: "); 163 switch (gt_core_status & GEN6_RCn_MASK) { 164 case GEN6_RC0: 165 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK) 166 seq_puts(m, "Core Power Down\n"); 167 else 168 seq_puts(m, "on\n"); 169 break; 170 case GEN6_RC3: 171 seq_puts(m, "RC3\n"); 172 break; 173 case GEN6_RC6: 174 seq_puts(m, "RC6\n"); 175 break; 176 case GEN6_RC7: 177 seq_puts(m, "RC7\n"); 178 break; 179 default: 180 seq_puts(m, "Unknown\n"); 181 break; 182 } 183 184 seq_printf(m, "Core Power Down: %s\n", 185 str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 186 seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req); 187 if (GRAPHICS_VER(i915) >= 9) { 188 seq_printf(m, "Render Power Well: %s\n", 189 (gen9_powergate_status & 190 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down"); 191 seq_printf(m, "Media Power Well: %s\n", 192 (gen9_powergate_status & 193 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down"); 194 } 195 196 /* Not exactly sure what this is */ 197 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:", 198 GEN6_GT_GFX_RC6_LOCKED); 199 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6); 200 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); 201 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); 202 203 if (GRAPHICS_VER(i915) <= 7) { 204 seq_printf(m, "RC6 voltage: %dmV\n", 205 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 206 seq_printf(m, "RC6+ voltage: %dmV\n", 207 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 208 seq_printf(m, "RC6++ voltage: %dmV\n", 209 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 210 } 211 212 return fw_domains_show(m, NULL); 213 } 214 215 static int ilk_drpc(struct seq_file *m) 216 { 217 struct intel_gt *gt = m->private; 218 struct intel_uncore *uncore = gt->uncore; 219 u32 rgvmodectl, rstdbyctl; 220 u16 crstandvid; 221 222 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); 223 rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL); 224 crstandvid = intel_uncore_read16(uncore, CRSTANDVID); 225 226 seq_printf(m, "HD boost: %s\n", 227 str_yes_no(rgvmodectl & MEMMODE_BOOST_EN)); 228 seq_printf(m, "Boost freq: %d\n", 229 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> 230 MEMMODE_BOOST_FREQ_SHIFT); 231 seq_printf(m, "HW control enabled: %s\n", 232 str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN)); 233 seq_printf(m, "SW control enabled: %s\n", 234 str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN)); 235 seq_printf(m, "Gated voltage change: %s\n", 236 str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE)); 237 seq_printf(m, "Starting frequency: P%d\n", 238 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); 239 seq_printf(m, "Max P-state: P%d\n", 240 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); 241 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); 242 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f)); 243 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f)); 244 seq_printf(m, "Render standby enabled: %s\n", 245 str_yes_no(!(rstdbyctl & RCX_SW_EXIT))); 246 seq_puts(m, "Current RS state: "); 247 switch (rstdbyctl & RSX_STATUS_MASK) { 248 case RSX_STATUS_ON: 249 seq_puts(m, "on\n"); 250 break; 251 case RSX_STATUS_RC1: 252 seq_puts(m, "RC1\n"); 253 break; 254 case RSX_STATUS_RC1E: 255 seq_puts(m, "RC1E\n"); 256 break; 257 case RSX_STATUS_RS1: 258 seq_puts(m, "RS1\n"); 259 break; 260 case RSX_STATUS_RS2: 261 seq_puts(m, "RS2 (RC6)\n"); 262 break; 263 case RSX_STATUS_RS3: 264 seq_puts(m, "RC3 (RC6+)\n"); 265 break; 266 default: 267 seq_puts(m, "unknown\n"); 268 break; 269 } 270 271 return 0; 272 } 273 274 static int drpc_show(struct seq_file *m, void *unused) 275 { 276 struct intel_gt *gt = m->private; 277 struct drm_i915_private *i915 = gt->i915; 278 intel_wakeref_t wakeref; 279 int err = -ENODEV; 280 281 with_intel_runtime_pm(gt->uncore->rpm, wakeref) { 282 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 283 err = vlv_drpc(m); 284 else if (GRAPHICS_VER(i915) >= 6) 285 err = gen6_drpc(m); 286 else 287 err = ilk_drpc(m); 288 } 289 290 return err; 291 } 292 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc); 293 294 void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p) 295 { 296 struct drm_i915_private *i915 = gt->i915; 297 struct intel_uncore *uncore = gt->uncore; 298 struct intel_rps *rps = >->rps; 299 intel_wakeref_t wakeref; 300 301 wakeref = intel_runtime_pm_get(uncore->rpm); 302 303 if (GRAPHICS_VER(i915) == 5) { 304 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); 305 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK); 306 307 drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf); 308 drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f); 309 drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >> 310 MEMSTAT_VID_SHIFT); 311 drm_printf(p, "Current P-state: %d\n", 312 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); 313 } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { 314 u32 rpmodectl, freq_sts; 315 316 rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); 317 drm_printf(p, "Video Turbo Mode: %s\n", 318 str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO)); 319 drm_printf(p, "HW control enabled: %s\n", 320 str_yes_no(rpmodectl & GEN6_RP_ENABLE)); 321 drm_printf(p, "SW control enabled: %s\n", 322 str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); 323 324 vlv_punit_get(i915); 325 freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); 326 vlv_punit_put(i915); 327 328 drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts); 329 drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq); 330 331 drm_printf(p, "actual GPU freq: %d MHz\n", 332 intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); 333 334 drm_printf(p, "current GPU freq: %d MHz\n", 335 intel_gpu_freq(rps, rps->cur_freq)); 336 337 drm_printf(p, "max GPU freq: %d MHz\n", 338 intel_gpu_freq(rps, rps->max_freq)); 339 340 drm_printf(p, "min GPU freq: %d MHz\n", 341 intel_gpu_freq(rps, rps->min_freq)); 342 343 drm_printf(p, "idle GPU freq: %d MHz\n", 344 intel_gpu_freq(rps, rps->idle_freq)); 345 346 drm_printf(p, "efficient (RPe) frequency: %d MHz\n", 347 intel_gpu_freq(rps, rps->efficient_freq)); 348 } else if (GRAPHICS_VER(i915) >= 6) { 349 u32 rp_state_limits; 350 u32 gt_perf_status; 351 struct intel_rps_freq_caps caps; 352 u32 rpmodectl, rpinclimit, rpdeclimit; 353 u32 rpstat, cagf, reqf; 354 u32 rpcurupei, rpcurup, rpprevup; 355 u32 rpcurdownei, rpcurdown, rpprevdown; 356 u32 rpupei, rpupt, rpdownei, rpdownt; 357 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; 358 359 rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS); 360 gen6_rps_get_freq_caps(rps, &caps); 361 if (IS_GEN9_LP(i915)) 362 gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS); 363 else 364 gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS); 365 366 /* RPSTAT1 is in the GT power well */ 367 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 368 369 reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ); 370 if (GRAPHICS_VER(i915) >= 9) { 371 reqf >>= 23; 372 } else { 373 reqf &= ~GEN6_TURBO_DISABLE; 374 if (IS_HASWELL(i915) || IS_BROADWELL(i915)) 375 reqf >>= 24; 376 else 377 reqf >>= 25; 378 } 379 reqf = intel_gpu_freq(rps, reqf); 380 381 rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL); 382 rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); 383 rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); 384 385 rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1); 386 rpcurupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; 387 rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; 388 rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; 389 rpcurdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; 390 rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; 391 rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; 392 393 rpupei = intel_uncore_read(uncore, GEN6_RP_UP_EI); 394 rpupt = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD); 395 396 rpdownei = intel_uncore_read(uncore, GEN6_RP_DOWN_EI); 397 rpdownt = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD); 398 399 cagf = intel_rps_read_actual_frequency(rps); 400 401 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 402 403 if (GRAPHICS_VER(i915) >= 11) { 404 pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); 405 pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); 406 /* 407 * The equivalent to the PM ISR & IIR cannot be read 408 * without affecting the current state of the system 409 */ 410 pm_isr = 0; 411 pm_iir = 0; 412 } else if (GRAPHICS_VER(i915) >= 8) { 413 pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2)); 414 pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2)); 415 pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2)); 416 pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2)); 417 } else { 418 pm_ier = intel_uncore_read(uncore, GEN6_PMIER); 419 pm_imr = intel_uncore_read(uncore, GEN6_PMIMR); 420 pm_isr = intel_uncore_read(uncore, GEN6_PMISR); 421 pm_iir = intel_uncore_read(uncore, GEN6_PMIIR); 422 } 423 pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK); 424 425 drm_printf(p, "Video Turbo Mode: %s\n", 426 str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO)); 427 drm_printf(p, "HW control enabled: %s\n", 428 str_yes_no(rpmodectl & GEN6_RP_ENABLE)); 429 drm_printf(p, "SW control enabled: %s\n", 430 str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE)); 431 432 drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n", 433 pm_ier, pm_imr, pm_mask); 434 if (GRAPHICS_VER(i915) <= 10) 435 drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n", 436 pm_isr, pm_iir); 437 drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n", 438 rps->pm_intrmsk_mbz); 439 drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 440 drm_printf(p, "Render p-state ratio: %d\n", 441 (gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8); 442 drm_printf(p, "Render p-state VID: %d\n", 443 gt_perf_status & 0xff); 444 drm_printf(p, "Render p-state limit: %d\n", 445 rp_state_limits & 0xff); 446 drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat); 447 drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl); 448 drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit); 449 drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit); 450 drm_printf(p, "RPNSWREQ: %dMHz\n", reqf); 451 drm_printf(p, "CAGF: %dMHz\n", cagf); 452 drm_printf(p, "RP CUR UP EI: %d (%lldns)\n", 453 rpcurupei, 454 intel_gt_pm_interval_to_ns(gt, rpcurupei)); 455 drm_printf(p, "RP CUR UP: %d (%lldns)\n", 456 rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup)); 457 drm_printf(p, "RP PREV UP: %d (%lldns)\n", 458 rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup)); 459 drm_printf(p, "Up threshold: %d%%\n", 460 rps->power.up_threshold); 461 drm_printf(p, "RP UP EI: %d (%lldns)\n", 462 rpupei, intel_gt_pm_interval_to_ns(gt, rpupei)); 463 drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n", 464 rpupt, intel_gt_pm_interval_to_ns(gt, rpupt)); 465 466 drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n", 467 rpcurdownei, 468 intel_gt_pm_interval_to_ns(gt, rpcurdownei)); 469 drm_printf(p, "RP CUR DOWN: %d (%lldns)\n", 470 rpcurdown, 471 intel_gt_pm_interval_to_ns(gt, rpcurdown)); 472 drm_printf(p, "RP PREV DOWN: %d (%lldns)\n", 473 rpprevdown, 474 intel_gt_pm_interval_to_ns(gt, rpprevdown)); 475 drm_printf(p, "Down threshold: %d%%\n", 476 rps->power.down_threshold); 477 drm_printf(p, "RP DOWN EI: %d (%lldns)\n", 478 rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei)); 479 drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n", 480 rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt)); 481 482 drm_printf(p, "Lowest (RPN) frequency: %dMHz\n", 483 intel_gpu_freq(rps, caps.min_freq)); 484 drm_printf(p, "Nominal (RP1) frequency: %dMHz\n", 485 intel_gpu_freq(rps, caps.rp1_freq)); 486 drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n", 487 intel_gpu_freq(rps, caps.rp0_freq)); 488 drm_printf(p, "Max overclocked frequency: %dMHz\n", 489 intel_gpu_freq(rps, rps->max_freq)); 490 491 drm_printf(p, "Current freq: %d MHz\n", 492 intel_gpu_freq(rps, rps->cur_freq)); 493 drm_printf(p, "Actual freq: %d MHz\n", cagf); 494 drm_printf(p, "Idle freq: %d MHz\n", 495 intel_gpu_freq(rps, rps->idle_freq)); 496 drm_printf(p, "Min freq: %d MHz\n", 497 intel_gpu_freq(rps, rps->min_freq)); 498 drm_printf(p, "Boost freq: %d MHz\n", 499 intel_gpu_freq(rps, rps->boost_freq)); 500 drm_printf(p, "Max freq: %d MHz\n", 501 intel_gpu_freq(rps, rps->max_freq)); 502 drm_printf(p, 503 "efficient (RPe) frequency: %d MHz\n", 504 intel_gpu_freq(rps, rps->efficient_freq)); 505 } else { 506 drm_puts(p, "no P-state info available\n"); 507 } 508 509 drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk); 510 drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq); 511 drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq); 512 513 intel_runtime_pm_put(uncore->rpm, wakeref); 514 } 515 516 static int frequency_show(struct seq_file *m, void *unused) 517 { 518 struct intel_gt *gt = m->private; 519 struct drm_printer p = drm_seq_file_printer(m); 520 521 intel_gt_pm_frequency_dump(gt, &p); 522 523 return 0; 524 } 525 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency); 526 527 static int llc_show(struct seq_file *m, void *data) 528 { 529 struct intel_gt *gt = m->private; 530 struct drm_i915_private *i915 = gt->i915; 531 const bool edram = GRAPHICS_VER(i915) > 8; 532 struct intel_rps *rps = >->rps; 533 unsigned int max_gpu_freq, min_gpu_freq; 534 intel_wakeref_t wakeref; 535 int gpu_freq, ia_freq; 536 537 seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915))); 538 seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC", 539 i915->edram_size_mb); 540 541 min_gpu_freq = rps->min_freq; 542 max_gpu_freq = rps->max_freq; 543 if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { 544 /* Convert GT frequency to 50 HZ units */ 545 min_gpu_freq /= GEN9_FREQ_SCALER; 546 max_gpu_freq /= GEN9_FREQ_SCALER; 547 } 548 549 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); 550 551 wakeref = intel_runtime_pm_get(gt->uncore->rpm); 552 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { 553 ia_freq = gpu_freq; 554 snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE, 555 &ia_freq, NULL); 556 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", 557 intel_gpu_freq(rps, 558 (gpu_freq * 559 (IS_GEN9_BC(i915) || 560 GRAPHICS_VER(i915) >= 11 ? 561 GEN9_FREQ_SCALER : 1))), 562 ((ia_freq >> 0) & 0xff) * 100, 563 ((ia_freq >> 8) & 0xff) * 100); 564 } 565 intel_runtime_pm_put(gt->uncore->rpm, wakeref); 566 567 return 0; 568 } 569 570 static bool llc_eval(void *data) 571 { 572 struct intel_gt *gt = data; 573 574 return HAS_LLC(gt->i915); 575 } 576 577 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc); 578 579 static const char *rps_power_to_str(unsigned int power) 580 { 581 static const char * const strings[] = { 582 [LOW_POWER] = "low power", 583 [BETWEEN] = "mixed", 584 [HIGH_POWER] = "high power", 585 }; 586 587 if (power >= ARRAY_SIZE(strings) || !strings[power]) 588 return "unknown"; 589 590 return strings[power]; 591 } 592 593 static int rps_boost_show(struct seq_file *m, void *data) 594 { 595 struct intel_gt *gt = m->private; 596 struct drm_i915_private *i915 = gt->i915; 597 struct intel_rps *rps = >->rps; 598 599 seq_printf(m, "RPS enabled? %s\n", 600 str_yes_no(intel_rps_is_enabled(rps))); 601 seq_printf(m, "RPS active? %s\n", 602 str_yes_no(intel_rps_is_active(rps))); 603 seq_printf(m, "GPU busy? %s, %llums\n", 604 str_yes_no(gt->awake), 605 ktime_to_ms(intel_gt_get_awake_time(gt))); 606 seq_printf(m, "Boosts outstanding? %d\n", 607 atomic_read(&rps->num_waiters)); 608 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); 609 seq_printf(m, "Frequency requested %d, actual %d\n", 610 intel_gpu_freq(rps, rps->cur_freq), 611 intel_rps_read_actual_frequency(rps)); 612 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", 613 intel_gpu_freq(rps, rps->min_freq), 614 intel_gpu_freq(rps, rps->min_freq_softlimit), 615 intel_gpu_freq(rps, rps->max_freq_softlimit), 616 intel_gpu_freq(rps, rps->max_freq)); 617 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", 618 intel_gpu_freq(rps, rps->idle_freq), 619 intel_gpu_freq(rps, rps->efficient_freq), 620 intel_gpu_freq(rps, rps->boost_freq)); 621 622 seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts)); 623 624 if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) { 625 struct intel_uncore *uncore = gt->uncore; 626 u32 rpup, rpupei; 627 u32 rpdown, rpdownei; 628 629 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); 630 rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; 631 rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; 632 rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; 633 rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; 634 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); 635 636 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", 637 rps_power_to_str(rps->power.mode)); 638 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", 639 rpup && rpupei ? 100 * rpup / rpupei : 0, 640 rps->power.up_threshold); 641 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", 642 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, 643 rps->power.down_threshold); 644 } else { 645 seq_puts(m, "\nRPS Autotuning inactive\n"); 646 } 647 648 return 0; 649 } 650 651 static bool rps_eval(void *data) 652 { 653 struct intel_gt *gt = data; 654 655 return HAS_RPS(gt->i915); 656 } 657 658 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost); 659 660 #endif /* notyet */ 661 662 void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root) 663 { 664 #ifdef notyet 665 static const struct intel_gt_debugfs_file files[] = { 666 { "drpc", &drpc_fops, NULL }, 667 { "frequency", &frequency_fops, NULL }, 668 { "forcewake", &fw_domains_fops, NULL }, 669 { "forcewake_user", &forcewake_user_fops, NULL}, 670 { "llc", &llc_fops, llc_eval }, 671 { "rps_boost", &rps_boost_fops, rps_eval }, 672 }; 673 674 intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt); 675 #endif 676 } 677