15ca02815Sjsg /*
25ca02815Sjsg * Copyright © 2014 Intel Corporation
35ca02815Sjsg *
45ca02815Sjsg * Permission is hereby granted, free of charge, to any person obtaining a
55ca02815Sjsg * copy of this software and associated documentation files (the "Software"),
65ca02815Sjsg * to deal in the Software without restriction, including without limitation
75ca02815Sjsg * the rights to use, copy, modify, merge, publish, distribute, sublicense,
85ca02815Sjsg * and/or sell copies of the Software, and to permit persons to whom the
95ca02815Sjsg * Software is furnished to do so, subject to the following conditions:
105ca02815Sjsg *
115ca02815Sjsg * The above copyright notice and this permission notice (including the next
125ca02815Sjsg * paragraph) shall be included in all copies or substantial portions of the
135ca02815Sjsg * Software.
145ca02815Sjsg *
155ca02815Sjsg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
165ca02815Sjsg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
175ca02815Sjsg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
185ca02815Sjsg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
195ca02815Sjsg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
205ca02815Sjsg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
215ca02815Sjsg * IN THE SOFTWARE.
225ca02815Sjsg *
235ca02815Sjsg */
245ca02815Sjsg
255ca02815Sjsg #include <linux/firmware.h>
265ca02815Sjsg
275ca02815Sjsg #include "i915_drv.h"
285ca02815Sjsg #include "i915_reg.h"
295ca02815Sjsg #include "intel_de.h"
305ca02815Sjsg #include "intel_dmc.h"
311bb76ff1Sjsg #include "intel_dmc_regs.h"
325ca02815Sjsg
335ca02815Sjsg /**
345ca02815Sjsg * DOC: DMC Firmware Support
355ca02815Sjsg *
 * From gen9 onwards we have a DMC (Display microcontroller) in the display
 * engine to save and restore the state of the display engine when it enters
 * a low-power state and comes back to normal.
395ca02815Sjsg */
405ca02815Sjsg
/* Firmware blob identifiers: one main DMC plus one per-pipe DMC (A-D). */
enum intel_dmc_id {
	DMC_FW_MAIN = 0,
	DMC_FW_PIPEA,
	DMC_FW_PIPEB,
	DMC_FW_PIPEC,
	DMC_FW_PIPED,
	DMC_FW_MAX	/* number of firmware slots, not a valid id */
};
49*f005ef32Sjsg
/*
 * Per-device DMC state: the firmware path/size limits plus the parsed
 * per-blob info (program payload and MMIO init sequence) for each dmc_id.
 */
struct intel_dmc {
	struct drm_i915_private *i915;
	struct work_struct work;	/* async firmware fetch/parse */
	const char *fw_path;
	u32 max_fw_size; /* bytes */
	u32 version;
	struct dmc_fw_info {
		u32 mmio_count;			/* valid entries in mmioaddr/mmiodata */
		i915_reg_t mmioaddr[20];
		u32 mmiodata[20];
		u32 dmc_offset;			/* payload offset within the package */
		u32 start_mmioaddr;		/* DMC program RAM base address */
		u32 dmc_fw_size; /* dwords */
		u32 *payload;			/* NULL until firmware is loaded */
		bool present;
	} dmc_info[DMC_FW_MAX];
};
67*f005ef32Sjsg
68*f005ef32Sjsg /* Note: This may be NULL. */
i915_to_dmc(struct drm_i915_private * i915)69*f005ef32Sjsg static struct intel_dmc *i915_to_dmc(struct drm_i915_private *i915)
70*f005ef32Sjsg {
71*f005ef32Sjsg return i915->display.dmc.dmc;
72*f005ef32Sjsg }
73*f005ef32Sjsg
/* Firmware version is packed as major << 16 | minor. */
#define DMC_VERSION(major, minor)	((major) << 16 | (minor))
#define DMC_VERSION_MAJOR(version)	((version) >> 16)
#define DMC_VERSION_MINOR(version)	((version) & 0xffff)

/* Unversioned firmware file name: "i915/<platform>_dmc.bin". */
#define DMC_PATH(platform) \
	"i915/" __stringify(platform) "_dmc.bin"

/*
 * New DMC additions should not use this. This is used solely to remain
 * compatible with systems that have not yet updated DMC blobs to use
 * unversioned file names.
 */
#define DMC_LEGACY_PATH(platform, major, minor) \
	"i915/" \
	__stringify(platform) "_dmc_ver" \
	__stringify(major) "_" \
	__stringify(minor) ".bin"

/* Maximum accepted firmware size per display version (bytes). */
#define XELPDP_DMC_MAX_FW_SIZE		0x7000
#define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

#define MTL_DMC_PATH			DMC_PATH(mtl)
MODULE_FIRMWARE(MTL_DMC_PATH);

#define DG2_DMC_PATH			DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);

#define ADLP_DMC_PATH			DMC_PATH(adlp)
#define ADLP_DMC_FALLBACK_PATH		DMC_LEGACY_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);

#define ADLS_DMC_PATH			DMC_LEGACY_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);

#define DG1_DMC_PATH			DMC_LEGACY_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);

#define RKL_DMC_PATH			DMC_LEGACY_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);

#define TGL_DMC_PATH			DMC_LEGACY_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);

#define ICL_DMC_PATH			DMC_LEGACY_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE		0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);

#define GLK_DMC_PATH			DMC_LEGACY_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE		0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);

#define KBL_DMC_PATH			DMC_LEGACY_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);

#define SKL_DMC_PATH			DMC_LEGACY_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);

#define BXT_DMC_PATH			DMC_LEGACY_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE		0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);

/* Limits and defaults used while parsing the firmware package headers. */
#define DMC_DEFAULT_FW_OFFSET		0xFFFFFFFF
#define PACKAGE_MAX_FW_INFO_ENTRIES	20
#define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
#define DMC_V1_MAX_MMIO_COUNT		8
#define DMC_V3_MAX_MMIO_COUNT		20
#define DMC_V1_MMIO_START_RANGE		0x80000

/* Map a pipe to its dedicated firmware slot (PIPE_A -> DMC_FW_PIPEA, ...). */
#define PIPE_TO_DMC_ID(pipe)		(DMC_FW_PIPEA + ((pipe) - PIPE_A))
147*f005ef32Sjsg
/* CSS header at the very start of the DMC firmware package file. */
struct intel_css_header {
	/* 0x09 for DMC */
	u32 module_type;

	/* Includes the DMC specific header in dwords */
	u32 header_len;

	/* always 0x10000 */
	u32 header_ver;

	/* Not used */
	u32 module_id;

	/* Not used */
	u32 module_vendor;

	/* in YYYYMMDD format */
	u32 date;

	/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
	u32 size;

	/* Not used */
	u32 key_size;

	/* Not used */
	u32 modulus_size;

	/* Not used */
	u32 exponent_size;

	/* Not used */
	u32 reserved1[12];

	/* Major Minor */
	u32 version;

	/* Not used */
	u32 reserved2[8];

	/* Not used */
	u32 kernel_header_info;
} __packed;
1915ca02815Sjsg
/* One entry of the FWInfo table in the package header: locates a blob. */
struct intel_fw_info {
	u8 reserved1;

	/* reserved on package_header version 1, must be 0 on version 2 */
	u8 dmc_id;

	/* Stepping (A, B, C, ..., *). * is a wildcard */
	char stepping;

	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
	char substepping;

	/* Offset of this blob's DMC header within the package */
	u32 offset;
	u32 reserved2;
} __packed;
2075ca02815Sjsg
/* Package header following the CSS header; precedes the FWInfo table. */
struct intel_package_header {
	/* DMC container header length in dwords */
	u8 header_len;

	/* 0x01, 0x02 */
	u8 header_ver;

	u8 reserved[10];

	/* Number of valid entries in the FWInfo array below */
	u32 num_entries;
} __packed;
2205ca02815Sjsg
/* Fields common to the v1 and v3 per-blob DMC headers. */
struct intel_dmc_header_base {
	/* always 0x40403E3E */
	u32 signature;

	/* DMC binary header length */
	u8 header_len;

	/* 0x01 or 0x03; selects intel_dmc_header_v1/_v3 layout */
	u8 header_ver;

	/* Reserved */
	u16 dmcc_ver;

	/* Major, Minor */
	u32 project;

	/* Firmware program size (excluding header) in dwords */
	u32 fw_size;

	/* Major Minor version */
	u32 fw_version;
} __packed;
2435ca02815Sjsg
/* Version-1 per-blob DMC header (up to DMC_V1_MAX_MMIO_COUNT MMIO pairs). */
struct intel_dmc_header_v1 {
	struct intel_dmc_header_base base;

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];

	/* FW filename */
	char dfile[32];

	u32 reserved1[2];
} __packed;
2615ca02815Sjsg
/* Version-3 per-blob DMC header (adds start_mmioaddr, larger MMIO table). */
struct intel_dmc_header_v3 {
	struct intel_dmc_header_base base;

	/* DMC RAM start MMIO address */
	u32 start_mmioaddr;

	u32 reserved[9];

	/* FW filename */
	char dfile[32];

	/* Number of valid MMIO cycles present. */
	u32 mmio_count;

	/* MMIO address */
	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];

	/* MMIO data */
	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
} __packed;
2825ca02815Sjsg
/* Hardware stepping pair used to match FWInfo entries ('*' is a wildcard). */
struct stepping_info {
	char stepping;
	char substepping;
};
2875ca02815Sjsg
/* Iterate over all firmware slots, DMC_FW_MAIN through DMC_FW_PIPED. */
#define for_each_dmc_id(__dmc_id) \
	for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)

/* Guard against out-of-range ids read from untrusted firmware headers. */
static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
{
	return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
}
295*f005ef32Sjsg
/* True when firmware for @dmc_id has been loaded (payload allocated). */
static bool has_dmc_id_fw(struct drm_i915_private *i915, enum intel_dmc_id dmc_id)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	/* dmc may be NULL before init; treat that as "no firmware" */
	return dmc && dmc->dmc_info[dmc_id].payload;
}
3021bb76ff1Sjsg
/* True when the main DMC firmware payload is loaded. */
bool intel_dmc_has_payload(struct drm_i915_private *i915)
{
	return has_dmc_id_fw(i915, DMC_FW_MAIN);
}
3075ca02815Sjsg
/*
 * Fill @si with the device's display stepping as two characters
 * (e.g. 'B','0') for matching against FWInfo entries; returns @si.
 */
static const struct stepping_info *
intel_get_stepping_info(struct drm_i915_private *i915,
			struct stepping_info *si)
{
	const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step);

	si->stepping = step_name[0];
	si->substepping = step_name[1];
	return si;
}
3185ca02815Sjsg
/* Set the DC-state debug mask bits needed after loading the firmware. */
static void gen9_set_dc_state_debugmask(struct drm_i915_private *i915)
{
	/* The below bit doesn't need to be cleared ever afterwards */
	intel_de_rmw(i915, DC_STATE_DEBUG, 0,
		     DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
	/* posting read flushes the write before continuing */
	intel_de_posting_read(i915, DC_STATE_DEBUG);
}
3261bb76ff1Sjsg
/*
 * Disable one DMC event handler: program its control register to the
 * "false" event id (never fires) and clear its handler-table pointer.
 */
static void disable_event_handler(struct drm_i915_private *i915,
				  i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	intel_de_write(i915, ctl_reg,
		       REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				      DMC_EVT_CTL_EVENT_ID_FALSE));
	intel_de_write(i915, htp_reg, 0);
}
3371bb76ff1Sjsg
/*
 * Disable the flip queue event handler, but only if its registers still
 * hold the exact configuration the firmware programs; bail out (with a
 * debug message) on anything unexpected rather than clobbering it.
 */
static void
disable_flip_queue_event(struct drm_i915_private *i915,
			 i915_reg_t ctl_reg, i915_reg_t htp_reg)
{
	u32 event_ctl;
	u32 event_htp;

	event_ctl = intel_de_read(i915, ctl_reg);
	event_htp = intel_de_read(i915, htp_reg);
	if (event_ctl != (DMC_EVT_CTL_ENABLE |
			  DMC_EVT_CTL_RECURRING |
			  REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
					 DMC_EVT_CTL_TYPE_EDGE_0_1) |
			  REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
					 DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
	    !event_htp) {
		drm_dbg_kms(&i915->drm,
			    "Unexpected DMC event configuration (control %08x htp %08x)\n",
			    event_ctl, event_htp);
		return;
	}

	disable_event_handler(i915, ctl_reg, htp_reg);
}
3621bb76ff1Sjsg
3631bb76ff1Sjsg static bool
get_flip_queue_event_regs(struct drm_i915_private * i915,enum intel_dmc_id dmc_id,i915_reg_t * ctl_reg,i915_reg_t * htp_reg)364*f005ef32Sjsg get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
3651bb76ff1Sjsg i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
3661bb76ff1Sjsg {
367*f005ef32Sjsg if (dmc_id == DMC_FW_MAIN) {
3681bb76ff1Sjsg if (DISPLAY_VER(i915) == 12) {
3691bb76ff1Sjsg *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
3701bb76ff1Sjsg *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
3711bb76ff1Sjsg
3721bb76ff1Sjsg return true;
3731bb76ff1Sjsg }
374*f005ef32Sjsg } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
3751bb76ff1Sjsg if (IS_DG2(i915)) {
3761bb76ff1Sjsg *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
3771bb76ff1Sjsg *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
3781bb76ff1Sjsg
3791bb76ff1Sjsg return true;
3801bb76ff1Sjsg }
3811bb76ff1Sjsg }
3821bb76ff1Sjsg
3831bb76ff1Sjsg return false;
3841bb76ff1Sjsg }
3851bb76ff1Sjsg
/*
 * Disable the flip queue event on every loaded firmware slot.
 * Currently limited to Tiger Lake.
 */
static void
disable_all_flip_queue_events(struct drm_i915_private *i915)
{
	enum intel_dmc_id dmc_id;

	/* TODO: check if the following applies to all D13+ platforms. */
	if (!IS_TIGERLAKE(i915))
		return;

	for_each_dmc_id(dmc_id) {
		i915_reg_t ctl_reg;
		i915_reg_t htp_reg;

		/* nothing to do for slots without loaded firmware */
		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
			continue;

		disable_flip_queue_event(i915, ctl_reg, htp_reg);
	}
}
4081bb76ff1Sjsg
/* Disable every event handler of every loaded firmware slot (GEN12+). */
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
	enum intel_dmc_id dmc_id;

	/* TODO: disable the event handlers on pre-GEN12 platforms as well */
	if (DISPLAY_VER(i915) < 12)
		return;

	for_each_dmc_id(dmc_id) {
		int handler;

		if (!has_dmc_id_fw(i915, dmc_id))
			continue;

		for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
			disable_event_handler(i915,
					      DMC_EVT_CTL(i915, dmc_id, handler),
					      DMC_EVT_HTP(i915, dmc_id, handler));
	}
}
4291bb76ff1Sjsg
/* Toggle the ADL-P/DG2 pipe DMC clock gating workaround. */
static void adlp_pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	enum pipe pipe;

	/*
	 * Wa_16015201720:adl-p,dg2
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 * For pipe C and D clock gating needs to be disabled only
	 * during initializing the firmware.
	 */
	if (enable)
		/* disable gating on all pipes while loading the firmware */
		for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     0, PIPEDMC_GATING_DIS);
	else
		/* re-enable gating only on pipes C/D; A/B stay ungated */
		for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
			intel_de_rmw(i915, CLKGATE_DIS_PSL_EXT(pipe),
				     PIPEDMC_GATING_DIS, 0);
}
4505ca02815Sjsg
/* Apply the MTL pipe DMC clock gating workaround (never undone). */
static void mtl_pipedmc_clock_gating_wa(struct drm_i915_private *i915)
{
	/*
	 * Wa_16015201720
	 * The WA requires clock gating to be disabled all the time
	 * for pipe A and B.
	 */
	intel_de_rmw(i915, GEN9_CLKGATE_DIS_0, 0,
		     MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B);
}
461*f005ef32Sjsg
/*
 * Dispatch the pipe DMC clock gating workaround (Wa_16015201720) to the
 * platform-specific handler. On display 14+ the WA is apply-only, so the
 * @enable == false case is a no-op there.
 */
static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
	if (DISPLAY_VER(i915) == 13) {
		adlp_pipedmc_clock_gating_wa(i915, enable);
		return;
	}

	if (enable && DISPLAY_VER(i915) >= 14)
		mtl_pipedmc_clock_gating_wa(i915);
}
469*f005ef32Sjsg
/*
 * Enable the pipe DMC for @pipe, provided its firmware is loaded.
 * MTL+ uses a shared control register; earlier platforms have one per pipe.
 */
void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
}
482*f005ef32Sjsg
/*
 * Disable the pipe DMC for @pipe; mirror image of intel_dmc_enable_pipe().
 */
void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
{
	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);

	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(i915, dmc_id))
		return;

	if (DISPLAY_VER(i915) >= 14)
		intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
	else
		intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
495*f005ef32Sjsg
/* True when @reg falls within the DMC event control register range of @dmc_id. */
static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
			       enum intel_dmc_id dmc_id, i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
	/* end is one past the last handler's control register (exclusive) */
	u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));

	return offset >= start && offset < end;
}
505*f005ef32Sjsg
disable_dmc_evt(struct drm_i915_private * i915,enum intel_dmc_id dmc_id,i915_reg_t reg,u32 data)506*f005ef32Sjsg static bool disable_dmc_evt(struct drm_i915_private *i915,
507*f005ef32Sjsg enum intel_dmc_id dmc_id,
508*f005ef32Sjsg i915_reg_t reg, u32 data)
509*f005ef32Sjsg {
510*f005ef32Sjsg if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
511*f005ef32Sjsg return false;
512*f005ef32Sjsg
513*f005ef32Sjsg /* keep all pipe DMC events disabled by default */
514*f005ef32Sjsg if (dmc_id != DMC_FW_MAIN)
515*f005ef32Sjsg return true;
516*f005ef32Sjsg
517*f005ef32Sjsg return false;
518*f005ef32Sjsg }
519*f005ef32Sjsg
/*
 * Value to write for MMIO init entry @i of firmware @dmc_id: either the
 * firmware-provided data, or an "event disabled" value when the entry
 * targets a pipe DMC event control register (see disable_dmc_evt()).
 */
static u32 dmc_mmiodata(struct drm_i915_private *i915,
			struct intel_dmc *dmc,
			enum intel_dmc_id dmc_id, int i)
{
	if (disable_dmc_evt(i915, dmc_id,
			    dmc->dmc_info[dmc_id].mmioaddr[i],
			    dmc->dmc_info[dmc_id].mmiodata[i]))
		return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
				      DMC_EVT_CTL_EVENT_ID_FALSE);
	else
		return dmc->dmc_info[dmc_id].mmiodata[i];
}
534*f005ef32Sjsg
/**
 * intel_dmc_load_program() - write the firmware from memory to register.
 * @i915: i915 drm device.
 *
 * DMC firmware is read from a .bin file and kept in internal memory one time.
 * Every time display comes back from low power state this function is called to
 * copy the firmware from internal memory to registers.
 */
void intel_dmc_load_program(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->display.power.domains;
	struct intel_dmc *dmc = i915_to_dmc(i915);
	enum intel_dmc_id dmc_id;
	u32 i;

	if (!intel_dmc_has_payload(i915))
		return;

	/* Wa_16015201720: keep pipe DMC clock gating off while programming */
	pipedmc_clock_gating_wa(i915, true);

	/* start from a clean slate: no event handlers left over from before */
	disable_all_event_handlers(i915);

	assert_rpm_wakelock_held(&i915->runtime_pm);

	preempt_disable();

	/* copy each firmware payload into its DMC program RAM */
	for_each_dmc_id(dmc_id) {
		for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
			intel_de_write_fw(i915,
					  DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
					  dmc->dmc_info[dmc_id].payload[i]);
		}
	}

	preempt_enable();

	/* replay the MMIO init sequence that came with each firmware blob */
	for_each_dmc_id(dmc_id) {
		for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
			intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
				       dmc_mmiodata(i915, dmc, dmc_id, i));
		}
	}

	power_domains->dc_state = 0;

	gen9_set_dc_state_debugmask(i915);

	/*
	 * Flip queue events need to be disabled before enabling DC5/6.
	 * i915 doesn't use the flip queue feature, so disable it already
	 * here.
	 */
	disable_all_flip_queue_events(i915);

	pipedmc_clock_gating_wa(i915, false);
}
5911bb76ff1Sjsg
/**
 * intel_dmc_disable_program() - disable the firmware
 * @i915: i915 drm device
 *
 * Disable all event handlers in the firmware, making sure the firmware is
 * inactive after the display is uninitialized.
 */
void intel_dmc_disable_program(struct drm_i915_private *i915)
{
	if (!intel_dmc_has_payload(i915))
		return;

	/* same WA bracket as in intel_dmc_load_program() */
	pipedmc_clock_gating_wa(i915, true);
	disable_all_event_handlers(i915);
	pipedmc_clock_gating_wa(i915, false);
}
6081bb76ff1Sjsg
/* Warn (once per condition) if the DMC does not appear to be loaded. */
void assert_dmc_loaded(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	drm_WARN_ONCE(&i915->drm, !dmc, "DMC not initialized\n");
	/* only probe program RAM when dmc state actually exists */
	drm_WARN_ONCE(&i915->drm, dmc &&
		      !intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
		      "DMC program storage start is NULL\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_SSP_BASE),
		      "DMC SSP Base Not fine\n");
	drm_WARN_ONCE(&i915->drm, !intel_de_read(i915, DMC_HTP_SKL),
		      "DMC HTP Not fine\n");
}
6225ca02815Sjsg
fw_info_matches_stepping(const struct intel_fw_info * fw_info,const struct stepping_info * si)6235ca02815Sjsg static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
6245ca02815Sjsg const struct stepping_info *si)
6255ca02815Sjsg {
6265ca02815Sjsg if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
6275ca02815Sjsg (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
6285ca02815Sjsg /*
6295ca02815Sjsg * If we don't find a more specific one from above two checks, we
6305ca02815Sjsg * then check for the generic one to be sure to work even with
6315ca02815Sjsg * "broken firmware"
6325ca02815Sjsg */
6335ca02815Sjsg (si->stepping == '*' && si->substepping == fw_info->substepping) ||
6345ca02815Sjsg (fw_info->stepping == '*' && fw_info->substepping == '*'))
6355ca02815Sjsg return true;
6365ca02815Sjsg
6375ca02815Sjsg return false;
6385ca02815Sjsg }
6395ca02815Sjsg
/*
 * Search fw_info table for dmc_offset to find firmware binary: num_entries is
 * already sanitized. Records, per firmware id, the offset of the first entry
 * matching the device stepping @si.
 */
static void dmc_set_fw_offset(struct intel_dmc *dmc,
			      const struct intel_fw_info *fw_info,
			      unsigned int num_entries,
			      const struct stepping_info *si,
			      u8 package_ver)
{
	struct drm_i915_private *i915 = dmc->i915;
	enum intel_dmc_id dmc_id;
	unsigned int i;

	for (i = 0; i < num_entries; i++) {
		/* package header v1 only ever carries the main firmware */
		dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;

		if (!is_valid_dmc_id(dmc_id)) {
			drm_dbg(&i915->drm, "Unsupported firmware id: %u\n", dmc_id);
			continue;
		}

		/* More specific versions come first, so we don't even have to
		 * check for the stepping since we already found a previous FW
		 * for this id.
		 */
		if (dmc->dmc_info[dmc_id].present)
			continue;

		if (fw_info_matches_stepping(&fw_info[i], si)) {
			dmc->dmc_info[dmc_id].present = true;
			dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
		}
	}
}
6755ca02815Sjsg
/*
 * Validate that every MMIO address in a firmware's init sequence lies in
 * the range allowed for this header version / firmware id / platform.
 * Returns false (rejecting the firmware) on any out-of-range address.
 */
static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
				       const u32 *mmioaddr, u32 mmio_count,
				       int header_ver, enum intel_dmc_id dmc_id)
{
	struct drm_i915_private *i915 = dmc->i915;
	u32 start_range, end_range;
	int i;

	/* pick the allowed window for this header version and firmware id */
	if (header_ver == 1) {
		start_range = DMC_MMIO_START_RANGE;
		end_range = DMC_MMIO_END_RANGE;
	} else if (dmc_id == DMC_FW_MAIN) {
		start_range = TGL_MAIN_MMIO_START;
		end_range = TGL_MAIN_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 13) {
		start_range = ADLP_PIPE_MMIO_START;
		end_range = ADLP_PIPE_MMIO_END;
	} else if (DISPLAY_VER(i915) >= 12) {
		start_range = TGL_PIPE_MMIO_START(dmc_id);
		end_range = TGL_PIPE_MMIO_END(dmc_id);
	} else {
		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
		return false;
	}

	for (i = 0; i < mmio_count; i++) {
		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
			return false;
	}

	return true;
}
70897efc481Sjsg
/*
 * Parse a single per-id DMC firmware blob (a v1 or v3 header followed by
 * the payload) and cache its MMIO write list and payload in
 * dmc->dmc_info[dmc_id].
 *
 * Returns the number of bytes consumed (header + payload), or 0 on any
 * validation failure.
 */
static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
			       const struct intel_dmc_header_base *dmc_header,
			       size_t rem_size, enum intel_dmc_id dmc_id)
{
	struct drm_i915_private *i915 = dmc->i915;
	struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
	const u32 *mmioaddr, *mmiodata;
	u32 mmio_count, mmio_count_max, start_mmioaddr;
	u8 *payload;

	/* The cached mmioaddr array must fit either header version's maximum. */
	BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
		     ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);

	/*
	 * Check if we can access common fields, we will check again below
	 * after we have read the version
	 */
	if (rem_size < sizeof(struct intel_dmc_header_base))
		goto error_truncated;

	/* Cope with small differences between v1 and v3 */
	if (dmc_header->header_ver == 3) {
		const struct intel_dmc_header_v3 *v3 =
			(const struct intel_dmc_header_v3 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v3))
			goto error_truncated;

		mmioaddr = v3->mmioaddr;
		mmiodata = v3->mmiodata;
		mmio_count = v3->mmio_count;
		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
		/* header_len is in dwords */
		header_len_bytes = dmc_header->header_len * 4;
		start_mmioaddr = v3->start_mmioaddr;
		dmc_header_size = sizeof(*v3);
	} else if (dmc_header->header_ver == 1) {
		const struct intel_dmc_header_v1 *v1 =
			(const struct intel_dmc_header_v1 *)dmc_header;

		if (rem_size < sizeof(struct intel_dmc_header_v1))
			goto error_truncated;

		mmioaddr = v1->mmioaddr;
		mmiodata = v1->mmiodata;
		mmio_count = v1->mmio_count;
		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
		/* no dword conversion here, unlike v3 */
		header_len_bytes = dmc_header->header_len;
		start_mmioaddr = DMC_V1_MMIO_START_RANGE;
		dmc_header_size = sizeof(*v1);
	} else {
		drm_err(&i915->drm, "Unknown DMC fw header version: %u\n",
			dmc_header->header_ver);
		return 0;
	}

	/* The header must self-report exactly the size we computed above. */
	if (header_len_bytes != dmc_header_size) {
		drm_err(&i915->drm, "DMC firmware has wrong dmc header length "
			"(%u bytes)\n", header_len_bytes);
		return 0;
	}

	/* Cache the dmc header info. */
	if (mmio_count > mmio_count_max) {
		drm_err(&i915->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
		return 0;
	}

	/* Reject MMIO addresses outside the per-platform allowed ranges. */
	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
					dmc_header->header_ver, dmc_id)) {
		drm_err(&i915->drm, "DMC firmware has Wrong MMIO Addresses\n");
		return 0;
	}

	for (i = 0; i < mmio_count; i++) {
		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
		dmc_info->mmiodata[i] = mmiodata[i];
	}
	dmc_info->mmio_count = mmio_count;
	dmc_info->start_mmioaddr = start_mmioaddr;

	rem_size -= header_len_bytes;

	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
	payload_size = dmc_header->fw_size * 4;
	if (rem_size < payload_size)
		goto error_truncated;

	if (payload_size > dmc->max_fw_size) {
		drm_err(&i915->drm, "DMC FW too big (%u bytes)\n", payload_size);
		return 0;
	}
	dmc_info->dmc_fw_size = dmc_header->fw_size;

	dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
	if (!dmc_info->payload)
		return 0;

	/* The payload immediately follows the (validated) header. */
	payload = (u8 *)(dmc_header) + header_len_bytes;
	memcpy(dmc_info->payload, payload, payload_size);

	return header_len_bytes + payload_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}
8175ca02815Sjsg
/*
 * Parse the package header that follows the CSS header and, via
 * dmc_set_fw_offset(), record per DMC id the offset of the firmware blob
 * matching the current stepping info @si.
 *
 * Returns the total package header size in bytes, or 0 on error.
 */
static u32
parse_dmc_fw_package(struct intel_dmc *dmc,
		     const struct intel_package_header *package_header,
		     const struct stepping_info *si,
		     size_t rem_size)
{
	struct drm_i915_private *i915 = dmc->i915;
	u32 package_size = sizeof(struct intel_package_header);
	u32 num_entries, max_entries;
	const struct intel_fw_info *fw_info;

	if (rem_size < package_size)
		goto error_truncated;

	/* The header version determines how many fw_info entries may follow. */
	if (package_header->header_ver == 1) {
		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
	} else if (package_header->header_ver == 2) {
		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
	} else {
		drm_err(&i915->drm, "DMC firmware has unknown header version %u\n",
			package_header->header_ver);
		return 0;
	}

	/*
	 * We should always have space for max_entries,
	 * even if not all are used
	 */
	package_size += max_entries * sizeof(struct intel_fw_info);
	if (rem_size < package_size)
		goto error_truncated;

	/* header_len is in dwords */
	if (package_header->header_len * 4 != package_size) {
		drm_err(&i915->drm, "DMC firmware has wrong package header length "
			"(%u bytes)\n", package_size);
		return 0;
	}

	/* Clamp a bogus entry count rather than reading past the table. */
	num_entries = package_header->num_entries;
	if (WARN_ON(package_header->num_entries > max_entries))
		num_entries = max_entries;

	fw_info = (const struct intel_fw_info *)
		((u8 *)package_header + sizeof(*package_header));
	dmc_set_fw_offset(dmc, fw_info, num_entries, si,
			  package_header->header_ver);

	/* dmc_offset is in dwords */
	return package_size;

error_truncated:
	drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
	return 0;
}
8725ca02815Sjsg
/*
 * Validate the CSS header at the start of the firmware image and cache
 * the firmware version it advertises.
 *
 * Return number of bytes parsed or 0 on error.
 */
static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
			    struct intel_css_header *css_header,
			    size_t rem_size)
{
	struct drm_i915_private *i915 = dmc->i915;

	/* The CSS header must fit entirely within the remaining image. */
	if (rem_size < sizeof(struct intel_css_header)) {
		drm_err(&i915->drm, "Truncated DMC firmware, refusing.\n");
		return 0;
	}

	/* header_len is in dwords */
	if (sizeof(struct intel_css_header) !=
	    (css_header->header_len * 4)) {
		drm_err(&i915->drm, "DMC firmware has wrong CSS header length "
			"(%u bytes)\n",
			(css_header->header_len * 4));
		return 0;
	}

	dmc->version = css_header->version;

	return sizeof(struct intel_css_header);
}
8975ca02815Sjsg
/*
 * Parse the complete DMC firmware image: the CSS header, then the package
 * header, then each per-id firmware blob the package advertises as present
 * for this stepping.  A bad per-id blob only skips that blob; whether
 * anything usable was loaded is reflected by intel_dmc_has_payload().
 */
static void parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
{
	struct drm_i915_private *i915 = dmc->i915;
	struct intel_css_header *css_header;
	struct intel_package_header *package_header;
	struct intel_dmc_header_base *dmc_header;
	struct stepping_info display_info = { '*', '*'};
	const struct stepping_info *si = intel_get_stepping_info(i915, &display_info);
	enum intel_dmc_id dmc_id;
	u32 readcount = 0;
	u32 r, offset;

	if (!fw)
		return;

	/* Extract CSS Header information */
	css_header = (struct intel_css_header *)fw->data;
	r = parse_dmc_fw_css(dmc, css_header, fw->size);
	if (!r)
		return;

	readcount += r;

	/* Extract Package Header information */
	package_header = (struct intel_package_header *)&fw->data[readcount];
	r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
	if (!r)
		return;

	readcount += r;

	for_each_dmc_id(dmc_id) {
		if (!dmc->dmc_info[dmc_id].present)
			continue;

		/* dmc_offset was stored in dwords by the package parser */
		offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
		if (offset > fw->size) {
			drm_err(&i915->drm, "Reading beyond the fw_size\n");
			continue;
		}

		dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
		parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
	}
}
9435ca02815Sjsg
/*
 * Take the POWER_DOMAIN_INIT wakeref that keeps the device from
 * runtime-suspending while no DMC firmware is loaded.  Warns if a
 * previous wakeref is still held (it would be leaked by the overwrite).
 */
static void intel_dmc_runtime_pm_get(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);
	i915->display.dmc.wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
9495ca02815Sjsg
/*
 * Release the wakeref taken by intel_dmc_runtime_pm_get(), clearing the
 * stored handle atomically so a double put is visible as a zero wakeref.
 */
static void intel_dmc_runtime_pm_put(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->display.dmc.wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
}
957*f005ef32Sjsg
dmc_fallback_path(struct drm_i915_private * i915)958*f005ef32Sjsg static const char *dmc_fallback_path(struct drm_i915_private *i915)
959*f005ef32Sjsg {
960*f005ef32Sjsg if (IS_ALDERLAKE_P(i915))
961*f005ef32Sjsg return ADLP_DMC_FALLBACK_PATH;
962*f005ef32Sjsg
963*f005ef32Sjsg return NULL;
9645ca02815Sjsg }
9655ca02815Sjsg
/*
 * Deferred worker that actually loads the DMC firmware.
 *
 * Requests the platform blob (optionally retrying a fallback image when
 * the primary one is absent and the user did not force a path), parses
 * it, and on success programs the DMC and drops the runtime-PM reference
 * taken in intel_dmc_init().  On failure the reference is kept, so
 * runtime power management stays disabled.
 */
static void dmc_load_work_fn(struct work_struct *work)
{
	struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
	struct drm_i915_private *i915 = dmc->i915;
	const struct firmware *fw = NULL;
	const char *fallback_path;
	int err;

#ifdef __linux__
	err = request_firmware(&fw, dmc->fw_path, i915->drm.dev);
#else
	/* non-Linux builds pass no device to request_firmware() */
	err = request_firmware(&fw, dmc->fw_path, NULL);
#endif

	/* Only fall back when the user did not override the firmware path. */
	if (err == -ENOENT && !i915->params.dmc_firmware_path) {
		fallback_path = dmc_fallback_path(i915);
		if (fallback_path) {
			drm_dbg_kms(&i915->drm, "%s not found, falling back to %s\n",
				    dmc->fw_path, fallback_path);
			err = request_firmware(&fw, fallback_path, i915->drm.dev);
			if (err == 0)
				dmc->fw_path = fallback_path;
		}
	}

	/* parse_dmc_fw() tolerates fw == NULL, so no error check needed here. */
	parse_dmc_fw(dmc, fw);

	if (intel_dmc_has_payload(i915)) {
		intel_dmc_load_program(i915);
		intel_dmc_runtime_pm_put(i915);

		drm_info(&i915->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
			 dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
			 DMC_VERSION_MINOR(dmc->version));
	} else {
		drm_notice(&i915->drm,
			   "Failed to load DMC firmware %s."
			   " Disabling runtime power management.\n",
			   dmc->fw_path);
#ifdef __linux__
		drm_notice(&i915->drm, "DMC firmware homepage: %s",
			   INTEL_UC_FIRMWARE_URL);
#endif
	}

	release_firmware(fw);
}
10135ca02815Sjsg
/**
 * intel_dmc_init() - initialize the firmware loading.
 * @i915: i915 drm device.
 *
 * This function is called at the time of loading the display driver to read
 * firmware from a .bin file and copy it into internal memory.  The actual
 * load happens asynchronously in dmc_load_work_fn().
 */
void intel_dmc_init(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc;

	if (!HAS_DMC(i915))
		return;

	/*
	 * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
	 * runtime-suspend.
	 *
	 * On error, we return with the rpm wakeref held to prevent runtime
	 * suspend as runtime suspend *requires* a working DMC for whatever
	 * reason.
	 */
	intel_dmc_runtime_pm_get(i915);

	dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
	if (!dmc)
		return;

	dmc->i915 = i915;

	INIT_WORK(&dmc->work, dmc_load_work_fn);

	/*
	 * Pick the firmware path and maximum blob size for this platform.
	 * Checks are ordered newest platform first; the DISPLAY_VER()
	 * fallbacks must stay below the specific-platform checks.
	 */
	if (IS_METEORLAKE(i915)) {
		dmc->fw_path = MTL_DMC_PATH;
		dmc->max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
	} else if (IS_DG2(i915)) {
		dmc->fw_path = DG2_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_P(i915)) {
		dmc->fw_path = ADLP_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
	} else if (IS_ALDERLAKE_S(i915)) {
		dmc->fw_path = ADLS_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_DG1(i915)) {
		dmc->fw_path = DG1_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_ROCKETLAKE(i915)) {
		dmc->fw_path = RKL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (IS_TIGERLAKE(i915)) {
		dmc->fw_path = TGL_DMC_PATH;
		dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
	} else if (DISPLAY_VER(i915) == 11) {
		dmc->fw_path = ICL_DMC_PATH;
		dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
	} else if (IS_GEMINILAKE(i915)) {
		dmc->fw_path = GLK_DMC_PATH;
		dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
	} else if (IS_KABYLAKE(i915) ||
		   IS_COFFEELAKE(i915) ||
		   IS_COMETLAKE(i915)) {
		dmc->fw_path = KBL_DMC_PATH;
		dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
	} else if (IS_SKYLAKE(i915)) {
		dmc->fw_path = SKL_DMC_PATH;
		dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
	} else if (IS_BROXTON(i915)) {
		dmc->fw_path = BXT_DMC_PATH;
		dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
	}

	if (i915->params.dmc_firmware_path) {
		/* An empty override string disables DMC loading entirely. */
		if (strlen(i915->params.dmc_firmware_path) == 0) {
			drm_info(&i915->drm,
				 "Disabling DMC firmware and runtime PM\n");
			goto out;
		}

		dmc->fw_path = i915->params.dmc_firmware_path;
	}

	if (!dmc->fw_path) {
		drm_dbg_kms(&i915->drm,
			    "No known DMC firmware for platform, disabling runtime PM\n");
		goto out;
	}

	i915->display.dmc.dmc = dmc;

	drm_dbg_kms(&i915->drm, "Loading %s\n", dmc->fw_path);
	queue_work(i915->unordered_wq, &dmc->work);

	return;

out:
	/* No firmware will be loaded; the rpm wakeref stays held on purpose. */
	kfree(dmc);
}
11125ca02815Sjsg
/**
 * intel_dmc_suspend() - prepare DMC firmware before system suspend
 * @i915: i915 drm device
 *
 * Prepare the DMC firmware before entering system suspend. This includes
 * flushing pending work items and releasing any resources acquired during
 * init.
 */
void intel_dmc_suspend(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	if (!HAS_DMC(i915))
		return;

	/* Make sure the asynchronous loader is no longer running. */
	if (dmc)
		flush_work(&dmc->work);

	/* Drop the reference held in case DMC isn't loaded. */
	if (!intel_dmc_has_payload(i915))
		intel_dmc_runtime_pm_put(i915);
}
11355ca02815Sjsg
/**
 * intel_dmc_resume() - init DMC firmware during system resume
 * @i915: i915 drm device
 *
 * Reinitialize the DMC firmware during system resume, reacquiring any
 * resources released in intel_dmc_suspend().
 */
void intel_dmc_resume(struct drm_i915_private *i915)
{
	if (!HAS_DMC(i915))
		return;

	/*
	 * Reacquire the reference to keep RPM disabled in case DMC isn't
	 * loaded.
	 */
	if (!intel_dmc_has_payload(i915))
		intel_dmc_runtime_pm_get(i915);
}
11555ca02815Sjsg
/**
 * intel_dmc_fini() - unload the DMC firmware.
 * @i915: i915 drm device.
 *
 * Firmware unloading includes freeing the internal memory and resetting the
 * firmware loading status.
 */
void intel_dmc_fini(struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);
	enum intel_dmc_id dmc_id;

	if (!HAS_DMC(i915))
		return;

	/* Flush the loader and balance the wakeref taken at init. */
	intel_dmc_suspend(i915);
	drm_WARN_ON(&i915->drm, i915->display.dmc.wakeref);

	if (dmc) {
		/* Free every cached per-id payload before the dmc itself. */
		for_each_dmc_id(dmc_id)
			kfree(dmc->dmc_info[dmc_id].payload);

		kfree(dmc);
		i915->display.dmc.dmc = NULL;
	}
}
11821bb76ff1Sjsg
/*
 * Dump DMC initialization/load state and the firmware version into a
 * captured GPU error state buffer.
 */
void intel_dmc_print_error_state(struct drm_i915_error_state_buf *m,
				 struct drm_i915_private *i915)
{
	struct intel_dmc *dmc = i915_to_dmc(i915);

	if (!HAS_DMC(i915))
		return;

	i915_error_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
	i915_error_printf(m, "DMC loaded: %s\n",
			  str_yes_no(intel_dmc_has_payload(i915)));
	/* The version is only meaningful once a dmc object exists. */
	if (dmc)
		i915_error_printf(m, "DMC fw version: %d.%d\n",
				  DMC_VERSION_MAJOR(dmc->version),
				  DMC_VERSION_MINOR(dmc->version));
}
11991bb76ff1Sjsg
12001bb76ff1Sjsg #ifdef notyet
12011bb76ff1Sjsg
/*
 * debugfs show handler: report DMC load status, per-pipe firmware
 * presence, firmware version and the hardware DC-state entry counters.
 */
static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = m->private;
	struct intel_dmc *dmc = i915_to_dmc(i915);
	intel_wakeref_t wakeref;
	i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;

	if (!HAS_DMC(i915))
		return -ENODEV;

	/* Keep the device awake while reading the counter registers. */
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
	seq_printf(m, "fw loaded: %s\n",
		   str_yes_no(intel_dmc_has_payload(i915)));
	seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
	seq_printf(m, "Pipe A fw needed: %s\n",
		   str_yes_no(GRAPHICS_VER(i915) >= 12));
	seq_printf(m, "Pipe A fw loaded: %s\n",
		   str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEA)));
	seq_printf(m, "Pipe B fw needed: %s\n",
		   str_yes_no(IS_ALDERLAKE_P(i915) ||
			      DISPLAY_VER(i915) >= 14));
	seq_printf(m, "Pipe B fw loaded: %s\n",
		   str_yes_no(has_dmc_id_fw(i915, DMC_FW_PIPEB)));

	if (!intel_dmc_has_payload(i915))
		goto out;

	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
		   DMC_VERSION_MINOR(dmc->version));

	/* The DC-state counter registers differ between platform families. */
	if (DISPLAY_VER(i915) >= 12) {
		i915_reg_t dc3co_reg;

		if (IS_DGFX(i915) || DISPLAY_VER(i915) >= 14) {
			dc3co_reg = DG1_DMC_DEBUG3;
			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
		} else {
			dc3co_reg = TGL_DMC_DEBUG3;
			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
		}

		seq_printf(m, "DC3CO count: %d\n",
			   intel_de_read(i915, dc3co_reg));
	} else {
		dc5_reg = IS_BROXTON(i915) ? BXT_DMC_DC3_DC5_COUNT :
			SKL_DMC_DC3_DC5_COUNT;
		if (!IS_GEMINILAKE(i915) && !IS_BROXTON(i915))
			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
	}

	seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(i915, dc5_reg));
	/* dc6_reg stays INVALID_MMIO_REG on platforms without a DC6 counter. */
	if (i915_mmio_reg_valid(dc6_reg))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   intel_de_read(i915, dc6_reg));

	seq_printf(m, "program base: 0x%08x\n",
		   intel_de_read(i915, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));

out:
	seq_printf(m, "ssp base: 0x%08x\n",
		   intel_de_read(i915, DMC_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", intel_de_read(i915, DMC_HTP_SKL));

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}
12721bb76ff1Sjsg
12731bb76ff1Sjsg DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
12741bb76ff1Sjsg
12751bb76ff1Sjsg #endif /* notyet */
12761bb76ff1Sjsg
intel_dmc_debugfs_register(struct drm_i915_private * i915)12771bb76ff1Sjsg void intel_dmc_debugfs_register(struct drm_i915_private *i915)
12781bb76ff1Sjsg {
12791bb76ff1Sjsg struct drm_minor *minor = i915->drm.primary;
12801bb76ff1Sjsg
12811bb76ff1Sjsg debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
12821bb76ff1Sjsg i915, &intel_dmc_debugfs_status_fops);
12835ca02815Sjsg }
1284