// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/bitops.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_trace.h"
#include "intel_pmdemand.h"
#include "skl_watermark.h"

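/*
 * Global-state plumbing: the pmdemand state carries only plain values, so
 * duplicating it is a straight kmemdup() of the old state.
 */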
static struct intel_global_state *
intel_pmdemand_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kmemdup(obj->state, sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return NULL;

	return &pmdemand_state->base;
}

static void intel_pmdemand_destroy_state(struct intel_global_obj *obj,
					 struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_pmdemand_funcs = {
	.atomic_duplicate_state = intel_pmdemand_duplicate_state,
	.atomic_destroy_state = intel_pmdemand_destroy_state,
};

static struct intel_pmdemand_state *
intel_atomic_get_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_global_obj_state(state,
						  &i915->display.pmdemand.obj);

	if (IS_ERR(pmdemand_state))
		return ERR_CAST(pmdemand_state);

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_old_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_old_global_obj_state(state,
						      &i915->display.pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

static struct intel_pmdemand_state *
intel_atomic_get_new_pmdemand_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_global_state *pmdemand_state =
		intel_atomic_get_new_global_obj_state(state,
						      &i915->display.pmdemand.obj);

	if (!pmdemand_state)
		return NULL;

	return to_intel_pmdemand_state(pmdemand_state);
}

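/*
 * Register the pmdemand object with the global atomic state machinery and
 * apply Wa_14016740474 on the affected MTL display steppings, which disables
 * the DMD response timeout.
 */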
int intel_pmdemand_init(struct drm_i915_private *i915)
{
	struct intel_pmdemand_state *pmdemand_state;

	pmdemand_state = kzalloc(sizeof(*pmdemand_state), GFP_KERNEL);
	if (!pmdemand_state)
		return -ENOMEM;

	intel_atomic_global_obj_init(i915, &i915->display.pmdemand.obj,
				     &pmdemand_state->base,
				     &intel_pmdemand_funcs);

	if (IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0))
		/* Wa_14016740474 */
		intel_de_rmw(i915, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);

	return 0;
}

void intel_pmdemand_init_early(struct drm_i915_private *i915)
{
	rw_init(&i915->display.pmdemand.lock, "pmdem");
	init_waitqueue_head(&i915->display.pmdemand.waitqueue);
}

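/*
 * Track which combo PHYs are in use. Type-C PHYs are excluded from the
 * mask, and pmdemand itself only exists on display version 14+.
 */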
void
intel_pmdemand_update_phys_mask(struct drm_i915_private *i915,
				struct intel_encoder *encoder,
				struct intel_pmdemand_state *pmdemand_state,
				bool set_bit)
{
	enum phy phy;

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!encoder)
		return;

	phy = intel_port_to_phy(i915, encoder->port);
	if (intel_phy_is_tc(i915, phy))
		return;

	if (set_bit)
		pmdemand_state->active_combo_phys_mask |= BIT(phy);
	else
		pmdemand_state->active_combo_phys_mask &= ~BIT(phy);
}

void
intel_pmdemand_update_port_clock(struct drm_i915_private *i915,
				 struct intel_pmdemand_state *pmdemand_state,
				 enum pipe pipe, int port_clock)
{
	if (DISPLAY_VER(i915) < 14)
		return;

	pmdemand_state->ddi_clocks[pipe] = port_clock;
}

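/*
 * Refresh the per-pipe DDI clocks from this commit, then report the
 * maximum across all pipes, converted from kHz to MHz.
 */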
static void
intel_pmdemand_update_max_ddiclk(struct drm_i915_private *i915,
				 struct intel_atomic_state *state,
				 struct intel_pmdemand_state *pmdemand_state)
{
	int max_ddiclk = 0;
	const struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
		intel_pmdemand_update_port_clock(i915, pmdemand_state,
						 crtc->pipe,
						 new_crtc_state->port_clock);

	for (i = 0; i < ARRAY_SIZE(pmdemand_state->ddi_clocks); i++)
		max_ddiclk = max(pmdemand_state->ddi_clocks[i], max_ddiclk);

	pmdemand_state->params.ddiclk_max = DIV_ROUND_UP(max_ddiclk, 1000);
}

static void
intel_pmdemand_update_connector_phys(struct drm_i915_private *i915,
				     struct intel_atomic_state *state,
				     struct drm_connector_state *conn_state,
				     bool set_bit,
				     struct intel_pmdemand_state *pmdemand_state)
{
	struct intel_encoder *encoder = to_intel_encoder(conn_state->best_encoder);
	struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
	struct intel_crtc_state *crtc_state;

	if (!crtc)
		return;

	if (set_bit)
		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
	else
		crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!crtc_state->hw.active)
		return;

	intel_pmdemand_update_phys_mask(i915, encoder, pmdemand_state,
					set_bit);
}

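/*
 * For every connector changing in this commit, clear the PHYs used by its
 * old state and set the ones used by its new state, then clamp the active
 * PHY count to the register field maximum of 7.
 */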
static void
intel_pmdemand_update_active_non_tc_phys(struct drm_i915_private *i915,
					 struct intel_atomic_state *state,
					 struct intel_pmdemand_state *pmdemand_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		if (!intel_connector_needs_modeset(state, connector))
			continue;

		/* First clear the active phys in the old connector state */
		intel_pmdemand_update_connector_phys(i915, state,
						     old_conn_state, false,
						     pmdemand_state);

		/* Then set the active phys in new connector state */
		intel_pmdemand_update_connector_phys(i915, state,
						     new_conn_state, true,
						     pmdemand_state);
	}

	pmdemand_state->params.active_phys =
		min_t(u16, hweight16(pmdemand_state->active_combo_phys_mask),
		      7);
}

static bool
intel_pmdemand_encoder_has_tc_phy(struct drm_i915_private *i915,
				  struct intel_encoder *encoder)
{
	enum phy phy;

	if (!encoder)
		return false;

	phy = intel_port_to_phy(i915, encoder->port);

	return intel_phy_is_tc(i915, phy);
}

static bool
intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct drm_connector_state *old_conn_state;
	struct drm_connector_state *new_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_oldnew_connector_in_state(&state->base, connector,
					   old_conn_state, new_conn_state, i) {
		struct intel_encoder *old_encoder =
			to_intel_encoder(old_conn_state->best_encoder);
		struct intel_encoder *new_encoder =
			to_intel_encoder(new_conn_state->best_encoder);

		if (!intel_connector_needs_modeset(state, connector))
			continue;

		if (old_encoder == new_encoder ||
		    (intel_pmdemand_encoder_has_tc_phy(i915, old_encoder) &&
		     intel_pmdemand_encoder_has_tc_phy(i915, new_encoder)))
			continue;

		return true;
	}

	return false;
}

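/*
 * pmdemand must be reprogrammed whenever any of its inputs change: QGV
 * peak bandwidth, active pipes or enabled DBUF slices, CDCLK frequency or
 * voltage level, any port clock, or a non-TC PHY assignment.
 */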
static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
{
	const struct intel_bw_state *new_bw_state, *old_bw_state;
	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state;
	struct intel_crtc *crtc;
	int i;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);
	if (new_bw_state && new_bw_state->qgv_point_peakbw !=
	    old_bw_state->qgv_point_peakbw)
		return true;

	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
	if (new_dbuf_state &&
	    (new_dbuf_state->active_pipes !=
	     old_dbuf_state->active_pipes ||
	     new_dbuf_state->enabled_slices !=
	     old_dbuf_state->enabled_slices))
		return true;

	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
	if (new_cdclk_state &&
	    (new_cdclk_state->actual.cdclk !=
	     old_cdclk_state->actual.cdclk ||
	     new_cdclk_state->actual.voltage_level !=
	     old_cdclk_state->actual.voltage_level))
		return true;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i)
		if (new_crtc_state->port_clock != old_crtc_state->port_clock)
			return true;

	return intel_pmdemand_connector_needs_update(state);
}

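/*
 * Derive the new pmdemand parameters from the bw, dbuf and cdclk global
 * states. With a full modeset allowed the state is serialized; otherwise
 * it is only locked, and the programming step has to account for
 * concurrent commits (see intel_pmdemand_update_params()).
 */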
int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_bw_state *new_bw_state;
	const struct intel_cdclk_state *new_cdclk_state;
	const struct intel_dbuf_state *new_dbuf_state;
	struct intel_pmdemand_state *new_pmdemand_state;

	if (DISPLAY_VER(i915) < 14)
		return 0;

	if (!intel_pmdemand_needs_update(state))
		return 0;

	new_pmdemand_state = intel_atomic_get_pmdemand_state(state);
	if (IS_ERR(new_pmdemand_state))
		return PTR_ERR(new_pmdemand_state);

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	/* Firmware calculates the qclk_gv_index itself, so request 0 */
	new_pmdemand_state->params.qclk_gv_index = 0;
	new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw;

	new_dbuf_state = intel_atomic_get_dbuf_state(state);
	if (IS_ERR(new_dbuf_state))
		return PTR_ERR(new_dbuf_state);

	new_pmdemand_state->params.active_pipes =
		min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
	new_pmdemand_state->params.active_dbufs =
		min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);

	new_cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(new_cdclk_state))
		return PTR_ERR(new_cdclk_state);

	new_pmdemand_state->params.voltage_index =
		new_cdclk_state->actual.voltage_level;
	new_pmdemand_state->params.cdclk_freq_mhz =
		DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000);

	intel_pmdemand_update_max_ddiclk(i915, state, new_pmdemand_state);

	intel_pmdemand_update_active_non_tc_phys(i915, state, new_pmdemand_state);

	/*
	 * Active_PLLs starts at 1 because of the CDCLK PLL.
	 * TODO: Account for the genlock filter when it gets used.
	 */
	new_pmdemand_state->params.plls =
		min_t(u16, new_pmdemand_state->params.active_phys + 1, 7);

	/*
	 * Set scalers to the maximum, since the actual count cannot be
	 * calculated during flips and fastsets without taking the global
	 * state locks.
	 */
	new_pmdemand_state->params.scalers = 7;

	if (state->base.allow_modeset)
		return intel_atomic_serialize_global_state(&new_pmdemand_state->base);
	else
		return intel_atomic_lock_global_state(&new_pmdemand_state->base);
}

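/*
 * Returns true once the previous pmdemand transaction has finished, i.e.
 * both the request-enable bit and the in-flight status clear within 10 ms.
 */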
static bool intel_pmdemand_check_prev_transaction(struct drm_i915_private *i915)
{
	return !(intel_de_wait_for_clear(i915,
					 XELPDP_INITIATE_PMDEMAND_REQUEST(1),
					 XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
		 intel_de_wait_for_clear(i915,
					 GEN12_DCPR_STATUS_1,
					 XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}

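/*
 * Read the pmdemand parameters currently programmed in the hardware back
 * into the software state. If a previous transaction is still pending,
 * the parameters are zeroed instead.
 */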
void
intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
				    struct intel_pmdemand_state *pmdemand_state)
{
	u32 reg1, reg2;

	if (DISPLAY_VER(i915) < 14)
		return;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915))) {
		memset(&pmdemand_state->params, 0,
		       sizeof(pmdemand_state->params));
		goto unlock;
	}

	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));

	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));

	/* Set 1 */
	pmdemand_state->params.qclk_gv_bw =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
	pmdemand_state->params.voltage_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
	pmdemand_state->params.qclk_gv_index =
		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
	pmdemand_state->params.active_pipes =
		REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
	pmdemand_state->params.active_dbufs =
		REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
	pmdemand_state->params.active_phys =
		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);

	/* Set 2 */
	pmdemand_state->params.cdclk_freq_mhz =
		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
	pmdemand_state->params.ddiclk_max =
		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
	pmdemand_state->params.scalers =
		REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static bool intel_pmdemand_req_complete(struct drm_i915_private *i915)
{
	return !(intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1)) &
		 XELPDP_PMDEMAND_REQ_ENABLE);
}

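/*
 * Wait up to 10 ms for the Punit to acknowledge the request; the waitqueue
 * is presumably woken from the pmdemand interrupt handler once
 * XELPDP_PMDEMAND_REQ_ENABLE clears.
 */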
static void intel_pmdemand_wait(struct drm_i915_private *i915)
{
	if (!wait_event_timeout(i915->display.pmdemand.waitqueue,
				intel_pmdemand_req_complete(i915),
				msecs_to_jiffies_timeout(10)))
		drm_err(&i915->drm,
			"timed out waiting for Punit PM Demand Response\n");
}

/* Required to be programmed during Display Init Sequences. */
void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
				 u8 dbuf_slices)
{
	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915)))
		goto unlock;

	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
		     XELPDP_PMDEMAND_DBUFS_MASK,
		     REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, dbufs));
	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(i915);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static void
intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old,
			     u32 *reg1, u32 *reg2, bool serialized)
{
	/*
	 * The pmdemand parameter updates happen in two steps: pre-plane and
	 * post-plane. During the pre-plane step, as the DE might still be
	 * handling some old operations, program the pmdemand parameters with
	 * the higher of the old and new values to avoid unexpected
	 * performance issues. Once things have settled, the post-plane
	 * update switches over to the new parameter values.
	 *
	 * If the pmdemand params are updated without a modeset allowed, the
	 * updates cannot be serialized, which implies that parallel atomic
	 * commits may affect the pmdemand parameters. In that case the
	 * current register values need to be considered as well: in the
	 * pre-plane case, take the max of the old, new and current register
	 * values when not serialized; in the post-plane case, take the max
	 * of the new and current register values when not serialized.
	 */

#define update_reg(reg, field, mask) do { \
	u32 current_val = serialized ? 0 : REG_FIELD_GET((mask), *(reg)); \
	u32 old_val = old ? old->params.field : 0; \
	u32 new_val = new->params.field; \
\
	*(reg) &= ~(mask); \
	*(reg) |= REG_FIELD_PREP((mask), max3(old_val, new_val, current_val)); \
} while (0)

	/* Set 1 */
	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
	update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
	update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);

	/* Set 2 */
	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
	update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);

#undef update_reg
}

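/*
 * Merge the computed parameters into the two request registers and, only
 * if something actually changed, kick off a new pmdemand request and wait
 * for the Punit response.
 */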
static void
intel_pmdemand_program_params(struct drm_i915_private *i915,
			      const struct intel_pmdemand_state *new,
			      const struct intel_pmdemand_state *old,
			      bool serialized)
{
	bool changed = false;
	u32 reg1, mod_reg1;
	u32 reg2, mod_reg2;

	mutex_lock(&i915->display.pmdemand.lock);
	if (drm_WARN_ON(&i915->drm,
			!intel_pmdemand_check_prev_transaction(i915)))
		goto unlock;

	reg1 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0));
	mod_reg1 = reg1;

	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
	mod_reg2 = reg2;

	intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
				     serialized);

	if (reg1 != mod_reg1) {
		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(0),
			       mod_reg1);
		changed = true;
	}

	if (reg2 != mod_reg2) {
		intel_de_write(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
			       mod_reg2);
		changed = true;
	}

	/* Initiate a pmdemand request only if the register values changed */
	if (!changed)
		goto unlock;

	drm_dbg_kms(&i915->drm,
		    "initiate pmdemand request values: (0x%x 0x%x)\n",
		    mod_reg1, mod_reg2);

	intel_de_rmw(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1), 0,
		     XELPDP_PMDEMAND_REQ_ENABLE);

	intel_pmdemand_wait(i915);

unlock:
	mutex_unlock(&i915->display.pmdemand.lock);
}

static bool
intel_pmdemand_state_changed(const struct intel_pmdemand_state *new,
			     const struct intel_pmdemand_state *old)
{
	return memcmp(&new->params, &old->params, sizeof(new->params)) != 0;
}

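/*
 * Pre-plane: program the maximum of the old and new parameters so the
 * budget stays sufficient while the transition is in flight; the
 * post-plane hook below then drops down to the final new values.
 */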
void intel_pmdemand_pre_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(i915, new_pmdemand_state,
				      old_pmdemand_state,
				      intel_atomic_global_state_is_serialized(state));
}

void intel_pmdemand_post_plane_update(struct intel_atomic_state *state)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	const struct intel_pmdemand_state *new_pmdemand_state =
		intel_atomic_get_new_pmdemand_state(state);
	const struct intel_pmdemand_state *old_pmdemand_state =
		intel_atomic_get_old_pmdemand_state(state);

	if (DISPLAY_VER(i915) < 14)
		return;

	if (!new_pmdemand_state ||
	    !intel_pmdemand_state_changed(new_pmdemand_state,
					  old_pmdemand_state))
		return;

	WARN_ON(!new_pmdemand_state->base.changed);

	intel_pmdemand_program_params(i915, new_pmdemand_state, NULL,
				      intel_atomic_global_state_is_serialized(state));
}