xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/i915/display/intel_dpll_mgr.c (revision 2a8c33eaff5adddac3ef2c5cb48ee67ef6d5d6dc)
1 /*	$NetBSD: intel_dpll_mgr.c,v 1.4 2021/12/19 12:32:15 riastradh Exp $	*/
2 
3 /*
4  * Copyright © 2006-2016 Intel Corporation
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23  * DEALINGS IN THE SOFTWARE.
24  */
25 
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: intel_dpll_mgr.c,v 1.4 2021/12/19 12:32:15 riastradh Exp $");
28 
29 #include "intel_display_types.h"
30 #include "intel_dpio_phy.h"
31 #include "intel_dpll_mgr.h"
32 
33 #include <linux/nbsd-namespace.h>
34 
35 /**
36  * DOC: Display PLLs
37  *
38  * Display PLLs used for driving outputs vary by platform. While some have
39  * per-pipe or per-encoder dedicated PLLs, others allow the use of any PLL
40  * from a pool. In the latter scenario, it is possible that multiple pipes
41  * share a PLL if their configurations match.
42  *
43  * This file provides an abstraction over display PLLs. The function
44  * intel_shared_dpll_init() initializes the PLLs for the given platform.  The
45  * users of a PLL are tracked and that tracking is integrated with the atomic
 46  * modeset interface. During an atomic operation, required PLLs can be reserved
47  * for a given CRTC and encoder configuration by calling
48  * intel_reserve_shared_dplls() and previously reserved PLLs can be released
49  * with intel_release_shared_dplls().
50  * Changes to the users are first staged in the atomic state, and then made
51  * effective by calling intel_shared_dpll_swap_state() during the atomic
52  * commit phase.
53  */
54 
55 static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private * dev_priv,struct intel_shared_dpll_state * shared_dpll)56 intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
57 				  struct intel_shared_dpll_state *shared_dpll)
58 {
59 	enum intel_dpll_id i;
60 
61 	/* Copy shared dpll state */
62 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
63 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
64 
65 		shared_dpll[i] = pll->state;
66 	}
67 }
68 
69 static struct intel_shared_dpll_state *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state * s)70 intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
71 {
72 	struct intel_atomic_state *state = to_intel_atomic_state(s);
73 
74 	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));
75 
76 	if (!state->dpll_set) {
77 		state->dpll_set = true;
78 
79 		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
80 						  state->shared_dpll);
81 	}
82 
83 	return state->shared_dpll;
84 }
85 
86 /**
87  * intel_get_shared_dpll_by_id - get a DPLL given its id
88  * @dev_priv: i915 device instance
89  * @id: pll id
90  *
91  * Returns:
92  * A pointer to the DPLL with @id
93  */
94 struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private * dev_priv,enum intel_dpll_id id)95 intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
96 			    enum intel_dpll_id id)
97 {
98 	return &dev_priv->shared_dplls[id];
99 }
100 
101 /**
102  * intel_get_shared_dpll_id - get the id of a DPLL
103  * @dev_priv: i915 device instance
104  * @pll: the DPLL
105  *
106  * Returns:
107  * The id of @pll
108  */
109 enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)110 intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
111 			 struct intel_shared_dpll *pll)
112 {
113 	if (WARN_ON(pll < dev_priv->shared_dplls||
114 		    pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
115 		return -1;
116 
117 	return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
118 }
119 
120 /* For ILK+ */
assert_shared_dpll(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll,bool state)121 void assert_shared_dpll(struct drm_i915_private *dev_priv,
122 			struct intel_shared_dpll *pll,
123 			bool state)
124 {
125 	bool cur_state;
126 	struct intel_dpll_hw_state hw_state;
127 
128 	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
129 		return;
130 
131 	cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
132 	I915_STATE_WARN(cur_state != state,
133 	     "%s assertion failure (expected %s, current %s)\n",
134 			pll->info->name, onoff(state), onoff(cur_state));
135 }
136 
/**
 * intel_prepare_shared_dpll - call a dpll's prepare hook
 * @crtc_state: CRTC, and its state, which has a shared dpll
 *
 * This calls the PLL's prepare hook if it has one and if the PLL is not
 * already enabled. The prepare hook is platform specific.
 */
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;

	if (WARN_ON(pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll_lock);
	/* At least one CRTC must have reserved this PLL by now. */
	WARN_ON(!pll->state.crtc_mask);
	if (!pll->active_mask) {
		/* No active user yet: program the PLL before its first enable. */
		DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->info->funcs->prepare(dev_priv, pll);
	}
	mutex_unlock(&dev_priv->dpll_lock);
}
164 
/**
 * intel_enable_shared_dpll - enable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Enable the shared DPLL used by @crtc.
 */
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
	unsigned int old_mask;

	if (WARN_ON(pll == NULL))
		return;

	mutex_lock(&dev_priv->dpll_lock);
	old_mask = pll->active_mask;

	/* The CRTC must have reserved the PLL and not yet marked it active. */
	if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
	    WARN_ON(pll->active_mask & crtc_mask))
		goto out;

	pll->active_mask |= crtc_mask;

	DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
		      pll->info->name, pll->active_mask, pll->on,
		      crtc->base.base.id);

	if (old_mask) {
		/* Already on for another CRTC; just sanity-check the state. */
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		goto out;
	}
	WARN_ON(pll->on);

	/* First active user: actually turn the PLL on. */
	DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
	pll->info->funcs->enable(dev_priv, pll);
	pll->on = true;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}
209 
/**
 * intel_disable_shared_dpll - disable a CRTC's shared DPLL
 * @crtc_state: CRTC, and its state, which has a shared DPLL
 *
 * Disable the shared DPLL used by @crtc.
 */
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll = crtc_state->shared_dpll;
	unsigned int crtc_mask = drm_crtc_mask(&crtc->base);

	/* PCH only available on ILK+ */
	if (INTEL_GEN(dev_priv) < 5)
		return;

	if (pll == NULL)
		return;

	mutex_lock(&dev_priv->dpll_lock);
	/* This CRTC must currently be marked as an active user. */
	if (WARN_ON(!(pll->active_mask & crtc_mask)))
		goto out;

	DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
		      pll->info->name, pll->active_mask, pll->on,
		      crtc->base.base.id);

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);

	/* Keep the PLL running while other CRTCs still use it. */
	pll->active_mask &= ~crtc_mask;
	if (pll->active_mask)
		goto out;

	/* Last active user gone: turn the PLL off. */
	DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
	pll->info->funcs->disable(dev_priv, pll);
	pll->on = false;

out:
	mutex_unlock(&dev_priv->dpll_lock);
}
252 
253 static struct intel_shared_dpll *
intel_find_shared_dpll(struct intel_atomic_state * state,const struct intel_crtc * crtc,const struct intel_dpll_hw_state * pll_state,unsigned long dpll_mask)254 intel_find_shared_dpll(struct intel_atomic_state *state,
255 		       const struct intel_crtc *crtc,
256 		       const struct intel_dpll_hw_state *pll_state,
257 		       unsigned long dpll_mask)
258 {
259 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
260 	struct intel_shared_dpll *pll, *unused_pll = NULL;
261 	struct intel_shared_dpll_state *shared_dpll;
262 	enum intel_dpll_id i;
263 
264 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
265 
266 	WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
267 
268 	for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
269 		pll = &dev_priv->shared_dplls[i];
270 
271 		/* Only want to check enabled timings first */
272 		if (shared_dpll[i].crtc_mask == 0) {
273 			if (!unused_pll)
274 				unused_pll = pll;
275 			continue;
276 		}
277 
278 		if (memcmp(pll_state,
279 			   &shared_dpll[i].hw_state,
280 			   sizeof(*pll_state)) == 0) {
281 			DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
282 				      crtc->base.base.id, crtc->base.name,
283 				      pll->info->name,
284 				      shared_dpll[i].crtc_mask,
285 				      pll->active_mask);
286 			return pll;
287 		}
288 	}
289 
290 	/* Ok no matching timings, maybe there's a free one? */
291 	if (unused_pll) {
292 		DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
293 			      crtc->base.base.id, crtc->base.name,
294 			      unused_pll->info->name);
295 		return unused_pll;
296 	}
297 
298 	return NULL;
299 }
300 
301 static void
intel_reference_shared_dpll(struct intel_atomic_state * state,const struct intel_crtc * crtc,const struct intel_shared_dpll * pll,const struct intel_dpll_hw_state * pll_state)302 intel_reference_shared_dpll(struct intel_atomic_state *state,
303 			    const struct intel_crtc *crtc,
304 			    const struct intel_shared_dpll *pll,
305 			    const struct intel_dpll_hw_state *pll_state)
306 {
307 	struct intel_shared_dpll_state *shared_dpll;
308 	const enum intel_dpll_id id = pll->info->id;
309 
310 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
311 
312 	if (shared_dpll[id].crtc_mask == 0)
313 		shared_dpll[id].hw_state = *pll_state;
314 
315 	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
316 			 pipe_name(crtc->pipe));
317 
318 	shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
319 }
320 
intel_unreference_shared_dpll(struct intel_atomic_state * state,const struct intel_crtc * crtc,const struct intel_shared_dpll * pll)321 static void intel_unreference_shared_dpll(struct intel_atomic_state *state,
322 					  const struct intel_crtc *crtc,
323 					  const struct intel_shared_dpll *pll)
324 {
325 	struct intel_shared_dpll_state *shared_dpll;
326 
327 	shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
328 	shared_dpll[pll->info->id].crtc_mask &= ~(1 << crtc->pipe);
329 }
330 
intel_put_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc)331 static void intel_put_dpll(struct intel_atomic_state *state,
332 			   struct intel_crtc *crtc)
333 {
334 	const struct intel_crtc_state *old_crtc_state =
335 		intel_atomic_get_old_crtc_state(state, crtc);
336 	struct intel_crtc_state *new_crtc_state =
337 		intel_atomic_get_new_crtc_state(state, crtc);
338 
339 	new_crtc_state->shared_dpll = NULL;
340 
341 	if (!old_crtc_state->shared_dpll)
342 		return;
343 
344 	intel_unreference_shared_dpll(state, crtc, old_crtc_state->shared_dpll);
345 }
346 
347 /**
348  * intel_shared_dpll_swap_state - make atomic DPLL configuration effective
349  * @state: atomic state
350  *
351  * This is the dpll version of drm_atomic_helper_swap_state() since the
352  * helper does not handle driver-specific global state.
353  *
354  * For consistency with atomic helpers this function does a complete swap,
355  * i.e. it also puts the current state into @state, even though there is no
356  * need for that at this moment.
357  */
intel_shared_dpll_swap_state(struct intel_atomic_state * state)358 void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
359 {
360 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
361 	struct intel_shared_dpll_state *shared_dpll = state->shared_dpll;
362 	enum intel_dpll_id i;
363 
364 	if (!state->dpll_set)
365 		return;
366 
367 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
368 		struct intel_shared_dpll *pll =
369 			&dev_priv->shared_dplls[i];
370 
371 		swap(pll->state, shared_dpll[i]);
372 	}
373 }
374 
/* Read back the PCH DPLL registers; returns whether the PLL is enabled. */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if display power is down; registers are unreadable then. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	/* Capture the DPLL control word and both FP divider registers. */
	val = I915_READ(PCH_DPLL(id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(id));
	hw_state->fp1 = I915_READ(PCH_FP1(id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	/* The PLL counts as enabled iff its VCO is running. */
	return val & DPLL_VCO_ENABLE;
}
397 
ibx_pch_dpll_prepare(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)398 static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
399 				 struct intel_shared_dpll *pll)
400 {
401 	const enum intel_dpll_id id = pll->info->id;
402 
403 	I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
404 	I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
405 }
406 
ibx_assert_pch_refclk_enabled(struct drm_i915_private * dev_priv)407 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
408 {
409 	u32 val;
410 	bool enabled;
411 
412 	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
413 
414 	val = I915_READ(PCH_DREF_CONTROL);
415 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
416 			    DREF_SUPERSPREAD_SOURCE_MASK));
417 	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
418 }
419 
/* Enable the PCH DPLL; ordering and the delays below are timing-critical. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
	POSTING_READ(PCH_DPLL(id));
	udelay(200);
}
443 
/* Disable the PCH DPLL: clear the control register and let it spin down. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	I915_WRITE(PCH_DPLL(id), 0);
	/* Flush the write, then give the PLL time to stop. */
	POSTING_READ(PCH_DPLL(id));
	udelay(200);
}
453 
ibx_get_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)454 static bool ibx_get_dpll(struct intel_atomic_state *state,
455 			 struct intel_crtc *crtc,
456 			 struct intel_encoder *encoder)
457 {
458 	struct intel_crtc_state *crtc_state =
459 		intel_atomic_get_new_crtc_state(state, crtc);
460 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
461 	struct intel_shared_dpll *pll;
462 	enum intel_dpll_id i;
463 
464 	if (HAS_PCH_IBX(dev_priv)) {
465 		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
466 		i = (enum intel_dpll_id) crtc->pipe;
467 		pll = &dev_priv->shared_dplls[i];
468 
469 		DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
470 			      crtc->base.base.id, crtc->base.name,
471 			      pll->info->name);
472 	} else {
473 		pll = intel_find_shared_dpll(state, crtc,
474 					     &crtc_state->dpll_hw_state,
475 					     BIT(DPLL_ID_PCH_PLL_B) |
476 					     BIT(DPLL_ID_PCH_PLL_A));
477 	}
478 
479 	if (!pll)
480 		return false;
481 
482 	/* reference the pll */
483 	intel_reference_shared_dpll(state, crtc,
484 				    pll, &crtc_state->dpll_hw_state);
485 
486 	crtc_state->shared_dpll = pll;
487 
488 	return true;
489 }
490 
/* Dump the IBX PCH DPLL hw state fields to the kernel debug log. */
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
		      "fp0: 0x%x, fp1: 0x%x\n",
		      hw_state->dpll,
		      hw_state->dpll_md,
		      hw_state->fp0,
		      hw_state->fp1);
}
501 
/* Hook table for the IBX/CPT PCH DPLLs. */
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
	.prepare = ibx_pch_dpll_prepare,
	.enable = ibx_pch_dpll_enable,
	.disable = ibx_pch_dpll_disable,
	.get_hw_state = ibx_pch_dpll_get_hw_state,
};
508 
/* Enable a HSW WRPLL by writing the staged control word. */
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;

	I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
	/* Flush the write and give the PLL time to lock. */
	POSTING_READ(WRPLL_CTL(id));
	udelay(20);
}
518 
/* Enable the HSW SPLL by writing the staged control word. */
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
	/* Flush the write and give the PLL time to lock. */
	POSTING_READ(SPLL_CTL);
	udelay(20);
}
526 
/* Disable a HSW WRPLL and reinit the PCH refclk if it was its last user. */
static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* Clear only the enable bit, preserving the rest of the config. */
	val = I915_READ(WRPLL_CTL(id));
	I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
	POSTING_READ(WRPLL_CTL(id));

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
544 
/* Disable the HSW SPLL and reinit the PCH refclk if it was its last user. */
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* Clear only the enable bit, preserving the rest of the config. */
	val = I915_READ(SPLL_CTL);
	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
	POSTING_READ(SPLL_CTL);

	/*
	 * Try to set up the PCH reference clock once all DPLLs
	 * that depend on it have been shut down.
	 */
	if (dev_priv->pch_ssc_use & BIT(id))
		intel_init_pch_refclk(dev_priv);
}
562 
/* Read back a WRPLL's control register; returns whether it is enabled. */
static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if display power is down; registers are unreadable then. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(WRPLL_CTL(id));
	hw_state->wrpll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & WRPLL_PLL_ENABLE;
}
583 
/* Read back the SPLL's control register; returns whether it is enabled. */
static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	intel_wakeref_t wakeref;
	u32 val;

	/* Bail if display power is down; registers are unreadable then. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(SPLL_CTL);
	hw_state->spll = val;

	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return val & SPLL_PLL_ENABLE;
}
603 
604 #define LC_FREQ 2700
605 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
606 
607 #define P_MIN 2
608 #define P_MAX 64
609 #define P_INC 2
610 
611 /* Constraints for PLL good behavior */
612 #define REF_MIN 48
613 #define REF_MAX 400
614 #define VCO_MIN 2400
615 #define VCO_MAX 4800
616 
/* Candidate WRPLL dividers: post divider p, and doubled n/r (n2, r2). */
struct hsw_wrpll_rnp {
	unsigned p, n2, r2;
};
620 
/*
 * Return the PPM error budget for the WRPLL at the given pixel clock
 * (in Hz).  Clocks not listed in the table get the default budget of
 * 1000 PPM.
 */
static unsigned hsw_wrpll_get_budget_for_freq(int clock)
{
	static const struct {
		int clock;
		unsigned budget;
	} budgets[] = {
		{ 25175000, 0 }, { 25200000, 0 }, { 27000000, 0 },
		{ 27027000, 0 }, { 37762500, 0 }, { 37800000, 0 },
		{ 40500000, 0 }, { 40541000, 0 }, { 54000000, 0 },
		{ 54054000, 0 }, { 59341000, 0 }, { 59400000, 0 },
		{ 72000000, 0 }, { 74176000, 0 }, { 74250000, 0 },
		{ 81000000, 0 }, { 81081000, 0 }, { 89012000, 0 },
		{ 89100000, 0 }, { 108000000, 0 }, { 108108000, 0 },
		{ 111264000, 0 }, { 111375000, 0 }, { 148352000, 0 },
		{ 148500000, 0 }, { 162000000, 0 }, { 162162000, 0 },
		{ 222525000, 0 }, { 222750000, 0 }, { 296703000, 0 },
		{ 297000000, 0 },
		{ 233500000, 1500 }, { 245250000, 1500 }, { 247750000, 1500 },
		{ 253250000, 1500 }, { 298000000, 1500 },
		{ 169128000, 2000 }, { 169500000, 2000 }, { 179500000, 2000 },
		{ 202000000, 2000 },
		{ 256250000, 4000 }, { 262500000, 4000 }, { 270000000, 4000 },
		{ 272500000, 4000 }, { 273750000, 4000 }, { 280750000, 4000 },
		{ 281250000, 4000 }, { 286000000, 4000 }, { 291750000, 4000 },
		{ 267250000, 5000 }, { 268500000, 5000 },
	};
	size_t i;

	for (i = 0; i < sizeof(budgets) / sizeof(budgets[0]); i++) {
		if (budgets[i].clock == clock)
			return budgets[i].budget;
	}

	return 1000;
}
694 
/*
 * Consider candidate dividers (r2, n2, p) for the target frequency
 * @freq2k (in 100 Hz units) and update *@best if they are preferable
 * under the PPM @budget.  All comparisons are done with cross-multiplied
 * integer arithmetic to avoid division.
 */
static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
				 unsigned int r2, unsigned int n2,
				 unsigned int p,
				 struct hsw_wrpll_rnp *best)
{
	u64 a, b, c, d, diff, diff_best;

	/* No best (r,n,p) yet */
	if (best->p == 0) {
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
		return;
	}

	/*
	 * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to
	 * freq2k.
	 *
	 * delta = 1e6 *
	 *	   abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) /
	 *	   freq2k;
	 *
	 * and we would like delta <= budget.
	 *
	 * If the discrepancy is above the PPM-based budget, always prefer to
	 * improve upon the previous solution.  However, if you're within the
	 * budget, try to maximize Ref * VCO, that is N / (P * R^2).
	 */
	/* a >= c means the candidate is within budget; b >= d likewise for best. */
	a = freq2k * budget * p * r2;
	b = freq2k * budget * best->p * best->r2;
	diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2);
	diff_best = abs_diff(freq2k * best->p * best->r2,
			     LC_FREQ_2K * best->n2);
	c = 1000000 * diff;
	d = 1000000 * diff_best;

	if (a < c && b < d) {
		/* If both are above the budget, pick the closer */
		if (best->p * best->r2 * diff < p * r2 * diff_best) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	} else if (a >= c && b < d) {
		/* If A is below the threshold but B is above it?  Update. */
		best->p = p;
		best->n2 = n2;
		best->r2 = r2;
	} else if (a >= c && b >= d) {
		/* Both are below the limit, so pick the higher n2/(r2*r2) */
		if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) {
			best->p = p;
			best->n2 = n2;
			best->r2 = r2;
		}
	}
	/* Otherwise a < c && b >= d, do nothing */
}
754 
/*
 * Exhaustively search WRPLL divider settings (r2, n2, p) for @clock,
 * keeping the candidate that hsw_wrpll_update_rnp() judges best within
 * the clock's PPM budget.  Results are returned via the out parameters.
 */
static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
			unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
	u64 freq2k;
	unsigned p, n2, r2;
	struct hsw_wrpll_rnp best = { 0, 0, 0 };
	unsigned budget;

	/* Work in 100 Hz units throughout the search. */
	freq2k = clock / 100;

	budget = hsw_wrpll_get_budget_for_freq(clock);

	/* Special case handling for 540 pixel clock: bypass WR PLL entirely
	 * and directly pass the LC PLL to it. */
	if (freq2k == 5400000) {
		*n2_out = 2;
		*p_out = 1;
		*r2_out = 2;
		return;
	}

	/*
	 * Ref = LC_FREQ / R, where Ref is the actual reference input seen by
	 * the WR PLL.
	 *
	 * We want R so that REF_MIN <= Ref <= REF_MAX.
	 * Injecting R2 = 2 * R gives:
	 *   REF_MAX * r2 > LC_FREQ * 2 and
	 *   REF_MIN * r2 < LC_FREQ * 2
	 *
	 * Which means the desired boundaries for r2 are:
	 *  LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN
	 *
	 */
	for (r2 = LC_FREQ * 2 / REF_MAX + 1;
	     r2 <= LC_FREQ * 2 / REF_MIN;
	     r2++) {

		/*
		 * VCO = N * Ref, that is: VCO = N * LC_FREQ / R
		 *
		 * Once again we want VCO_MIN <= VCO <= VCO_MAX.
		 * Injecting R2 = 2 * R and N2 = 2 * N, we get:
		 *   VCO_MAX * r2 > n2 * LC_FREQ and
		 *   VCO_MIN * r2 < n2 * LC_FREQ)
		 *
		 * Which means the desired boundaries for n2 are:
		 * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ
		 */
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1;
		     n2 <= VCO_MAX * r2 / LC_FREQ;
		     n2++) {

			/* Post divider is swept over all even values in range. */
			for (p = P_MIN; p <= P_MAX; p += P_INC)
				hsw_wrpll_update_rnp(freq2k, budget,
						     r2, n2, p, &best);
		}
	}

	*n2_out = best.n2;
	*p_out = best.p;
	*r2_out = best.r2;
}
819 
820 static struct intel_shared_dpll *
hsw_ddi_hdmi_get_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc)821 hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
822 		      struct intel_crtc *crtc)
823 {
824 	struct intel_crtc_state *crtc_state =
825 		intel_atomic_get_new_crtc_state(state, crtc);
826 	struct intel_shared_dpll *pll;
827 	u32 val;
828 	unsigned int p, n2, r2;
829 
830 	hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
831 
832 	val = WRPLL_PLL_ENABLE | WRPLL_REF_LCPLL |
833 	      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
834 	      WRPLL_DIVIDER_POST(p);
835 
836 	crtc_state->dpll_hw_state.wrpll = val;
837 
838 	pll = intel_find_shared_dpll(state, crtc,
839 				     &crtc_state->dpll_hw_state,
840 				     BIT(DPLL_ID_WRPLL2) |
841 				     BIT(DPLL_ID_WRPLL1));
842 
843 	if (!pll)
844 		return NULL;
845 
846 	return pll;
847 }
848 
849 static struct intel_shared_dpll *
hsw_ddi_dp_get_dpll(struct intel_crtc_state * crtc_state)850 hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
851 {
852 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
853 	struct intel_shared_dpll *pll;
854 	enum intel_dpll_id pll_id;
855 	int clock = crtc_state->port_clock;
856 
857 	switch (clock / 2) {
858 	case 81000:
859 		pll_id = DPLL_ID_LCPLL_810;
860 		break;
861 	case 135000:
862 		pll_id = DPLL_ID_LCPLL_1350;
863 		break;
864 	case 270000:
865 		pll_id = DPLL_ID_LCPLL_2700;
866 		break;
867 	default:
868 		DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
869 		return NULL;
870 	}
871 
872 	pll = intel_get_shared_dpll_by_id(dev_priv, pll_id);
873 
874 	if (!pll)
875 		return NULL;
876 
877 	return pll;
878 }
879 
/* Reserve the right kind of HSW PLL (WRPLL/LCPLL/SPLL) for the CRTC. */
static bool hsw_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;

	/* Start from a clean hw state before the per-output setup below. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		pll = hsw_ddi_hdmi_get_dpll(state, crtc);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		pll = hsw_ddi_dp_get_dpll(crtc_state);
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		/* Analog output is only supported at the fixed SPLL rate. */
		if (WARN_ON(crtc_state->port_clock / 2 != 135000))
			return false;

		crtc_state->dpll_hw_state.spll =
			SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;

		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SPLL));
	} else {
		return false;
	}

	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
919 
/* Dump the HSW WRPLL/SPLL hw state fields to the kernel debug log. */
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
		      hw_state->wrpll, hw_state->spll);
}
926 
/* Hook table for the HSW WRPLLs (no prepare step needed). */
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
	.enable = hsw_ddi_wrpll_enable,
	.disable = hsw_ddi_wrpll_disable,
	.get_hw_state = hsw_ddi_wrpll_get_hw_state,
};

/* Hook table for the HSW SPLL (no prepare step needed). */
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
	.enable = hsw_ddi_spll_enable,
	.disable = hsw_ddi_spll_disable,
	.get_hw_state = hsw_ddi_spll_get_hw_state,
};
938 
hsw_ddi_lcpll_enable(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)939 static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
940 				 struct intel_shared_dpll *pll)
941 {
942 }
943 
/* See hsw_ddi_lcpll_enable(): nothing to turn off for the fixed LCPLL taps. */
static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
948 
/*
 * The fixed LCPLL taps have no readable per-PLL state; report them as
 * always enabled.
 */
static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	return true;
}
955 
/* Vtable for the fixed-frequency LCPLL link rates (all hooks are no-ops). */
static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
	.enable = hsw_ddi_lcpll_enable,
	.disable = hsw_ddi_lcpll_disable,
	.get_hw_state = hsw_ddi_lcpll_get_hw_state,
};
961 
/* Per-PLL register set on SKL: enable/control plus the two config regs. */
struct skl_dpll_regs {
	i915_reg_t ctl, cfgcr1, cfgcr2;
};
965 
/* this array is indexed by the *shared* pll id */
static const struct skl_dpll_regs skl_dpll_regs[4] = {
	{
		/* DPLL 0 */
		.ctl = LCPLL1_CTL,
		/* DPLL 0 doesn't support HDMI mode */
	},
	{
		/* DPLL 1 */
		.ctl = LCPLL2_CTL,
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
	},
	{
		/* DPLL 2 */
		.ctl = WRPLL_CTL(0),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
	},
	{
		/* DPLL 3 */
		.ctl = WRPLL_CTL(1),
		.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
		.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
	},
};
992 
/*
 * Program this PLL's configuration (HDMI mode, SSC, link rate) into the
 * shared DPLL_CTRL1 register via read-modify-write, leaving the other
 * PLLs' fields untouched.
 */
static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
				    struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	val = I915_READ(DPLL_CTRL1);

	/* Each PLL owns a 6-bit field starting at bit id * 6. */
	val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
		 DPLL_CTRL1_SSC(id) |
		 DPLL_CTRL1_LINK_RATE_MASK(id));
	val |= pll->state.hw_state.ctrl1 << (id * 6);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);
}
1009 
/*
 * Enable a SKL shared DPLL (DPLL1-3): program DPLL_CTRL1 and the two
 * config registers, set the enable bit and wait (up to 5 ms) for lock.
 */
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	skl_ddi_pll_write_ctrl1(dev_priv, pll);

	I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
	I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
	POSTING_READ(regs[id].cfgcr1);
	POSTING_READ(regs[id].cfgcr2);

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[id].ctl,
		   I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);

	if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
		DRM_ERROR("DPLL %d not locked\n", id);
}
1030 
/*
 * DPLL0 is enabled by the CDCLK code; here we only update its link-rate
 * configuration in DPLL_CTRL1.
 */
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	skl_ddi_pll_write_ctrl1(dev_priv, pll);
}
1036 
/* Disable a SKL shared DPLL by clearing its enable bit. */
static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;

	/* the enable bit is always bit 31 */
	I915_WRITE(regs[id].ctl,
		   I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
	POSTING_READ(regs[id].ctl);
}
1048 
/* DPLL0 drives CDCLK and must stay enabled, so disabling is a no-op. */
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
}
1053 
/*
 * Read back the current hardware state of a SKL shared DPLL.
 * Returns false if display power is off or the PLL is not enabled;
 * otherwise fills @hw_state and returns true.
 */
static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	u32 val;
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret;

	/* Bail out without touching registers if the power well is down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = I915_READ(regs[id].ctl);
	if (!(val & LCPLL_PLL_ENABLE))
		goto out;

	/* Extract this PLL's 6-bit field from the shared control register. */
	val = I915_READ(DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CTRL1_HDMI_MODE(id)) {
		hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
		hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1090 
/*
 * Read back DPLL0's state.  DPLL0 only has a link-rate field in
 * DPLL_CTRL1 (no HDMI config registers) and is expected to be on.
 */
static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
				       struct intel_shared_dpll *pll,
				       struct intel_dpll_hw_state *hw_state)
{
	const struct skl_dpll_regs *regs = skl_dpll_regs;
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* DPLL0 is always enabled since it drives CDCLK */
	val = I915_READ(regs[id].ctl);
	if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
		goto out;

	val = I915_READ(DPLL_CTRL1);
	hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1123 
/* Running best-candidate state for the SKL WRPLL divider search. */
struct skl_wrpll_context {
	u64 min_deviation;		/* current minimal deviation */
	u64 central_freq;		/* chosen central freq */
	u64 dco_freq;			/* chosen dco freq */
	unsigned int p;			/* chosen divider */
};
1130 
skl_wrpll_context_init(struct skl_wrpll_context * ctx)1131 static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
1132 {
1133 	memset(ctx, 0, sizeof(*ctx));
1134 
1135 	ctx->min_deviation = U64_MAX;
1136 }
1137 
1138 /* DCO freq must be within +1%/-6%  of the DCO central freq */
1139 #define SKL_DCO_MAX_PDEVIATION	100
1140 #define SKL_DCO_MAX_NDEVIATION	600
1141 
skl_wrpll_try_divider(struct skl_wrpll_context * ctx,u64 central_freq,u64 dco_freq,unsigned int divider)1142 static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
1143 				  u64 central_freq,
1144 				  u64 dco_freq,
1145 				  unsigned int divider)
1146 {
1147 	u64 deviation;
1148 
1149 	deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
1150 			      central_freq);
1151 
1152 	/* positive deviation */
1153 	if (dco_freq >= central_freq) {
1154 		if (deviation < SKL_DCO_MAX_PDEVIATION &&
1155 		    deviation < ctx->min_deviation) {
1156 			ctx->min_deviation = deviation;
1157 			ctx->central_freq = central_freq;
1158 			ctx->dco_freq = dco_freq;
1159 			ctx->p = divider;
1160 		}
1161 	/* negative deviation */
1162 	} else if (deviation < SKL_DCO_MAX_NDEVIATION &&
1163 		   deviation < ctx->min_deviation) {
1164 		ctx->min_deviation = deviation;
1165 		ctx->central_freq = central_freq;
1166 		ctx->dco_freq = dco_freq;
1167 		ctx->p = divider;
1168 	}
1169 }
1170 
/*
 * Decompose the overall divider @p into the hardware's three-stage
 * multipliers P0 (pdiv), P1 (qdiv) and P2 (kdiv) with p == p0*p1*p2.
 * Outputs are left untouched if @p is not one of the supported values,
 * so callers pre-initialize them.
 */
static void skl_wrpll_get_multipliers(unsigned int p,
				      unsigned int *p0 /* out */,
				      unsigned int *p1 /* out */,
				      unsigned int *p2 /* out */)
{
	if (p % 2 == 0) {
		/* even dividers */
		unsigned int half = p / 2;

		switch (half) {
		case 1:
		case 2:
		case 3:
		case 5:
			*p0 = 2;
			*p1 = 1;
			*p2 = half;
			break;
		default:
			if (half % 2 == 0) {
				*p0 = 2;
				*p1 = half / 2;
				*p2 = 2;
			} else if (half % 3 == 0) {
				*p0 = 3;
				*p1 = half / 3;
				*p2 = 2;
			} else if (half % 7 == 0) {
				*p0 = 7;
				*p1 = half / 7;
				*p2 = 2;
			}
			break;
		}
	} else {
		/* odd dividers: 3, 5, 7, 9, 15, 21, 35 */
		switch (p) {
		case 3:
		case 9:
			*p0 = 3;
			*p1 = 1;
			*p2 = p / 3;
			break;
		case 5:
		case 7:
			*p0 = p;
			*p1 = 1;
			*p2 = 1;
			break;
		case 15:
			*p0 = 3;
			*p1 = 1;
			*p2 = 5;
			break;
		case 21:
			*p0 = 7;
			*p1 = 1;
			*p2 = 3;
			break;
		case 35:
			*p0 = 7;
			*p1 = 1;
			*p2 = 5;
			break;
		}
	}
}
1219 
/* Hardware encodings for the SKL WRPLL as written to DPLL_CFGCR1/2. */
struct skl_wrpll_params {
	u32 dco_fraction;	/* fractional part of DCO frequency */
	u32 dco_integer;	/* integer part of DCO frequency */
	u32 qdiv_ratio;		/* Q divider (P1) ratio */
	u32 qdiv_mode;		/* 1 = Q divider active, 0 = bypass */
	u32 kdiv;		/* encoded K divider (P2) */
	u32 pdiv;		/* encoded P divider (P0) */
	u32 central_freq;	/* encoded DCO central frequency */
};
1229 
/*
 * Convert the chosen central frequency and P0/P1/P2 multipliers into the
 * register encodings expected by DPLL_CFGCR1/2, and compute the DCO
 * integer/fraction values from the AFE clock.
 */
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u64 afe_clock,
				      u64 central_freq,
				      u32 p0, u32 p1, u32 p2)
{
	u64 dco_freq;

	/* Encode the DCO central frequency (Hz -> register value). */
	switch (central_freq) {
	case 9600000000ULL:
		params->central_freq = 0;
		break;
	case 9000000000ULL:
		params->central_freq = 1;
		break;
	case 8400000000ULL:
		params->central_freq = 3;
	}

	/* Encode P0 (pdiv); only 1/2/3/7 are representable. */
	switch (p0) {
	case 1:
		params->pdiv = 0;
		break;
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 7:
		params->pdiv = 4;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Encode P2 (kdiv); only 5/2/3/1 are representable. */
	switch (p2) {
	case 5:
		params->kdiv = 0;
		break;
	case 2:
		params->kdiv = 1;
		break;
	case 3:
		params->kdiv = 2;
		break;
	case 1:
		params->kdiv = 3;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	/* Q divider is bypassed when the ratio is 1. */
	params->qdiv_ratio = p1;
	params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1;

	dco_freq = p0 * p1 * p2 * afe_clock;

	/*
	 * Intermediate values are in Hz.
	 * Divide by MHz to match BSpec
	 */
	params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
	params->dco_fraction =
		div_u64((div_u64(dco_freq, 24) -
			 params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
1296 
1297 static bool
skl_ddi_calculate_wrpll(int clock,struct skl_wrpll_params * wrpll_params)1298 skl_ddi_calculate_wrpll(int clock /* in Hz */,
1299 			struct skl_wrpll_params *wrpll_params)
1300 {
1301 	u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
1302 	u64 dco_central_freq[3] = { 8400000000ULL,
1303 				    9000000000ULL,
1304 				    9600000000ULL };
1305 	static const int even_dividers[] = {  4,  6,  8, 10, 12, 14, 16, 18, 20,
1306 					     24, 28, 30, 32, 36, 40, 42, 44,
1307 					     48, 52, 54, 56, 60, 64, 66, 68,
1308 					     70, 72, 76, 78, 80, 84, 88, 90,
1309 					     92, 96, 98 };
1310 	static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
1311 	static const struct {
1312 		const int *list;
1313 		int n_dividers;
1314 	} dividers[] = {
1315 		{ even_dividers, ARRAY_SIZE(even_dividers) },
1316 		{ odd_dividers, ARRAY_SIZE(odd_dividers) },
1317 	};
1318 	struct skl_wrpll_context ctx;
1319 	unsigned int dco, d, i;
1320 	unsigned int p0, p1, p2;
1321 
1322 	skl_wrpll_context_init(&ctx);
1323 
1324 	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
1325 		for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
1326 			for (i = 0; i < dividers[d].n_dividers; i++) {
1327 				unsigned int p = dividers[d].list[i];
1328 				u64 dco_freq = p * afe_clock;
1329 
1330 				skl_wrpll_try_divider(&ctx,
1331 						      dco_central_freq[dco],
1332 						      dco_freq,
1333 						      p);
1334 				/*
1335 				 * Skip the remaining dividers if we're sure to
1336 				 * have found the definitive divider, we can't
1337 				 * improve a 0 deviation.
1338 				 */
1339 				if (ctx.min_deviation == 0)
1340 					goto skip_remaining_dividers;
1341 			}
1342 		}
1343 
1344 skip_remaining_dividers:
1345 		/*
1346 		 * If a solution is found with an even divider, prefer
1347 		 * this one.
1348 		 */
1349 		if (d == 0 && ctx.p)
1350 			break;
1351 	}
1352 
1353 	if (!ctx.p) {
1354 		DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
1355 		return false;
1356 	}
1357 
1358 	/*
1359 	 * gcc incorrectly analyses that these can be used without being
1360 	 * initialized. To be fair, it's hard to guess.
1361 	 */
1362 	p0 = p1 = p2 = 0;
1363 	skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
1364 	skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
1365 				  p0, p1, p2);
1366 
1367 	return true;
1368 }
1369 
/*
 * Compute and store the HDMI WRPLL state (ctrl1 + cfgcr1/2) for
 * @crtc_state.  Returns false if no valid dividers exist for the
 * requested port clock.
 */
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	u32 ctrl1, cfgcr1, cfgcr2;
	struct skl_wrpll_params wrpll_params = { 0, };

	/*
	 * See comment in intel_dpll_hw_state to understand why we always use 0
	 * as the DPLL id in this function.
	 */
	ctrl1 = DPLL_CTRL1_OVERRIDE(0);

	ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);

	/* port_clock is in kHz; the calculator expects Hz. */
	if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
				     &wrpll_params))
		return false;

	cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
		DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
		wrpll_params.central_freq;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
	return true;
}
1405 
1406 static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state * crtc_state)1407 skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1408 {
1409 	u32 ctrl1;
1410 
1411 	/*
1412 	 * See comment in intel_dpll_hw_state to understand why we always use 0
1413 	 * as the DPLL id in this function.
1414 	 */
1415 	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
1416 	switch (crtc_state->port_clock / 2) {
1417 	case 81000:
1418 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
1419 		break;
1420 	case 135000:
1421 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
1422 		break;
1423 	case 270000:
1424 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
1425 		break;
1426 		/* eDP 1.4 rates */
1427 	case 162000:
1428 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1429 		break;
1430 	case 108000:
1431 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1432 		break;
1433 	case 216000:
1434 		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0);
1435 		break;
1436 	}
1437 
1438 	memset(&crtc_state->dpll_hw_state, 0,
1439 	       sizeof(crtc_state->dpll_hw_state));
1440 
1441 	crtc_state->dpll_hw_state.ctrl1 = ctrl1;
1442 
1443 	return true;
1444 }
1445 
/*
 * Select and reserve a shared DPLL for the given CRTC on SKL.
 * eDP may only use DPLL0 (shared with CDCLK); other outputs pick from
 * DPLL1-3.  Returns false if no suitable PLL configuration exists.
 */
static bool skl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = skl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		return false;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL0));
	else
		pll = intel_find_shared_dpll(state, crtc,
					     &crtc_state->dpll_hw_state,
					     BIT(DPLL_ID_SKL_DPLL3) |
					     BIT(DPLL_ID_SKL_DPLL2) |
					     BIT(DPLL_ID_SKL_DPLL1));
	if (!pll)
		return false;

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1491 
/* Dump the SKL DPLL register values for debugging. */
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state)
{
	DRM_DEBUG_KMS("dpll_hw_state: "
		      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
		      hw_state->ctrl1,
		      hw_state->cfgcr1,
		      hw_state->cfgcr2);
}
1501 
/* Vtable for SKL DPLL1-3 (fully programmable shared PLLs). */
static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
	.enable = skl_ddi_pll_enable,
	.disable = skl_ddi_pll_disable,
	.get_hw_state = skl_ddi_pll_get_hw_state,
};
1507 
/* Vtable for SKL DPLL0, which also drives CDCLK and is never disabled. */
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
	.enable = skl_ddi_dpll0_enable,
	.disable = skl_ddi_dpll0_disable,
	.get_hw_state = skl_ddi_dpll0_get_hw_state,
};
1513 
/*
 * Enable a BXT/GLK port PLL: program all divider/coefficient registers
 * from the cached hw_state, then enable the PLL and wait for lock.
 * The register write order follows the hardware programming sequence
 * and must not be rearranged.
 */
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	u32 temp;
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	enum dpio_phy phy;
	enum dpio_channel ch;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Non-SSC reference */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_REF_SEL;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

	/* GLK needs the PLL power enabled before programming it. */
	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp |= PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				 PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not set for PLL:%d\n", port);
	}

	/* Disable 10 bit clock */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Write P1 & P2 */
	temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
	temp |= pll->state.hw_state.ebb0;
	I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);

	/* Write M2 integer */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	temp &= ~PORT_PLL_M2_MASK;
	temp |= pll->state.hw_state.pll0;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);

	/* Write N */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	temp &= ~PORT_PLL_N_MASK;
	temp |= pll->state.hw_state.pll1;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);

	/* Write M2 fraction */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	temp &= ~PORT_PLL_M2_FRAC_MASK;
	temp |= pll->state.hw_state.pll2;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);

	/* Write M2 fraction enable */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	temp &= ~PORT_PLL_M2_FRAC_ENABLE;
	temp |= pll->state.hw_state.pll3;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);

	/* Write coeff */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	temp &= ~PORT_PLL_PROP_COEFF_MASK;
	temp &= ~PORT_PLL_INT_COEFF_MASK;
	temp &= ~PORT_PLL_GAIN_CTL_MASK;
	temp |= pll->state.hw_state.pll6;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);

	/* Write calibration val */
	temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	temp &= ~PORT_PLL_TARGET_CNT_MASK;
	temp |= pll->state.hw_state.pll8;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);

	temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
	temp |= pll->state.hw_state.pll9;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);

	temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
	temp &= ~PORT_PLL_DCO_AMP_MASK;
	temp |= pll->state.hw_state.pll10;
	I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);

	/* Recalibrate with new settings */
	temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	temp |= PORT_PLL_RECALIBRATE;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
	temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
	temp |= pll->state.hw_state.ebb4;
	I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);

	/* Enable PLL */
	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp |= PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
			200))
		DRM_ERROR("PLL %d not locked\n", port);

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
		temp |= DCC_DELAY_RANGE_2;
		I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
	}

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers and we pick lanes 0/1 for that.
	 */
	temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	temp &= ~LANE_STAGGER_MASK;
	temp &= ~LANESTAGGER_STRAP_OVRD;
	temp |= pll->state.hw_state.pcsdw12;
	I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
1633 
/* Disable a BXT/GLK port PLL; on GLK also drop the PLL power. */
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	u32 temp;

	temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
	temp &= ~PORT_PLL_ENABLE;
	I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
	POSTING_READ(BXT_PORT_PLL_ENABLE(port));

	if (IS_GEMINILAKE(dev_priv)) {
		temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
		temp &= ~PORT_PLL_POWER_ENABLE;
		I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);

		if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
				PORT_PLL_POWER_STATE), 200))
			DRM_ERROR("Power state not reset for PLL:%d\n", port);
	}
}
1655 
/*
 * Read back the current hardware state of a BXT port PLL, masking each
 * register down to the bits this driver programs.  Returns false if
 * display power is off or the PLL is disabled.
 */
static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
					struct intel_shared_dpll *pll,
					struct intel_dpll_hw_state *hw_state)
{
	enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
	intel_wakeref_t wakeref;
	enum dpio_phy phy;
	enum dpio_channel ch;
	u32 val;
	bool ret;

	bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);

	/* Bail out without touching registers if the power well is down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	val = I915_READ(BXT_PORT_PLL_ENABLE(port));
	if (!(val & PORT_PLL_ENABLE))
		goto out;

	hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
	hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;

	hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
	hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;

	hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
	hw_state->pll0 &= PORT_PLL_M2_MASK;

	hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
	hw_state->pll1 &= PORT_PLL_N_MASK;

	hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
	hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;

	hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
	hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;

	hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
	hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
			  PORT_PLL_INT_COEFF_MASK |
			  PORT_PLL_GAIN_CTL_MASK;

	hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
	hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;

	hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
	hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;

	hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
	hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
			   PORT_PLL_DCO_AMP_MASK;

	/*
	 * While we write to the group register to program all lanes at once we
	 * can read only lane registers. We configure all lanes the same way, so
	 * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
	 */
	hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
	if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
		DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
				 hw_state->pcsdw12,
				 I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
	hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;

	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
1732 
/* bxt clock parameters */
struct bxt_clk_div {
	int clock;		/* link clock in kHz this entry applies to */
	u32 p1;			/* post divider 1 */
	u32 p2;			/* post divider 2 */
	u32 m2_int;		/* M2 feedback divider, integer part */
	u32 m2_frac;		/* M2 feedback divider, 22-bit fraction */
	bool m2_frac_en;	/* whether the fractional part is used */
	u32 n;			/* reference divider */

	int vco;		/* resulting VCO frequency in kHz */
};
1745 
/* pre-calculated values for DP linkrates */
/* Note: .vco is intentionally left 0 here; bxt_ddi_dp_pll_dividers()
 * computes it from the clock and the p1/p2 dividers. */
static const struct bxt_clk_div bxt_dp_clk_val[] = {
	{162000, 4, 2, 32, 1677722, 1, 1},
	{270000, 4, 1, 27,       0, 0, 1},
	{540000, 2, 1, 27,       0, 0, 1},
	{216000, 3, 2, 32, 1677722, 1, 1},
	{243000, 4, 1, 24, 1258291, 1, 1},
	{324000, 4, 1, 32, 1677722, 1, 1},
	{432000, 3, 1, 32, 1677722, 1, 1}
};
1756 
/*
 * Compute the BXT PLL dividers for an HDMI port clock via the generic
 * best-dpll search and unpack the result into @clk_div.
 * Returns false if no dividers can produce the requested clock.
 */
static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
			  struct bxt_clk_div *clk_div)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct dpll best_clock;

	/* Calculate HDMI div */
	/*
	 * FIXME: tie the following calculation into
	 * i9xx_crtc_compute_clock
	 */
	if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
		DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
				 crtc_state->port_clock,
				 pipe_name(crtc->pipe));
		return false;
	}

	clk_div->p1 = best_clock.p1;
	clk_div->p2 = best_clock.p2;
	WARN_ON(best_clock.m1 != 2);
	clk_div->n = best_clock.n;
	/* m2 is a 22.22 fixed-point value: split integer and fraction. */
	clk_div->m2_int = best_clock.m2 >> 22;
	clk_div->m2_frac = best_clock.m2 & ((1 << 22) - 1);
	clk_div->m2_frac_en = clk_div->m2_frac != 0;

	clk_div->vco = best_clock.vco;

	return true;
}
1788 
/*
 * Look up the pre-calculated divider set for a DP link rate and derive
 * the VCO frequency.  Falls back to the first table entry when the
 * clock has no exact match.
 */
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
				    struct bxt_clk_div *clk_div)
{
	int clock = crtc_state->port_clock;
	int i;

	*clk_div = bxt_dp_clk_val[0];
	for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) {
		if (bxt_dp_clk_val[i].clock == clock) {
			*clk_div = bxt_dp_clk_val[i];
			break;
		}
	}

	/* VCO = link clock (5x symbol clock / 2) times the post dividers. */
	clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
}
1805 
/*
 * Translate the computed dividers into BXT PLL register values, choosing
 * loop-filter coefficients and lane staggering based on the VCO and
 * clock ranges.  Returns false if the VCO is outside the valid ranges.
 */
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
				      const struct bxt_clk_div *clk_div)
{
	struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
	int clock = crtc_state->port_clock;
	int vco = clk_div->vco;
	u32 prop_coef, int_coef, gain_ctl, targ_cnt;
	u32 lanestagger;

	memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));

	/* Loop-filter coefficients per VCO band. */
	if (vco >= 6200000 && vco <= 6700000) {
		prop_coef = 4;
		int_coef = 9;
		gain_ctl = 3;
		targ_cnt = 8;
	} else if ((vco > 5400000 && vco < 6200000) ||
			(vco >= 4800000 && vco < 5400000)) {
		prop_coef = 5;
		int_coef = 11;
		gain_ctl = 3;
		targ_cnt = 9;
	} else if (vco == 5400000) {
		prop_coef = 3;
		int_coef = 8;
		gain_ctl = 1;
		targ_cnt = 9;
	} else {
		DRM_ERROR("Invalid VCO\n");
		return false;
	}

	/* Lane stagger delay scales with the port clock. */
	if (clock > 270000)
		lanestagger = 0x18;
	else if (clock > 135000)
		lanestagger = 0x0d;
	else if (clock > 67000)
		lanestagger = 0x07;
	else if (clock > 33000)
		lanestagger = 0x04;
	else
		lanestagger = 0x02;

	dpll_hw_state->ebb0 = PORT_PLL_P1(clk_div->p1) | PORT_PLL_P2(clk_div->p2);
	dpll_hw_state->pll0 = clk_div->m2_int;
	dpll_hw_state->pll1 = PORT_PLL_N(clk_div->n);
	dpll_hw_state->pll2 = clk_div->m2_frac;

	if (clk_div->m2_frac_en)
		dpll_hw_state->pll3 = PORT_PLL_M2_FRAC_ENABLE;

	dpll_hw_state->pll6 = prop_coef | PORT_PLL_INT_COEFF(int_coef);
	dpll_hw_state->pll6 |= PORT_PLL_GAIN_CTL(gain_ctl);

	dpll_hw_state->pll8 = targ_cnt;

	dpll_hw_state->pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT;

	dpll_hw_state->pll10 =
		PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT)
		| PORT_PLL_DCO_AMP_OVR_EN_H;

	dpll_hw_state->ebb4 = PORT_PLL_10BIT_CLK_ENABLE;

	dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;

	return true;
}
1874 
/* Compute and store the BXT PLL state for a DP output. */
static bool
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	struct bxt_clk_div clk_div = {};

	/* The DP divider lookup cannot fail (it falls back to entry 0). */
	bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);

	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
1884 
1885 static bool
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state * crtc_state)1886 bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
1887 {
1888 	struct bxt_clk_div clk_div = {};
1889 
1890 	bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
1891 
1892 	return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
1893 }
1894 
/*
 * Reserve the PLL for the given CRTC on BXT.  PLLs map 1:1 to ports,
 * so there is no search: the encoder's port selects the PLL directly.
 */
static bool bxt_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum intel_dpll_id id;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
	    !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
		return false;

	if (intel_crtc_has_dp_encoder(crtc_state) &&
	    !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
		return false;

	/* 1:1 mapping between ports and PLLs */
	id = (enum intel_dpll_id) encoder->port;
	pll = intel_get_shared_dpll_by_id(dev_priv, id);

	DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
		      crtc->base.base.id, crtc->base.name, pll->info->name);

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
1927 
bxt_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)1928 static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
1929 			      const struct intel_dpll_hw_state *hw_state)
1930 {
1931 	DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
1932 		      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
1933 		      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
1934 		      hw_state->ebb0,
1935 		      hw_state->ebb4,
1936 		      hw_state->pll0,
1937 		      hw_state->pll1,
1938 		      hw_state->pll2,
1939 		      hw_state->pll3,
1940 		      hw_state->pll6,
1941 		      hw_state->pll8,
1942 		      hw_state->pll9,
1943 		      hw_state->pll10,
1944 		      hw_state->pcsdw12);
1945 }
1946 
/* Enable/disable/readout hooks shared by all BXT port PLLs. */
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
	.enable = bxt_ddi_pll_enable,
	.disable = bxt_ddi_pll_disable,
	.get_hw_state = bxt_ddi_pll_get_hw_state,
};
1952 
/*
 * struct intel_dpll_mgr - per-platform shared DPLL management vtable
 *
 * @dpll_info: table describing the platform's PLLs, terminated by an
 *	empty entry.
 * @get_dplls: compute the desired hw state and reserve PLL(s) for a
 *	crtc in the atomic state; returns false on failure.
 * @put_dplls: release the crtc's PLL reference(s) in the atomic state.
 * @update_active_dpll: update which of a crtc's reserved PLLs is the
 *	active one for the given encoder (only set on platforms that
 *	need it).
 * @dump_hw_state: log an intel_dpll_hw_state for debugging.
 */
struct intel_dpll_mgr {
	const struct dpll_info *dpll_info;

	bool (*get_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc,
			  struct intel_encoder *encoder);
	void (*put_dplls)(struct intel_atomic_state *state,
			  struct intel_crtc *crtc);
	void (*update_active_dpll)(struct intel_atomic_state *state,
				   struct intel_crtc *crtc,
				   struct intel_encoder *encoder);
	void (*dump_hw_state)(struct drm_i915_private *dev_priv,
			      const struct intel_dpll_hw_state *hw_state);
};
1967 
/* PCH (IBX/CPT) DPLL table and manager. */
static const struct dpll_info pch_plls[] = {
	{ "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
	{ "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
	{ },
};

static const struct intel_dpll_mgr pch_pll_mgr = {
	.dpll_info = pch_plls,
	.get_dplls = ibx_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = ibx_dump_hw_state,
};

/*
 * HSW/BDW DPLL table and manager.  The LCPLLs are always-on reference
 * PLLs and are never enabled/disabled by the shared DPLL code.
 */
static const struct dpll_info hsw_plls[] = {
	{ "WRPLL 1",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1,     0 },
	{ "WRPLL 2",    &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2,     0 },
	{ "SPLL",       &hsw_ddi_spll_funcs,  DPLL_ID_SPLL,       0 },
	{ "LCPLL 810",  &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810,  INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
	{ "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
	{ },
};

static const struct intel_dpll_mgr hsw_pll_mgr = {
	.dpll_info = hsw_plls,
	.get_dplls = hsw_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = hsw_dump_hw_state,
};

/* SKL/KBL DPLL table and manager; DPLL 0 is always-on (drives cdclk). */
static const struct dpll_info skl_plls[] = {
	{ "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
	{ "DPLL 1", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL2, 0 },
	{ "DPLL 3", &skl_ddi_pll_funcs,   DPLL_ID_SKL_DPLL3, 0 },
	{ },
};

static const struct intel_dpll_mgr skl_pll_mgr = {
	.dpll_info = skl_plls,
	.get_dplls = skl_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = skl_dump_hw_state,
};

/* BXT/GLK per-port PLL table (reuses the SKL id space) and manager. */
static const struct dpll_info bxt_plls[] = {
	{ "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

static const struct intel_dpll_mgr bxt_pll_mgr = {
	.dpll_info = bxt_plls,
	.get_dplls = bxt_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = bxt_dump_hw_state,
};
2026 
/*
 * cnl_ddi_pll_enable - power up, configure and lock a CNL DPLL
 *
 * Follows the numbered hardware enable sequence: power up the PLL,
 * program CFGCR0/CFGCR1 from the precomputed pll->state.hw_state,
 * then enable the PLL and wait for lock.  Timeouts only log an error;
 * the sequence continues regardless.
 */
static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
			       struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/* 1. Enable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val |= PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
				  PLL_POWER_STATE, 5))
		DRM_ERROR("PLL %d Power not enabled\n", id);

	/*
	 * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
	 * select DP mode, and set DP link rate.
	 */
	val = pll->state.hw_state.cfgcr0;
	I915_WRITE(CNL_DPLL_CFGCR0(id), val);

	/* 4. Read back to ensure writes completed */
	POSTING_READ(CNL_DPLL_CFGCR0(id));

	/* 3b. Configure DPLL_CFGCR1 (HDMI dividers). */
	/* Avoid touching CFGCR1 if HDMI mode is not enabled */
	if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
		val = pll->state.hw_state.cfgcr1;
		I915_WRITE(CNL_DPLL_CFGCR1(id), val);
		/* 4. Read back to ensure writes completed */
		POSTING_READ(CNL_DPLL_CFGCR1(id));
	}

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Enable DPLL in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val |= PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for PLL lock status in DPLL_ENABLE. */
	if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		DRM_ERROR("PLL %d not locked\n", id);

	/*
	 * 8. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/*
	 * 9. turn on the clock for the DDI and map the DPLL to the DDI
	 * Done at intel_ddi_clk_select
	 */
}
2094 
/*
 * cnl_ddi_pll_disable - disable and power down a CNL DPLL
 *
 * Mirror image of cnl_ddi_pll_enable(): drop PLL_ENABLE, wait for the
 * lock bit to clear, then drop PLL_POWER_ENABLE and wait for the power
 * state bit to clear.  Timeouts only log an error.
 */
static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	const enum intel_dpll_id id = pll->info->id;
	u32 val;

	/*
	 * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
	 * Done at intel_ddi_post_disable
	 */

	/*
	 * 2. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence Before Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 3. Disable DPLL through DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val &= ~PLL_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
		DRM_ERROR("PLL %d locked\n", id);

	/*
	 * 5. If the frequency will result in a change to the voltage
	 * requirement, follow the Display Voltage Frequency Switching
	 * Sequence After Frequency Change
	 *
	 * Note: DVFS is actually handled via the cdclk code paths,
	 * hence we do nothing here.
	 */

	/* 6. Disable DPLL power in DPLL_ENABLE. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	val &= ~PLL_POWER_ENABLE;
	I915_WRITE(CNL_DPLL_ENABLE(id), val);

	/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
	if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
				    PLL_POWER_STATE, 5))
		DRM_ERROR("PLL %d Power not disabled\n", id);
}
2143 
/*
 * cnl_ddi_pll_get_hw_state - read back the current state of a CNL DPLL
 *
 * Fills @hw_state (cfgcr0, and cfgcr1 only when HDMI mode is enabled)
 * from the hardware.  Returns true if the PLL is currently enabled,
 * false if it is disabled or the display power well could not be
 * acquired.  Takes and releases a DISPLAY_CORE power wakeref around
 * the register reads.
 */
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
				     struct intel_shared_dpll *pll,
				     struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	u32 val;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	ret = false;

	/* PLL disabled: leave hw_state untouched and report false. */
	val = I915_READ(CNL_DPLL_ENABLE(id));
	if (!(val & PLL_ENABLE))
		goto out;

	val = I915_READ(CNL_DPLL_CFGCR0(id));
	hw_state->cfgcr0 = val;

	/* avoid reading back stale values if HDMI mode is not enabled */
	if (val & DPLL_CFGCR0_HDMI_MODE) {
		hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
	}
	ret = true;

out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);

	return ret;
}
2178 
/*
 * Factor a WRPLL post-divider into its P, Q and K components such that
 * bestdiv == pdiv * qdiv * kdiv.  The candidate ordering matches the
 * hardware's preferred factorizations; for an even divider that matches
 * none of the cases the outputs are left untouched (the divider tables
 * only contain representable values).
 */
static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
				      int *qdiv, int *kdiv)
{
	if (bestdiv & 1) {
		/* odd dividers: 3, 5, 7 directly; 9, 15, 21 are 3 * {3, 5, 7} */
		if (bestdiv == 3 || bestdiv == 5 || bestdiv == 7) {
			*pdiv = bestdiv;
			*qdiv = 1;
			*kdiv = 1;
		} else {
			*pdiv = bestdiv / 3;
			*qdiv = 1;
			*kdiv = 3;
		}
		return;
	}

	/* even dividers */
	if (bestdiv == 2) {
		*pdiv = 2;
		*qdiv = 1;
		*kdiv = 1;
	} else if (!(bestdiv % 4)) {
		*pdiv = 2;
		*qdiv = bestdiv / 4;
		*kdiv = 2;
	} else if (!(bestdiv % 6)) {
		*pdiv = 3;
		*qdiv = bestdiv / 6;
		*kdiv = 2;
	} else if (!(bestdiv % 5)) {
		*pdiv = 5;
		*qdiv = bestdiv / 10;
		*kdiv = 2;
	} else if (!(bestdiv % 14)) {
		*pdiv = 7;
		*qdiv = bestdiv / 14;
		*kdiv = 2;
	}
}
2217 
/*
 * cnl_wrpll_params_populate - encode WRPLL dividers into register fields
 *
 * Translates the logical kdiv/pdiv/qdiv values into their hardware
 * register encodings (kdiv 1->1, 2->2, 3->4; pdiv 2->1, 3->2, 5->4,
 * 7->8) and computes the DCO ratio dco_freq/ref_freq as a fixed-point
 * value with a 15-bit fraction.  An unexpected kdiv/pdiv only WARNs
 * and leaves the corresponding field unset.
 */
static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
				      u32 dco_freq, u32 ref_freq,
				      int pdiv, int qdiv, int kdiv)
{
	u32 dco;

	switch (kdiv) {
	case 1:
		params->kdiv = 1;
		break;
	case 2:
		params->kdiv = 2;
		break;
	case 3:
		params->kdiv = 4;
		break;
	default:
		WARN(1, "Incorrect KDiv\n");
	}

	switch (pdiv) {
	case 2:
		params->pdiv = 1;
		break;
	case 3:
		params->pdiv = 2;
		break;
	case 5:
		params->pdiv = 4;
		break;
	case 7:
		params->pdiv = 8;
		break;
	default:
		WARN(1, "Incorrect PDiv\n");
	}

	/* Q divider is only meaningful when kdiv == 2. */
	WARN_ON(kdiv != 2 && qdiv != 1);

	params->qdiv_ratio = qdiv;
	params->qdiv_mode = (qdiv == 1) ? 0 : 1;

	/* DCO ratio in 15.15-style fixed point: integer part and 15-bit fraction. */
	dco = div_u64((u64)dco_freq << 15, ref_freq);

	params->dco_integer = dco >> 15;
	params->dco_fraction = dco & 0x7fff;
}
2265 
cnl_hdmi_pll_ref_clock(struct drm_i915_private * dev_priv)2266 int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
2267 {
2268 	int ref_clock = dev_priv->cdclk.hw.ref;
2269 
2270 	/*
2271 	 * For ICL+, the spec states: if reference frequency is 38.4,
2272 	 * use 19.2 because the DPLL automatically divides that by 2.
2273 	 */
2274 	if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
2275 		ref_clock = 19200;
2276 
2277 	return ref_clock;
2278 }
2279 
/*
 * cnl_ddi_calculate_wrpll - find WRPLL settings for an HDMI port clock
 *
 * Picks, from the fixed divider list, the divider whose resulting DCO
 * frequency (afe_clock * divider) lies within [dco_min, dco_max] and is
 * closest to the midpoint of that range, then factors it into P/Q/K and
 * fills @wrpll_params.  Frequencies are in kHz (afe_clock is 5x the
 * port clock).  Returns false if no divider yields an in-range DCO.
 */
static bool
cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
			struct skl_wrpll_params *wrpll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 afe_clock = crtc_state->port_clock * 5;
	u32 ref_clock;
	u32 dco_min = 7998000;
	u32 dco_max = 10000000;
	u32 dco_mid = (dco_min + dco_max) / 2;
	/* Even dividers first (preferred), odd dividers last. */
	static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
					 18, 20, 24, 28, 30, 32,  36,  40,
					 42, 44, 48, 50, 52, 54,  56,  60,
					 64, 66, 68, 70, 72, 76,  78,  80,
					 84, 88, 90, 92, 96, 98, 100, 102,
					  3,  5,  7,  9, 15, 21 };
	u32 dco, best_dco = 0, dco_centrality = 0;
	u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
	int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;

	for (d = 0; d < ARRAY_SIZE(dividers); d++) {
		dco = afe_clock * dividers[d];

		if ((dco <= dco_max) && (dco >= dco_min)) {
			dco_centrality = abs(dco - dco_mid);

			if (dco_centrality < best_dco_centrality) {
				best_dco_centrality = dco_centrality;
				best_div = dividers[d];
				best_dco = dco;
			}
		}
	}

	/* No divider produced an in-range DCO frequency. */
	if (best_div == 0)
		return false;

	cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);

	ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);

	cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
				  pdiv, qdiv, kdiv);

	return true;
}
2326 
/*
 * cnl_ddi_hdmi_pll_dividers - precompute CFGCR0/CFGCR1 for an HDMI output
 *
 * Calculates the WRPLL parameters for the crtc's port clock and encodes
 * them into crtc_state->dpll_hw_state (cfgcr0 with HDMI mode + DCO,
 * cfgcr1 with the dividers).  Returns false if no WRPLL settings exist
 * for the clock.
 */
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0, cfgcr1;
	struct skl_wrpll_params wrpll_params = { 0, };

	cfgcr0 = DPLL_CFGCR0_HDMI_MODE;

	if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
		return false;

	cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
		wrpll_params.dco_integer;

	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
		DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
		DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
		DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
		DPLL_CFGCR1_CENTRAL_FREQ;

	/* Start from a clean slate; only cfgcr0/cfgcr1 are meaningful here. */
	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
	crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
	return true;
}
2353 
/*
 * cnl_ddi_dp_set_dpll_hw_state - precompute CFGCR0 for a DP output
 *
 * Selects the DPLL link-rate field from the symbol clock
 * (port_clock / 2) and stores it, with SSC enabled, in
 * crtc_state->dpll_hw_state.cfgcr0.  Always returns true; an
 * unrecognized rate leaves only the SSC enable bit set (there is no
 * default case in the switch).
 */
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
	u32 cfgcr0;

	cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;

	switch (crtc_state->port_clock / 2) {
	case 81000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
		break;
	case 135000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
		break;
	case 270000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
		break;
		/* eDP 1.4 rates */
	case 162000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
		break;
	case 108000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
		break;
	case 216000:
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
		break;
	case 324000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
		break;
	case 405000:
		/* Some SKUs may require elevated I/O voltage to support this */
		cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
		break;
	}

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;

	return true;
}
2398 
/*
 * cnl_get_dpll - compute and reserve a shared DPLL for a CNL crtc
 *
 * Precomputes the desired hw state for the output type, then searches
 * DPLL0-2 for a PLL whose state matches (or is free) and takes a
 * reference on it.  Returns false if the state could not be computed,
 * the output type is unsupported, or no PLL is available.
 */
static bool cnl_get_dpll(struct intel_atomic_state *state,
			 struct intel_crtc *crtc,
			 struct intel_encoder *encoder)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_shared_dpll *pll;
	bool bret;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
		bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
			return false;
		}
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
		if (!bret) {
			DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
			return false;
		}
	} else {
		DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
			      crtc_state->output_types);
		return false;
	}

	/* Any of DPLL 0-2 can serve the output; pick a matching/free one. */
	pll = intel_find_shared_dpll(state, crtc,
				     &crtc_state->dpll_hw_state,
				     BIT(DPLL_ID_SKL_DPLL2) |
				     BIT(DPLL_ID_SKL_DPLL1) |
				     BIT(DPLL_ID_SKL_DPLL0));
	if (!pll) {
		DRM_DEBUG_KMS("No PLL selected\n");
		return false;
	}

	intel_reference_shared_dpll(state, crtc,
				    pll, &crtc_state->dpll_hw_state);

	crtc_state->shared_dpll = pll;

	return true;
}
2443 
cnl_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)2444 static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
2445 			      const struct intel_dpll_hw_state *hw_state)
2446 {
2447 	DRM_DEBUG_KMS("dpll_hw_state: "
2448 		      "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
2449 		      hw_state->cfgcr0,
2450 		      hw_state->cfgcr1);
2451 }
2452 
/* Enable/disable/readout hooks shared by all CNL DPLLs. */
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
	.enable = cnl_ddi_pll_enable,
	.disable = cnl_ddi_pll_disable,
	.get_hw_state = cnl_ddi_pll_get_hw_state,
};

/* CNL DPLL table (reuses the SKL id space) and manager. */
static const struct dpll_info cnl_plls[] = {
	{ "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
	{ "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
	{ "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
	{ },
};

static const struct intel_dpll_mgr cnl_pll_mgr = {
	.dpll_info = cnl_plls,
	.get_dplls = cnl_get_dpll,
	.put_dplls = intel_put_dpll,
	.dump_hw_state = cnl_dump_hw_state,
};
2472 
/* Lookup entry pairing a port clock (kHz) with precomputed WRPLL params. */
struct icl_combo_pll_params {
	int clock;
	struct skl_wrpll_params wrpll;
};
2477 
/*
 * ICL combo-PHY DP PLL parameters for a 24 MHz reference clock.
 * These values are already adjusted: they're the bits we write to the
 * registers, not the logical values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x168, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x195, .dco_fraction = 0x0000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x151, .dco_fraction = 0x4000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2508 
2509 
/*
 * ICL combo-PHY DP PLL parameters for a 19.2 MHz reference clock.
 * Also used for 38.4 MHz values.
 */
static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
	{ 540000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [0]: 5.4 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 270000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [1]: 2.7 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 162000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [2]: 1.62 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 324000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [3]: 3.24 */
	    .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 216000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [4]: 2.16 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
	{ 432000,
	  { .dco_integer = 0x1C2, .dco_fraction = 0x0000,		/* [5]: 4.32 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 648000,
	  { .dco_integer = 0x1FA, .dco_fraction = 0x2000,		/* [6]: 6.48 */
	    .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
	{ 810000,
	  { .dco_integer = 0x1A5, .dco_fraction = 0x7000,		/* [7]: 8.1 */
	    .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
2537 
/* Thunderbolt PLL parameters, per generation and reference clock. */
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
	.dco_integer = 0x151, .dco_fraction = 0x4000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x1A5, .dco_fraction = 0x7000,
	.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = {
	.dco_integer = 0x54, .dco_fraction = 0x3000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};

static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
	.dco_integer = 0x43, .dco_fraction = 0x4000,
	/* the following params are unused */
	.pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0,
};
2559 
/*
 * icl_calc_dp_combo_pll - look up precomputed combo-PHY DP PLL params
 *
 * Selects the table matching the reference clock and copies the entry
 * for the crtc's port clock into @pll_params.  Returns false (after
 * logging via MISSING_CASE) for unknown port clocks.
 */
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
				  struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	const struct icl_combo_pll_params *params =
		dev_priv->cdclk.hw.ref == 24000 ?
		icl_dp_combo_pll_24MHz_values :
		icl_dp_combo_pll_19_2MHz_values;
	int clock = crtc_state->port_clock;
	int i;

	/*
	 * NOTE(review): the loop bound uses the 24 MHz table's size even
	 * when the 19.2/38.4 MHz table is selected; this is only safe
	 * because both tables have the same number of entries — keep
	 * them in sync if either table changes.
	 */
	for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
		if (clock == params[i].clock) {
			*pll_params = params[i].wrpll;
			return true;
		}
	}

	MISSING_CASE(clock);
	return false;
}
2581 
/*
 * icl_calc_tbt_pll - pick precomputed Thunderbolt PLL parameters
 *
 * Selects the generation- and reference-clock-specific table entry.
 * An unexpected reference clock logs MISSING_CASE and deliberately
 * falls through to the 19.2 MHz values.  Always returns true.
 */
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
			     struct skl_wrpll_params *pll_params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);

	if (INTEL_GEN(dev_priv) >= 12) {
		switch (dev_priv->cdclk.hw.ref) {
		default:
			MISSING_CASE(dev_priv->cdclk.hw.ref);
			/* fall-through */
		case 19200:
		case 38400:
			*pll_params = tgl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = tgl_tbt_pll_24MHz_values;
			break;
		}
	} else {
		switch (dev_priv->cdclk.hw.ref) {
		default:
			MISSING_CASE(dev_priv->cdclk.hw.ref);
			/* fall-through */
		case 19200:
		case 38400:
			*pll_params = icl_tbt_pll_19_2MHz_values;
			break;
		case 24000:
			*pll_params = icl_tbt_pll_24MHz_values;
			break;
		}
	}

	return true;
}
2617 
/*
 * icl_calc_dpll_state - compute cfgcr0/cfgcr1 for an ICL+ combo/TBT PLL
 *
 * Chooses the parameter source by output: Thunderbolt tables for TC
 * PHYs, the CNL WRPLL algorithm for HDMI/DSI, and the DP lookup tables
 * otherwise; then encodes the result into @pll_state.  Returns false
 * if the parameters could not be computed.
 */
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
				struct intel_encoder *encoder,
				struct intel_dpll_hw_state *pll_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
	u32 cfgcr0, cfgcr1;
	struct skl_wrpll_params pll_params = { 0 };
	bool ret;

	if (intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv,
							encoder->port)))
		ret = icl_calc_tbt_pll(crtc_state, &pll_params);
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
		ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
	else
		ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);

	if (!ret)
		return false;

	cfgcr0 = DPLL_CFGCR0_DCO_FRACTION(pll_params.dco_fraction) |
		 pll_params.dco_integer;

	cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(pll_params.qdiv_ratio) |
		 DPLL_CFGCR1_QDIV_MODE(pll_params.qdiv_mode) |
		 DPLL_CFGCR1_KDIV(pll_params.kdiv) |
		 DPLL_CFGCR1_PDIV(pll_params.pdiv);

	/* The central-frequency/clock-select field differs by generation. */
	if (INTEL_GEN(dev_priv) >= 12)
		cfgcr1 |= TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL;
	else
		cfgcr1 |= DPLL_CFGCR1_CENTRAL_FREQ_8400;

	memset(pll_state, 0, sizeof(*pll_state));

	pll_state->cfgcr0 = cfgcr0;
	pll_state->cfgcr1 = cfgcr1;

	return true;
}
2659 
2660 
/* Map an MG PLL id to its Type-C port (MGPLL1 corresponds to the first TC port). */
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
	return id - DPLL_ID_ICL_MGPLL1;
}

/* Inverse of icl_pll_id_to_tc_port(). */
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
{
	return tc_port + DPLL_ID_ICL_MGPLL1;
}
2670 
/*
 * icl_mg_pll_find_divisors - find MG/DKL PHY PLL dividers for a clock
 *
 * Searches div1 (7, 5, 3, 2 — in that order) and div2 (10..1) for the
 * first combination whose DCO frequency (div1 * div2 * clock * 5, kHz)
 * lies within the allowed range, then fills in @*target_dco_khz and the
 * refclkin/coreclk/hsclk fields of @state.  Returns false if no divider
 * pair yields an in-range DCO.
 *
 * Improvement over the original: div1_vals is now static const instead
 * of being re-initialized on the stack on every call.
 */
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
				     u32 *target_dco_khz,
				     struct intel_dpll_hw_state *state,
				     bool is_dkl)
{
	u32 dco_min_freq, dco_max_freq;
	static const int div1_vals[] = {7, 5, 3, 2};
	unsigned int i;
	int div2;

	/* DP pins the DCO to exactly 8.1 GHz; HDMI allows a range. */
	dco_min_freq = is_dp ? 8100000 : use_ssc ? 8000000 : 7992000;
	dco_max_freq = is_dp ? 8100000 : 10000000;

	for (i = 0; i < ARRAY_SIZE(div1_vals); i++) {
		int div1 = div1_vals[i];

		for (div2 = 10; div2 > 0; div2--) {
			int dco = div1 * div2 * clock_khz * 5;
			int a_divratio, tlinedrv, inputsel;
			u32 hsdiv;

			if (dco < dco_min_freq || dco > dco_max_freq)
				continue;

			if (div2 >= 2) {
				/*
				 * Note: a_divratio not matching TGL BSpec
				 * algorithm but matching hardcoded values and
				 * working on HW for DP alt-mode at least
				 */
				a_divratio = is_dp ? 10 : 5;
				tlinedrv = is_dkl ? 1 : 2;
			} else {
				a_divratio = 5;
				tlinedrv = 0;
			}
			inputsel = is_dp ? 0 : 1;

			switch (div1) {
			default:
				MISSING_CASE(div1);
				/* fall through */
			case 2:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2;
				break;
			case 3:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3;
				break;
			case 5:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5;
				break;
			case 7:
				hsdiv = MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7;
				break;
			}

			*target_dco_khz = dco;

			state->mg_refclkin_ctl = MG_REFCLKIN_CTL_OD_2_MUX(1);

			state->mg_clktop2_coreclkctl1 =
				MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(a_divratio);

			state->mg_clktop2_hsclkctl =
				MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(tlinedrv) |
				MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(inputsel) |
				hsdiv |
				MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);

			return true;
		}
	}

	return false;
}
2746 
2747 /*
2748  * The specification for this function uses real numbers, so the math had to be
2749  * adapted to integer-only calculation, that's why it looks so different.
2750  */
icl_calc_mg_pll_state(struct intel_crtc_state * crtc_state,struct intel_dpll_hw_state * pll_state)2751 static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
2752 				  struct intel_dpll_hw_state *pll_state)
2753 {
2754 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2755 	int refclk_khz = dev_priv->cdclk.hw.ref;
2756 	int clock = crtc_state->port_clock;
2757 	u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
2758 	u32 iref_ndiv, iref_trim, iref_pulse_w;
2759 	u32 prop_coeff, int_coeff;
2760 	u32 tdc_targetcnt, feedfwgain;
2761 	u64 ssc_stepsize, ssc_steplen, ssc_steplog;
2762 	u64 tmp;
2763 	bool use_ssc = false;
2764 	bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
2765 	bool is_dkl = INTEL_GEN(dev_priv) >= 12;
2766 
2767 	memset(pll_state, 0, sizeof(*pll_state));
2768 
2769 	if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
2770 				      pll_state, is_dkl)) {
2771 		DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
2772 		return false;
2773 	}
2774 
2775 	m1div = 2;
2776 	m2div_int = dco_khz / (refclk_khz * m1div);
2777 	if (m2div_int > 255) {
2778 		if (!is_dkl) {
2779 			m1div = 4;
2780 			m2div_int = dco_khz / (refclk_khz * m1div);
2781 		}
2782 
2783 		if (m2div_int > 255) {
2784 			DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
2785 				      clock);
2786 			return false;
2787 		}
2788 	}
2789 	m2div_rem = dco_khz % (refclk_khz * m1div);
2790 
2791 	tmp = (u64)m2div_rem * (1 << 22);
2792 	do_div(tmp, refclk_khz * m1div);
2793 	m2div_frac = tmp;
2794 
2795 	switch (refclk_khz) {
2796 	case 19200:
2797 		iref_ndiv = 1;
2798 		iref_trim = 28;
2799 		iref_pulse_w = 1;
2800 		break;
2801 	case 24000:
2802 		iref_ndiv = 1;
2803 		iref_trim = 25;
2804 		iref_pulse_w = 2;
2805 		break;
2806 	case 38400:
2807 		iref_ndiv = 2;
2808 		iref_trim = 28;
2809 		iref_pulse_w = 1;
2810 		break;
2811 	default:
2812 		MISSING_CASE(refclk_khz);
2813 		return false;
2814 	}
2815 
2816 	/*
2817 	 * tdc_res = 0.000003
2818 	 * tdc_targetcnt = int(2 / (tdc_res * 8 * 50 * 1.1) / refclk_mhz + 0.5)
2819 	 *
2820 	 * The multiplication by 1000 is due to refclk MHz to KHz conversion. It
2821 	 * was supposed to be a division, but we rearranged the operations of
2822 	 * the formula to avoid early divisions so we don't multiply the
2823 	 * rounding errors.
2824 	 *
2825 	 * 0.000003 * 8 * 50 * 1.1 = 0.00132, also known as 132 / 100000, which
2826 	 * we also rearrange to work with integers.
2827 	 *
2828 	 * The 0.5 transformed to 5 results in a multiplication by 10 and the
2829 	 * last division by 10.
2830 	 */
2831 	tdc_targetcnt = (2 * 1000 * 100000 * 10 / (132 * refclk_khz) + 5) / 10;
2832 
2833 	/*
2834 	 * Here we divide dco_khz by 10 in order to allow the dividend to fit in
2835 	 * 32 bits. That's not a problem since we round the division down
2836 	 * anyway.
2837 	 */
2838 	feedfwgain = (use_ssc || m2div_rem > 0) ?
2839 		m1div * 1000000 * 100 / (dco_khz * 3 / 10) : 0;
2840 
2841 	if (dco_khz >= 9000000) {
2842 		prop_coeff = 5;
2843 		int_coeff = 10;
2844 	} else {
2845 		prop_coeff = 4;
2846 		int_coeff = 8;
2847 	}
2848 
2849 	if (use_ssc) {
2850 		tmp = mul_u32_u32(dco_khz, 47 * 32);
2851 		do_div(tmp, refclk_khz * m1div * 10000);
2852 		ssc_stepsize = tmp;
2853 
2854 		tmp = mul_u32_u32(dco_khz, 1000);
2855 		ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
2856 	} else {
2857 		ssc_stepsize = 0;
2858 		ssc_steplen = 0;
2859 	}
2860 	ssc_steplog = 4;
2861 
2862 	/* write pll_state calculations */
2863 	if (is_dkl) {
2864 		pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) |
2865 					 DKL_PLL_DIV0_PROP_COEFF(prop_coeff) |
2866 					 DKL_PLL_DIV0_FBPREDIV(m1div) |
2867 					 DKL_PLL_DIV0_FBDIV_INT(m2div_int);
2868 
2869 		pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) |
2870 					 DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt);
2871 
2872 		pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) |
2873 					DKL_PLL_SSC_STEP_LEN(ssc_steplen) |
2874 					DKL_PLL_SSC_STEP_NUM(ssc_steplog) |
2875 					(use_ssc ? DKL_PLL_SSC_EN : 0);
2876 
2877 		pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) |
2878 					  DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac);
2879 
2880 		pll_state->mg_pll_tdc_coldst_bias =
2881 				DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) |
2882 				DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain);
2883 
2884 	} else {
2885 		pll_state->mg_pll_div0 =
2886 			(m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) |
2887 			MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) |
2888 			MG_PLL_DIV0_FBDIV_INT(m2div_int);
2889 
2890 		pll_state->mg_pll_div1 =
2891 			MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) |
2892 			MG_PLL_DIV1_DITHER_DIV_2 |
2893 			MG_PLL_DIV1_NDIVRATIO(1) |
2894 			MG_PLL_DIV1_FBPREDIV(m1div);
2895 
2896 		pll_state->mg_pll_lf =
2897 			MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) |
2898 			MG_PLL_LF_AFCCNTSEL_512 |
2899 			MG_PLL_LF_GAINCTRL(1) |
2900 			MG_PLL_LF_INT_COEFF(int_coeff) |
2901 			MG_PLL_LF_PROP_COEFF(prop_coeff);
2902 
2903 		pll_state->mg_pll_frac_lock =
2904 			MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 |
2905 			MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 |
2906 			MG_PLL_FRAC_LOCK_LOCKTHRESH(10) |
2907 			MG_PLL_FRAC_LOCK_DCODITHEREN |
2908 			MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain);
2909 		if (use_ssc || m2div_rem > 0)
2910 			pll_state->mg_pll_frac_lock |=
2911 				MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN;
2912 
2913 		pll_state->mg_pll_ssc =
2914 			(use_ssc ? MG_PLL_SSC_EN : 0) |
2915 			MG_PLL_SSC_TYPE(2) |
2916 			MG_PLL_SSC_STEPLENGTH(ssc_steplen) |
2917 			MG_PLL_SSC_STEPNUM(ssc_steplog) |
2918 			MG_PLL_SSC_FLLEN |
2919 			MG_PLL_SSC_STEPSIZE(ssc_stepsize);
2920 
2921 		pll_state->mg_pll_tdc_coldst_bias =
2922 			MG_PLL_TDC_COLDST_COLDSTART |
2923 			MG_PLL_TDC_COLDST_IREFINT_EN |
2924 			MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) |
2925 			MG_PLL_TDC_TDCOVCCORR_EN |
2926 			MG_PLL_TDC_TDCSEL(3);
2927 
2928 		pll_state->mg_pll_bias =
2929 			MG_PLL_BIAS_BIAS_GB_SEL(3) |
2930 			MG_PLL_BIAS_INIT_DCOAMP(0x3F) |
2931 			MG_PLL_BIAS_BIAS_BONUS(10) |
2932 			MG_PLL_BIAS_BIASCAL_EN |
2933 			MG_PLL_BIAS_CTRIM(12) |
2934 			MG_PLL_BIAS_VREF_RDAC(4) |
2935 			MG_PLL_BIAS_IREFTRIM(iref_trim);
2936 
2937 		if (refclk_khz == 38400) {
2938 			pll_state->mg_pll_tdc_coldst_bias_mask =
2939 				MG_PLL_TDC_COLDST_COLDSTART;
2940 			pll_state->mg_pll_bias_mask = 0;
2941 		} else {
2942 			pll_state->mg_pll_tdc_coldst_bias_mask = -1U;
2943 			pll_state->mg_pll_bias_mask = -1U;
2944 		}
2945 
2946 		pll_state->mg_pll_tdc_coldst_bias &=
2947 			pll_state->mg_pll_tdc_coldst_bias_mask;
2948 		pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
2949 	}
2950 
2951 	return true;
2952 }
2953 
2954 /**
2955  * icl_set_active_port_dpll - select the active port DPLL for a given CRTC
2956  * @crtc_state: state for the CRTC to select the DPLL for
2957  * @port_dpll_id: the active @port_dpll_id to select
2958  *
2959  * Select the given @port_dpll_id instance from the DPLLs reserved for the
2960  * CRTC.
2961  */
icl_set_active_port_dpll(struct intel_crtc_state * crtc_state,enum icl_port_dpll_id port_dpll_id)2962 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
2963 			      enum icl_port_dpll_id port_dpll_id)
2964 {
2965 	struct icl_port_dpll *port_dpll =
2966 		&crtc_state->icl_port_dplls[port_dpll_id];
2967 
2968 	crtc_state->shared_dpll = port_dpll->pll;
2969 	crtc_state->dpll_hw_state = port_dpll->hw_state;
2970 }
2971 
icl_update_active_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)2972 static void icl_update_active_dpll(struct intel_atomic_state *state,
2973 				   struct intel_crtc *crtc,
2974 				   struct intel_encoder *encoder)
2975 {
2976 	struct intel_crtc_state *crtc_state =
2977 		intel_atomic_get_new_crtc_state(state, crtc);
2978 	struct intel_digital_port *primary_port;
2979 	enum icl_port_dpll_id port_dpll_id = ICL_PORT_DPLL_DEFAULT;
2980 
2981 	primary_port = encoder->type == INTEL_OUTPUT_DP_MST ?
2982 		enc_to_mst(encoder)->primary :
2983 		enc_to_dig_port(encoder);
2984 
2985 	if (primary_port &&
2986 	    (primary_port->tc_mode == TC_PORT_DP_ALT ||
2987 	     primary_port->tc_mode == TC_PORT_LEGACY))
2988 		port_dpll_id = ICL_PORT_DPLL_MG_PHY;
2989 
2990 	icl_set_active_port_dpll(crtc_state, port_dpll_id);
2991 }
2992 
icl_get_combo_phy_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)2993 static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
2994 				   struct intel_crtc *crtc,
2995 				   struct intel_encoder *encoder)
2996 {
2997 	struct intel_crtc_state *crtc_state =
2998 		intel_atomic_get_new_crtc_state(state, crtc);
2999 	struct icl_port_dpll *port_dpll =
3000 		&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
3001 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3002 	enum port port = encoder->port;
3003 	unsigned long dpll_mask;
3004 
3005 	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
3006 		DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
3007 
3008 		return false;
3009 	}
3010 
3011 	if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A)
3012 		dpll_mask =
3013 			BIT(DPLL_ID_EHL_DPLL4) |
3014 			BIT(DPLL_ID_ICL_DPLL1) |
3015 			BIT(DPLL_ID_ICL_DPLL0);
3016 	else
3017 		dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0);
3018 
3019 	port_dpll->pll = intel_find_shared_dpll(state, crtc,
3020 						&port_dpll->hw_state,
3021 						dpll_mask);
3022 	if (!port_dpll->pll) {
3023 		DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
3024 			      encoder->base.base.id, encoder->base.name);
3025 		return false;
3026 	}
3027 
3028 	intel_reference_shared_dpll(state, crtc,
3029 				    port_dpll->pll, &port_dpll->hw_state);
3030 
3031 	icl_update_active_dpll(state, crtc, encoder);
3032 
3033 	return true;
3034 }
3035 
/*
 * Reserve both PLLs a Type-C port may need: the TBT PLL as the default
 * and the port-specific MG PHY PLL. Reserving both up front lets
 * icl_update_active_dpll() switch between them without recomputation.
 * On failure any reference already taken is dropped and false returned.
 */
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
				 struct intel_crtc *crtc,
				 struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct icl_port_dpll *port_dpll;
	enum intel_dpll_id dpll_id;

	/* First compute and reserve the TBT PLL (the default port DPLL). */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
		DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
		return false;
	}

	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(DPLL_ID_ICL_TBTPLL));
	if (!port_dpll->pll) {
		DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
		return false;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);


	/* Then compute and reserve the MG PHY PLL tied to this TC port. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
		DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
		goto err_unreference_tbt_pll;
	}

	dpll_id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
							 encoder->port));
	port_dpll->pll = intel_find_shared_dpll(state, crtc,
						&port_dpll->hw_state,
						BIT(dpll_id));
	if (!port_dpll->pll) {
		DRM_DEBUG_KMS("No MG PHY PLL found\n");
		goto err_unreference_tbt_pll;
	}
	intel_reference_shared_dpll(state, crtc,
				    port_dpll->pll, &port_dpll->hw_state);

	/* Pick which of the two reserved PLLs actually drives the port. */
	icl_update_active_dpll(state, crtc, encoder);

	return true;

err_unreference_tbt_pll:
	/* Undo the TBT PLL reference taken above. */
	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

	return false;
}
3091 
icl_get_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)3092 static bool icl_get_dplls(struct intel_atomic_state *state,
3093 			  struct intel_crtc *crtc,
3094 			  struct intel_encoder *encoder)
3095 {
3096 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3097 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
3098 
3099 	if (intel_phy_is_combo(dev_priv, phy))
3100 		return icl_get_combo_phy_dpll(state, crtc, encoder);
3101 	else if (intel_phy_is_tc(dev_priv, phy))
3102 		return icl_get_tc_phy_dplls(state, crtc, encoder);
3103 
3104 	MISSING_CASE(phy);
3105 
3106 	return false;
3107 }
3108 
icl_put_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc)3109 static void icl_put_dplls(struct intel_atomic_state *state,
3110 			  struct intel_crtc *crtc)
3111 {
3112 	const struct intel_crtc_state *old_crtc_state =
3113 		intel_atomic_get_old_crtc_state(state, crtc);
3114 	struct intel_crtc_state *new_crtc_state =
3115 		intel_atomic_get_new_crtc_state(state, crtc);
3116 	enum icl_port_dpll_id id;
3117 
3118 	new_crtc_state->shared_dpll = NULL;
3119 
3120 	for (id = ICL_PORT_DPLL_DEFAULT; id < ICL_PORT_DPLL_COUNT; id++) {
3121 		const struct icl_port_dpll *old_port_dpll =
3122 			&old_crtc_state->icl_port_dplls[id];
3123 		struct icl_port_dpll *new_port_dpll =
3124 			&new_crtc_state->icl_port_dplls[id];
3125 
3126 		new_port_dpll->pll = NULL;
3127 
3128 		if (!old_port_dpll->pll)
3129 			continue;
3130 
3131 		intel_unreference_shared_dpll(state, crtc, old_port_dpll->pll);
3132 	}
3133 }
3134 
/*
 * Read back the MG PHY PLL register state for a Type-C port into
 * @hw_state, masking each register down to the fields the driver
 * programs so readout can be compared with the computed state.
 * Returns false if display power is off or the PLL is disabled.
 */
static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll,
				struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Nothing to read if the display core power well is down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/* Mask out reserved bits not programmed by icl_mg_pll_write(). */
	hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_clktop2_hsclkctl =
		I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
	hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
	hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
	hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
	hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));

	hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias =
		I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));

	/*
	 * With a 38.4 MHz reference only the coldstart bit is compared;
	 * this mirrors the mask selection in icl_calc_mg_pll_state().
	 */
	if (dev_priv->cdclk.hw.ref == 38400) {
		hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
		hw_state->mg_pll_bias_mask = 0;
	} else {
		hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
		hw_state->mg_pll_bias_mask = -1U;
	}

	hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
	hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3196 
/*
 * Read back the Dekel PHY PLL register state for a Type-C port into
 * @hw_state, masking each register down to the fields programmed by
 * dkl_pll_write(). Returns false if display power is off or the PLL is
 * disabled.
 */
static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state)
{
	const enum intel_dpll_id id = pll->info->id;
	enum tc_port tc_port = icl_pll_id_to_tc_port(id);
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Nothing to read if the display core power well is down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(MG_PLL_ENABLE(tc_port));
	if (!(val & PLL_ENABLE))
		goto out;

	/*
	 * All registers read here have the same HIP_INDEX_REG even though
	 * they are on different building blocks
	 */
	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));

	/* Mask each readout down to the driver-programmed fields. */
	hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

	hw_state->mg_clktop2_hsclkctl =
		I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
	hw_state->mg_clktop2_hsclkctl &=
		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

	hw_state->mg_clktop2_coreclkctl1 =
		I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
	hw_state->mg_clktop2_coreclkctl1 &=
		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

	hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
	hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
				  DKL_PLL_DIV0_PROP_COEFF_MASK |
				  DKL_PLL_DIV0_FBPREDIV_MASK |
				  DKL_PLL_DIV0_FBDIV_INT_MASK);

	hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

	hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
				 DKL_PLL_SSC_STEP_LEN_MASK |
				 DKL_PLL_SSC_STEP_NUM_MASK |
				 DKL_PLL_SSC_EN);

	hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

	hw_state->mg_pll_tdc_coldst_bias =
		I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3268 
/*
 * Common hw-state readout for combo and TBT PLLs: check @enable_reg for
 * PLL_ENABLE and read the platform-appropriate CFGCR0/CFGCR1 pair into
 * @hw_state. Returns false if display power is off or the PLL is
 * disabled.
 */
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 struct intel_dpll_hw_state *hw_state,
				 i915_reg_t enable_reg)
{
	const enum intel_dpll_id id = pll->info->id;
	intel_wakeref_t wakeref;
	bool ret = false;
	u32 val;

	/* Nothing to read if the display core power well is down. */
	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     POWER_DOMAIN_DISPLAY_CORE);
	if (!wakeref)
		return false;

	val = I915_READ(enable_reg);
	if (!(val & PLL_ENABLE))
		goto out;

	if (INTEL_GEN(dev_priv) >= 12) {
		/* TGL+ has its own CFGCR register layout. */
		hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
		hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
	} else {
		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
			/* EHL DPLL4 uses CFGCR register instance 4. */
			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
		} else {
			hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
			hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
		}
	}

	ret = true;
out:
	intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
	return ret;
}
3306 
combo_pll_get_hw_state(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll,struct intel_dpll_hw_state * hw_state)3307 static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
3308 				   struct intel_shared_dpll *pll,
3309 				   struct intel_dpll_hw_state *hw_state)
3310 {
3311 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3312 
3313 	if (IS_ELKHARTLAKE(dev_priv) &&
3314 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3315 		enable_reg = MG_PLL_ENABLE(0);
3316 	}
3317 
3318 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, enable_reg);
3319 }
3320 
tbt_pll_get_hw_state(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll,struct intel_dpll_hw_state * hw_state)3321 static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
3322 				 struct intel_shared_dpll *pll,
3323 				 struct intel_dpll_hw_state *hw_state)
3324 {
3325 	return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
3326 }
3327 
icl_dpll_write(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)3328 static void icl_dpll_write(struct drm_i915_private *dev_priv,
3329 			   struct intel_shared_dpll *pll)
3330 {
3331 	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
3332 	const enum intel_dpll_id id = pll->info->id;
3333 	i915_reg_t cfgcr0_reg, cfgcr1_reg;
3334 
3335 	if (INTEL_GEN(dev_priv) >= 12) {
3336 		cfgcr0_reg = TGL_DPLL_CFGCR0(id);
3337 		cfgcr1_reg = TGL_DPLL_CFGCR1(id);
3338 	} else {
3339 		if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
3340 			cfgcr0_reg = ICL_DPLL_CFGCR0(4);
3341 			cfgcr1_reg = ICL_DPLL_CFGCR1(4);
3342 		} else {
3343 			cfgcr0_reg = ICL_DPLL_CFGCR0(id);
3344 			cfgcr1_reg = ICL_DPLL_CFGCR1(id);
3345 		}
3346 	}
3347 
3348 	I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
3349 	I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
3350 	POSTING_READ(cfgcr1_reg);
3351 }
3352 
/*
 * Program the MG PHY PLL registers for a Type-C port from the
 * precomputed state in @pll. Register and write order follow the
 * enable sequence; the final posting read flushes the writes.
 */
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * Some of the following registers have reserved fields, so program
	 * these with RMW based on a mask. The mask can be fixed or generated
	 * during the calc/readout phase if the mask depends on some other HW
	 * state like refclk, see icl_calc_mg_pll_state().
	 */
	val = I915_READ(MG_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);

	val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);

	/* These registers are fully owned by the driver: plain writes. */
	I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
	I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
	I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
	I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
	I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);

	/* Masks computed at calc/readout time (refclk dependent). */
	val = I915_READ(MG_PLL_BIAS(tc_port));
	val &= ~hw_state->mg_pll_bias_mask;
	val |= hw_state->mg_pll_bias;
	I915_WRITE(MG_PLL_BIAS(tc_port), val);

	val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
	val |= hw_state->mg_pll_tdc_coldst_bias;
	I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);

	POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
3402 
/*
 * Program the Dekel PHY PLL registers for a Type-C port from the
 * precomputed state in @pll. Every register is updated read-modify-
 * write against a fixed field mask; the final posting read flushes.
 */
static void dkl_pll_write(struct drm_i915_private *dev_priv,
			  struct intel_shared_dpll *pll)
{
	struct intel_dpll_hw_state *hw_state = &pll->state.hw_state;
	enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id);
	u32 val;

	/*
	 * All registers programmed here have the same HIP_INDEX_REG even
	 * though on different building block
	 */
	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));

	/* All the registers are RMW */
	val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
	val |= hw_state->mg_refclkin_ctl;
	I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);

	val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
	val |= hw_state->mg_clktop2_coreclkctl1;
	I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);

	val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
	val |= hw_state->mg_clktop2_hsclkctl;
	I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);

	val = I915_READ(DKL_PLL_DIV0(tc_port));
	val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
		 DKL_PLL_DIV0_PROP_COEFF_MASK |
		 DKL_PLL_DIV0_FBPREDIV_MASK |
		 DKL_PLL_DIV0_FBDIV_INT_MASK);
	val |= hw_state->mg_pll_div0;
	I915_WRITE(DKL_PLL_DIV0(tc_port), val);

	val = I915_READ(DKL_PLL_DIV1(tc_port));
	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
	val |= hw_state->mg_pll_div1;
	I915_WRITE(DKL_PLL_DIV1(tc_port), val);

	val = I915_READ(DKL_PLL_SSC(tc_port));
	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
		 DKL_PLL_SSC_STEP_LEN_MASK |
		 DKL_PLL_SSC_STEP_NUM_MASK |
		 DKL_PLL_SSC_EN);
	val |= hw_state->mg_pll_ssc;
	I915_WRITE(DKL_PLL_SSC(tc_port), val);

	val = I915_READ(DKL_PLL_BIAS(tc_port));
	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
	val |= hw_state->mg_pll_bias;
	I915_WRITE(DKL_PLL_BIAS(tc_port), val);

	val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
	val |= hw_state->mg_pll_tdc_coldst_bias;
	I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);

	POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
3471 
/* Raise PLL power and wait for the power-state bit to report enabled. */
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll,
				 i915_reg_t enable_reg)
{
	u32 val = I915_READ(enable_reg) | PLL_POWER_ENABLE;

	I915_WRITE(enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
}
3489 
/* Set PLL_ENABLE and wait for the PLL to report lock. */
static void icl_pll_enable(struct drm_i915_private *dev_priv,
			   struct intel_shared_dpll *pll,
			   i915_reg_t enable_reg)
{
	u32 val = I915_READ(enable_reg) | PLL_ENABLE;

	I915_WRITE(enable_reg, val);

	/* Timeout is actually 600us. */
	if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
		DRM_ERROR("PLL %d not locked\n", pll->info->id);
}
3504 
/*
 * Enable a combo PHY PLL: power it up, program CFGCR0/1, then set
 * PLL_ENABLE and wait for lock. EHL DPLL4 additionally needs a power
 * reference (released in combo_pll_disable()) to keep DC states off.
 */
static void combo_pll_enable(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll)
{
	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);

	if (IS_ELKHARTLAKE(dev_priv) &&
	    pll->info->id == DPLL_ID_EHL_DPLL4) {
		/* EHL DPLL4 is enabled through the MG PLL 1 register. */
		enable_reg = MG_PLL_ENABLE(0);

		/*
		 * We need to disable DC states when this DPLL is enabled.
		 * This can be done by taking a reference on DPLL4 power
		 * domain.
		 */
		pll->wakeref = intel_display_power_get(dev_priv,
						       POWER_DOMAIN_DPLL_DC_OFF);
	}

	icl_pll_power_enable(dev_priv, pll, enable_reg);

	icl_dpll_write(dev_priv, pll);

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	icl_pll_enable(dev_priv, pll, enable_reg);

	/* DVFS post sequence would be here. See the comment above. */
}
3537 
tbt_pll_enable(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)3538 static void tbt_pll_enable(struct drm_i915_private *dev_priv,
3539 			   struct intel_shared_dpll *pll)
3540 {
3541 	icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
3542 
3543 	icl_dpll_write(dev_priv, pll);
3544 
3545 	/*
3546 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3547 	 * paths should already be setting the appropriate voltage, hence we do
3548 	 * nothing here.
3549 	 */
3550 
3551 	icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
3552 
3553 	/* DVFS post sequence would be here. See the comment above. */
3554 }
3555 
mg_pll_enable(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)3556 static void mg_pll_enable(struct drm_i915_private *dev_priv,
3557 			  struct intel_shared_dpll *pll)
3558 {
3559 	i915_reg_t enable_reg =
3560 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3561 
3562 	icl_pll_power_enable(dev_priv, pll, enable_reg);
3563 
3564 	if (INTEL_GEN(dev_priv) >= 12)
3565 		dkl_pll_write(dev_priv, pll);
3566 	else
3567 		icl_mg_pll_write(dev_priv, pll);
3568 
3569 	/*
3570 	 * DVFS pre sequence would be here, but in our driver the cdclk code
3571 	 * paths should already be setting the appropriate voltage, hence we do
3572 	 * nothing here.
3573 	 */
3574 
3575 	icl_pll_enable(dev_priv, pll, enable_reg);
3576 
3577 	/* DVFS post sequence would be here. See the comment above. */
3578 }
3579 
/*
 * Common PLL disable sequence: clear PLL_ENABLE, wait for the lock bit
 * to drop, then remove PLL power and wait for the power state to clear.
 */
static void icl_pll_disable(struct drm_i915_private *dev_priv,
			    struct intel_shared_dpll *pll,
			    i915_reg_t enable_reg)
{
	u32 val;

	/* The first steps are done by intel_ddi_post_disable(). */

	/*
	 * DVFS pre sequence would be here, but in our driver the cdclk code
	 * paths should already be setting the appropriate voltage, hence we do
	 * nothing here.
	 */

	val = I915_READ(enable_reg);
	val &= ~PLL_ENABLE;
	I915_WRITE(enable_reg, val);

	/* Timeout is actually 1us. */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
		DRM_ERROR("PLL %d locked\n", pll->info->id);

	/* DVFS post sequence would be here. See the comment above. */

	val = I915_READ(enable_reg);
	val &= ~PLL_POWER_ENABLE;
	I915_WRITE(enable_reg, val);

	/*
	 * The spec says we need to "wait" but it also says it should be
	 * immediate.
	 */
	if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
		DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
}
3615 
combo_pll_disable(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)3616 static void combo_pll_disable(struct drm_i915_private *dev_priv,
3617 			      struct intel_shared_dpll *pll)
3618 {
3619 	i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
3620 
3621 	if (IS_ELKHARTLAKE(dev_priv) &&
3622 	    pll->info->id == DPLL_ID_EHL_DPLL4) {
3623 		enable_reg = MG_PLL_ENABLE(0);
3624 		icl_pll_disable(dev_priv, pll, enable_reg);
3625 
3626 		intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF,
3627 					pll->wakeref);
3628 		return;
3629 	}
3630 
3631 	icl_pll_disable(dev_priv, pll, enable_reg);
3632 }
3633 
tbt_pll_disable(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)3634 static void tbt_pll_disable(struct drm_i915_private *dev_priv,
3635 			    struct intel_shared_dpll *pll)
3636 {
3637 	icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
3638 }
3639 
mg_pll_disable(struct drm_i915_private * dev_priv,struct intel_shared_dpll * pll)3640 static void mg_pll_disable(struct drm_i915_private *dev_priv,
3641 			   struct intel_shared_dpll *pll)
3642 {
3643 	i915_reg_t enable_reg =
3644 		MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
3645 
3646 	icl_pll_disable(dev_priv, pll, enable_reg);
3647 }
3648 
icl_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)3649 static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
3650 			      const struct intel_dpll_hw_state *hw_state)
3651 {
3652 	DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
3653 		      "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
3654 		      "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
3655 		      "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
3656 		      "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
3657 		      "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
3658 		      hw_state->cfgcr0, hw_state->cfgcr1,
3659 		      hw_state->mg_refclkin_ctl,
3660 		      hw_state->mg_clktop2_coreclkctl1,
3661 		      hw_state->mg_clktop2_hsclkctl,
3662 		      hw_state->mg_pll_div0,
3663 		      hw_state->mg_pll_div1,
3664 		      hw_state->mg_pll_lf,
3665 		      hw_state->mg_pll_frac_lock,
3666 		      hw_state->mg_pll_ssc,
3667 		      hw_state->mg_pll_bias,
3668 		      hw_state->mg_pll_tdc_coldst_bias);
3669 }
3670 
/* Hook table for combo PHY PLLs (DPLL0/1 and EHL DPLL4). */
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
	.enable = combo_pll_enable,
	.disable = combo_pll_disable,
	.get_hw_state = combo_pll_get_hw_state,
};

/* Hook table for the Thunderbolt PLL. */
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
	.enable = tbt_pll_enable,
	.disable = tbt_pll_disable,
	.get_hw_state = tbt_pll_get_hw_state,
};

/* Hook table for the ICL MG PHY (Type-C) PLLs. */
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = mg_pll_get_hw_state,
};
3688 
/*
 * ICL PLL table: two combo PHY DPLLs, the Thunderbolt PLL and four MG
 * PLLs.  Entry order must match the DPLL id — intel_shared_dpll_init()
 * WARN_ONs when table index and id disagree.
 */
static const struct dpll_info icl_plls[] = {
	{ "DPLL 0",   &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1",   &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ },	/* NULL name terminates the table */
};
3699 
/* Shared DPLL manager for ICL (selected for gen11 in intel_shared_dpll_init()). */
static const struct intel_dpll_mgr icl_pll_mgr = {
	.dpll_info = icl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
3707 
/* EHL PLL table: combo PHY DPLLs only, no TBT/MG PLLs. */
static const struct dpll_info ehl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
	{ "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
	{ },	/* NULL name terminates the table */
};
3714 
/*
 * Shared DPLL manager for EHL.  Reuses the ICL get/put hooks; no
 * .update_active_dpll since EHL has no Type-C PLLs to switch between.
 */
static const struct intel_dpll_mgr ehl_pll_mgr = {
	.dpll_info = ehl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.dump_hw_state = icl_dump_hw_state,
};
3721 
/*
 * PLL hooks for the TGL Type-C (dkl PHY) PLLs: enable/disable are shared
 * with the ICL MG PLLs, only the state readout differs.
 */
static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
	.enable = mg_pll_enable,
	.disable = mg_pll_disable,
	.get_hw_state = dkl_pll_get_hw_state,
};
3727 
/*
 * TGL PLL table: two combo PHY DPLLs, the Thunderbolt PLL and six
 * Type-C PLLs (dkl hooks).  Entry order must match the DPLL id, as
 * checked by intel_shared_dpll_init().
 */
static const struct dpll_info tgl_plls[] = {
	{ "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0,  0 },
	{ "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1,  0 },
	{ "TBT PLL",  &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
	{ "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
	{ "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
	{ "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
	{ "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
	{ "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
	{ "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
	{ },	/* NULL name terminates the table */
};
3740 
/* Shared DPLL manager for TGL (selected for gen12+ in intel_shared_dpll_init()). */
static const struct intel_dpll_mgr tgl_pll_mgr = {
	.dpll_info = tgl_plls,
	.get_dplls = icl_get_dplls,
	.put_dplls = icl_put_dplls,
	.update_active_dpll = icl_update_active_dpll,
	.dump_hw_state = icl_dump_hw_state,
};
3748 
3749 /**
3750  * intel_shared_dpll_init - Initialize shared DPLLs
3751  * @dev: drm device
3752  *
3753  * Initialize shared DPLLs for @dev.
3754  */
intel_shared_dpll_init(struct drm_device * dev)3755 void intel_shared_dpll_init(struct drm_device *dev)
3756 {
3757 	struct drm_i915_private *dev_priv = to_i915(dev);
3758 	const struct intel_dpll_mgr *dpll_mgr = NULL;
3759 	const struct dpll_info *dpll_info;
3760 	int i;
3761 
3762 	if (INTEL_GEN(dev_priv) >= 12)
3763 		dpll_mgr = &tgl_pll_mgr;
3764 	else if (IS_ELKHARTLAKE(dev_priv))
3765 		dpll_mgr = &ehl_pll_mgr;
3766 	else if (INTEL_GEN(dev_priv) >= 11)
3767 		dpll_mgr = &icl_pll_mgr;
3768 	else if (IS_CANNONLAKE(dev_priv))
3769 		dpll_mgr = &cnl_pll_mgr;
3770 	else if (IS_GEN9_BC(dev_priv))
3771 		dpll_mgr = &skl_pll_mgr;
3772 	else if (IS_GEN9_LP(dev_priv))
3773 		dpll_mgr = &bxt_pll_mgr;
3774 	else if (HAS_DDI(dev_priv))
3775 		dpll_mgr = &hsw_pll_mgr;
3776 	else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
3777 		dpll_mgr = &pch_pll_mgr;
3778 
3779 	if (!dpll_mgr) {
3780 		dev_priv->num_shared_dpll = 0;
3781 		return;
3782 	}
3783 
3784 	dpll_info = dpll_mgr->dpll_info;
3785 
3786 	for (i = 0; dpll_info[i].name; i++) {
3787 		WARN_ON(i != dpll_info[i].id);
3788 		dev_priv->shared_dplls[i].info = &dpll_info[i];
3789 	}
3790 
3791 	dev_priv->dpll_mgr = dpll_mgr;
3792 	dev_priv->num_shared_dpll = i;
3793 	mutex_init(&dev_priv->dpll_lock);
3794 
3795 	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
3796 }
3797 
3798 void
intel_shared_dpll_cleanup(struct drm_device * dev)3799 intel_shared_dpll_cleanup(struct drm_device *dev)
3800 {
3801 	struct drm_i915_private *dev_priv = to_i915(dev);
3802 
3803 	mutex_destroy(&dev_priv->dpll_lock);
3804 }
3805 
3806 /**
3807  * intel_reserve_shared_dplls - reserve DPLLs for CRTC and encoder combination
3808  * @state: atomic state
3809  * @crtc: CRTC to reserve DPLLs for
3810  * @encoder: encoder
3811  *
3812  * This function reserves all required DPLLs for the given CRTC and encoder
3813  * combination in the current atomic commit @state and the new @crtc atomic
3814  * state.
3815  *
3816  * The new configuration in the atomic commit @state is made effective by
3817  * calling intel_shared_dpll_swap_state().
3818  *
3819  * The reserved DPLLs should be released by calling
3820  * intel_release_shared_dplls().
3821  *
3822  * Returns:
3823  * True if all required DPLLs were successfully reserved.
3824  */
intel_reserve_shared_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)3825 bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
3826 				struct intel_crtc *crtc,
3827 				struct intel_encoder *encoder)
3828 {
3829 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3830 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3831 
3832 	if (WARN_ON(!dpll_mgr))
3833 		return false;
3834 
3835 	return dpll_mgr->get_dplls(state, crtc, encoder);
3836 }
3837 
3838 /**
3839  * intel_release_shared_dplls - end use of DPLLs by CRTC in atomic state
3840  * @state: atomic state
3841  * @crtc: crtc from which the DPLLs are to be released
3842  *
3843  * This function releases all DPLLs reserved by intel_reserve_shared_dplls()
3844  * from the current atomic commit @state and the old @crtc atomic state.
3845  *
3846  * The new configuration in the atomic commit @state is made effective by
3847  * calling intel_shared_dpll_swap_state().
3848  */
intel_release_shared_dplls(struct intel_atomic_state * state,struct intel_crtc * crtc)3849 void intel_release_shared_dplls(struct intel_atomic_state *state,
3850 				struct intel_crtc *crtc)
3851 {
3852 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
3853 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3854 
3855 	/*
3856 	 * FIXME: this function is called for every platform having a
3857 	 * compute_clock hook, even though the platform doesn't yet support
3858 	 * the shared DPLL framework and intel_reserve_shared_dplls() is not
3859 	 * called on those.
3860 	 */
3861 	if (!dpll_mgr)
3862 		return;
3863 
3864 	dpll_mgr->put_dplls(state, crtc);
3865 }
3866 
3867 /**
3868  * intel_update_active_dpll - update the active DPLL for a CRTC/encoder
3869  * @state: atomic state
3870  * @crtc: the CRTC for which to update the active DPLL
3871  * @encoder: encoder determining the type of port DPLL
3872  *
3873  * Update the active DPLL for the given @crtc/@encoder in @crtc's atomic state,
3874  * from the port DPLLs reserved previously by intel_reserve_shared_dplls(). The
3875  * DPLL selected will be based on the current mode of the encoder's port.
3876  */
intel_update_active_dpll(struct intel_atomic_state * state,struct intel_crtc * crtc,struct intel_encoder * encoder)3877 void intel_update_active_dpll(struct intel_atomic_state *state,
3878 			      struct intel_crtc *crtc,
3879 			      struct intel_encoder *encoder)
3880 {
3881 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3882 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
3883 
3884 	if (WARN_ON(!dpll_mgr))
3885 		return;
3886 
3887 	dpll_mgr->update_active_dpll(state, crtc, encoder);
3888 }
3889 
3890 /**
3891  * intel_shared_dpll_dump_hw_state - write hw_state to dmesg
3892  * @dev_priv: i915 drm device
3893  * @hw_state: hw state to be written to the log
3894  *
3895  * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
3896  */
intel_dpll_dump_hw_state(struct drm_i915_private * dev_priv,const struct intel_dpll_hw_state * hw_state)3897 void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
3898 			      const struct intel_dpll_hw_state *hw_state)
3899 {
3900 	if (dev_priv->dpll_mgr) {
3901 		dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
3902 	} else {
3903 		/* fallback for platforms that don't use the shared dpll
3904 		 * infrastructure
3905 		 */
3906 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
3907 			      "fp0: 0x%x, fp1: 0x%x\n",
3908 			      hw_state->dpll,
3909 			      hw_state->dpll_md,
3910 			      hw_state->fp0,
3911 			      hw_state->fp1);
3912 	}
3913 }
3914