xref: /openbsd-src/sys/dev/pci/drm/i915/display/intel_tc.c (revision 1ad61ae0a79a724d2d3ec69e69c8e1d1ff6b53a0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_de.h"
9 #include "intel_display.h"
10 #include "intel_display_power_map.h"
11 #include "intel_display_types.h"
12 #include "intel_dp_mst.h"
13 #include "intel_tc.h"
14 #include "intel_tc_phy_regs.h"
15 
16 static const char *tc_port_mode_name(enum tc_port_mode mode)
17 {
18 	static const char * const names[] = {
19 		[TC_PORT_DISCONNECTED] = "disconnected",
20 		[TC_PORT_TBT_ALT] = "tbt-alt",
21 		[TC_PORT_DP_ALT] = "dp-alt",
22 		[TC_PORT_LEGACY] = "legacy",
23 	};
24 
25 	if (WARN_ON(mode >= ARRAY_SIZE(names)))
26 		mode = TC_PORT_DISCONNECTED;
27 
28 	return names[mode];
29 }
30 
31 static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
32 				  enum tc_port_mode mode)
33 {
34 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
35 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
36 
37 	return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
38 }
39 
40 bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
41 {
42 	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
43 }
44 
45 bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
46 {
47 	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
48 }
49 
50 bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
51 {
52 	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
53 }
54 
55 bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
56 {
57 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
58 
59 	return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
60 		IS_ALDERLAKE_P(i915);
61 }
62 
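/*
 * Return the power domain that must be held to block TC-cold in @mode:
 * POWER_DOMAIN_TC_COLD_OFF for TBT-alt mode and for ports where TC-cold
 * doesn't require AUX power, otherwise the port's legacy AUX power domain.
 */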
63 static enum intel_display_power_domain
64 tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
65 {
66 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
67 
68 	if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
69 		return POWER_DOMAIN_TC_COLD_OFF;
70 
71 	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
72 }
73 
74 static intel_wakeref_t
75 tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
76 		      enum intel_display_power_domain *domain)
77 {
78 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
79 
80 	*domain = tc_cold_get_power_domain(dig_port, mode);
81 
82 	return intel_display_power_get(i915, *domain);
83 }
84 
85 static intel_wakeref_t
86 tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
87 {
88 	return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
89 }
90 
91 static void
92 tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
93 		intel_wakeref_t wakeref)
94 {
95 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
96 
97 	/*
98 	 * A wakeref of -1 means an error happened while saving save_depot_stack,
99 	 * but power should still be put down. 0 is an invalid save_depot_stack
100 	 * id, so it can be used to skip the put for non-TC-legacy ports.
101 	 */
102 	if (wakeref == 0)
103 		return;
104 
105 	intel_display_power_put(i915, domain, wakeref);
106 }
107 
108 static void
109 assert_tc_cold_blocked(struct intel_digital_port *dig_port)
110 {
111 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
112 	bool enabled;
113 
114 	enabled = intel_display_power_is_enabled(i915,
115 						 tc_cold_get_power_domain(dig_port,
116 									  dig_port->tc_mode));
117 	drm_WARN_ON(&i915->drm, !enabled);
118 }
119 
120 static enum intel_display_power_domain
121 tc_port_power_domain(struct intel_digital_port *dig_port)
122 {
123 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
124 	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
125 
126 	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
127 }
128 
129 static void
130 assert_tc_port_power_enabled(struct intel_digital_port *dig_port)
131 {
132 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
133 
134 	drm_WARN_ON(&i915->drm,
135 		    !intel_display_power_is_enabled(i915, tc_port_power_domain(dig_port)));
136 }
137 
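/*
 * Return the DP lane assignment for the port as reported by the FIA's
 * DFLEXDPSP register. The caller must keep TC-cold blocked; an all-ones
 * readback (PHY in TC-cold) triggers a WARN.
 */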
138 u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
139 {
140 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
141 	struct intel_uncore *uncore = &i915->uncore;
142 	u32 lane_mask;
143 
144 	lane_mask = intel_uncore_read(uncore,
145 				      PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
146 
147 	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
148 	assert_tc_cold_blocked(dig_port);
149 
150 	lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
151 	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
152 }
153 
154 u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
155 {
156 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
157 	struct intel_uncore *uncore = &i915->uncore;
158 	u32 pin_mask;
159 
160 	pin_mask = intel_uncore_read(uncore,
161 				     PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
162 
163 	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
164 	assert_tc_cold_blocked(dig_port);
165 
166 	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
167 	       DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
168 }
169 
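/*
 * Return the maximum number of lanes the port can use: 4 unless the port is
 * in DP-alt mode, in which case the count is derived from the FIA's DP lane
 * assignment.
 */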
170 int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
171 {
172 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
173 	intel_wakeref_t wakeref;
174 	u32 lane_mask;
175 
176 	if (dig_port->tc_mode != TC_PORT_DP_ALT)
177 		return 4;
178 
179 	assert_tc_cold_blocked(dig_port);
180 
181 	lane_mask = 0;
182 	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
183 		lane_mask = intel_tc_port_get_lane_mask(dig_port);
184 
185 	switch (lane_mask) {
186 	default:
187 		MISSING_CASE(lane_mask);
188 		fallthrough;
189 	case 0x1:
190 	case 0x2:
191 	case 0x4:
192 	case 0x8:
193 		return 1;
194 	case 0x3:
195 	case 0xc:
196 		return 2;
197 	case 0xf:
198 		return 4;
199 	}
200 }
201 
202 void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
203 				      int required_lanes)
204 {
205 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
206 	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
207 	struct intel_uncore *uncore = &i915->uncore;
208 	u32 val;
209 
210 	drm_WARN_ON(&i915->drm,
211 		    lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
212 
213 	assert_tc_cold_blocked(dig_port);
214 
215 	val = intel_uncore_read(uncore,
216 				PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
217 	val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
218 
219 	switch (required_lanes) {
220 	case 1:
221 		val |= lane_reversal ?
222 			DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) :
223 			DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx);
224 		break;
225 	case 2:
226 		val |= lane_reversal ?
227 			DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) :
228 			DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx);
229 		break;
230 	case 4:
231 		val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx);
232 		break;
233 	default:
234 		MISSING_CASE(required_lanes);
235 	}
236 
237 	intel_uncore_write(uncore,
238 			   PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
239 }
240 
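/*
 * The VBT-provided legacy port flag may be wrong: if the live HPD status is
 * valid only for the opposite port type, flip the flag and trust the live
 * status instead.
 */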
241 static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
242 				      u32 live_status_mask)
243 {
244 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
245 	u32 valid_hpd_mask;
246 
247 	if (dig_port->tc_legacy_port)
248 		valid_hpd_mask = BIT(TC_PORT_LEGACY);
249 	else
250 		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
251 				 BIT(TC_PORT_TBT_ALT);
252 
253 	if (!(live_status_mask & ~valid_hpd_mask))
254 		return;
255 
256 	/* If live status mismatches the VBT flag, trust the live status. */
257 	drm_dbg_kms(&i915->drm,
258 		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
259 		    dig_port->tc_port_name, live_status_mask, valid_hpd_mask);
260 
261 	dig_port->tc_legacy_port = !dig_port->tc_legacy_port;
262 }
263 
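/*
 * Return the mask of TC_PORT_* modes for which a sink is currently detected,
 * based on the FIA live state bits and the legacy HPD bit in SDEISR.
 */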
264 static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
265 {
266 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
267 	struct intel_uncore *uncore = &i915->uncore;
268 	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
269 	u32 mask = 0;
270 	u32 val;
271 
272 	val = intel_uncore_read(uncore,
273 				PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
274 
275 	if (val == 0xffffffff) {
276 		drm_dbg_kms(&i915->drm,
277 			    "Port %s: PHY in TCCOLD, nothing connected\n",
278 			    dig_port->tc_port_name);
279 		return mask;
280 	}
281 
282 	if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx))
283 		mask |= BIT(TC_PORT_TBT_ALT);
284 	if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
285 		mask |= BIT(TC_PORT_DP_ALT);
286 
287 	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
288 		mask |= BIT(TC_PORT_LEGACY);
289 
290 	/* The sink can be connected only in a single mode. */
291 	if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1))
292 		tc_port_fixup_legacy_flag(dig_port, mask);
293 
294 	return mask;
295 }
296 
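/*
 * ADL-P variant of the live status readout: the DP-alt and TBT-alt status
 * come from the IOM's TCSS_DDI_STATUS register instead of the FIA.
 */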
297 static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
298 {
299 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
300 	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
301 	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
302 	struct intel_uncore *uncore = &i915->uncore;
303 	u32 val, mask = 0;
304 
305 	/*
306 	 * On ADL-P, HW/FW will wake from TCCOLD to complete read accesses of
307 	 * registers in IOM. Note that this doesn't apply to PHY and FIA
308 	 * registers.
309 	 */
310 	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
311 	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
312 		mask |= BIT(TC_PORT_DP_ALT);
313 	if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
314 		mask |= BIT(TC_PORT_TBT_ALT);
315 
316 	if (intel_uncore_read(uncore, SDEISR) & isr_bit)
317 		mask |= BIT(TC_PORT_LEGACY);
318 
319 	/* The sink can be connected only in a single mode. */
320 	if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
321 		tc_port_fixup_legacy_flag(dig_port, mask);
322 
323 	return mask;
324 }
325 
326 static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
327 {
328 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
329 
330 	if (IS_ALDERLAKE_P(i915))
331 		return adl_tc_port_live_status_mask(dig_port);
332 
333 	return icl_tc_port_live_status_mask(dig_port);
334 }
335 
336 /*
337  * Return the PHY status complete flag indicating that display can acquire the
338  * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
339  * is connected and it's ready to switch the ownership to display. The flag
340  * will be left cleared when a TBT-alt sink is connected, where the PHY is
341  * owned by the TBT subsystem and so switching the ownership to display is not
342  * required.
343  */
344 static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
345 {
346 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
347 	struct intel_uncore *uncore = &i915->uncore;
348 	u32 val;
349 
350 	val = intel_uncore_read(uncore,
351 				PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
352 	if (val == 0xffffffff) {
353 		drm_dbg_kms(&i915->drm,
354 			    "Port %s: PHY in TCCOLD, assuming not complete\n",
355 			    dig_port->tc_port_name);
356 		return false;
357 	}
358 
359 	return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
360 }
361 
362 /*
363  * Return the PHY status complete flag indicating that display can acquire the
364  * PHY ownership. The IOM firmware sets this flag when it's ready to switch
365  * the ownership to display, regardless of what sink is connected (TBT-alt,
366  * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
367  * subsystem and so switching the ownership to display is not required.
368  */
369 static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
370 {
371 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
372 	enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
373 	struct intel_uncore *uncore = &i915->uncore;
374 	u32 val;
375 
376 	val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
377 	if (val == 0xffffffff) {
378 		drm_dbg_kms(&i915->drm,
379 			    "Port %s: PHY in TCCOLD, assuming not complete\n",
380 			    dig_port->tc_port_name);
381 		return false;
382 	}
383 
384 	return val & TCSS_DDI_STATUS_READY;
385 }
386 
387 static bool tc_phy_status_complete(struct intel_digital_port *dig_port)
388 {
389 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
390 
391 	if (IS_ALDERLAKE_P(i915))
392 		return adl_tc_phy_status_complete(dig_port);
393 
394 	return icl_tc_phy_status_complete(dig_port);
395 }
396 
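/*
 * Take or release PHY ownership for display by updating the FIA's
 * DP_PHY_MODE_STATUS_NOT_SAFE bit. Returns false if the PHY is in TC-cold
 * and the ownership can't be changed.
 */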
397 static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
398 				      bool take)
399 {
400 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
401 	struct intel_uncore *uncore = &i915->uncore;
402 	u32 val;
403 
404 	val = intel_uncore_read(uncore,
405 				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
406 	if (val == 0xffffffff) {
407 		drm_dbg_kms(&i915->drm,
408 			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
409 			    dig_port->tc_port_name, take ? "take" : "release");
410 
411 		return false;
412 	}
413 
414 	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
415 	if (take)
416 		val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
417 
418 	intel_uncore_write(uncore,
419 			   PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
420 
421 	return true;
422 }
423 
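/* ADL-P variant: PHY ownership is controlled via DDI_BUF_CTL_TC_PHY_OWNERSHIP. */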
424 static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
425 				      bool take)
426 {
427 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
428 	struct intel_uncore *uncore = &i915->uncore;
429 	enum port port = dig_port->base.port;
430 	u32 val;
431 
432 	val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
433 	if (take)
434 		val |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
435 	else
436 		val &= ~DDI_BUF_CTL_TC_PHY_OWNERSHIP;
437 	intel_uncore_write(uncore, DDI_BUF_CTL(port), val);
438 
439 	return true;
440 }
441 
442 static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take)
443 {
444 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
445 
446 	if (IS_ALDERLAKE_P(i915))
447 		return adl_tc_phy_take_ownership(dig_port, take);
448 
449 	return icl_tc_phy_take_ownership(dig_port, take);
450 }
451 
452 static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
453 {
454 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
455 	struct intel_uncore *uncore = &i915->uncore;
456 	u32 val;
457 
458 	val = intel_uncore_read(uncore,
459 				PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
460 	if (val == 0xffffffff) {
461 		drm_dbg_kms(&i915->drm,
462 			    "Port %s: PHY in TCCOLD, assume not owned\n",
463 			    dig_port->tc_port_name);
464 		return false;
465 	}
466 
467 	return val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
468 }
469 
470 static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
471 {
472 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
473 	struct intel_uncore *uncore = &i915->uncore;
474 	enum port port = dig_port->base.port;
475 	u32 val;
476 
477 	val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
478 	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
479 }
480 
481 static bool tc_phy_is_owned(struct intel_digital_port *dig_port)
482 {
483 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
484 
485 	if (IS_ALDERLAKE_P(i915))
486 		return adl_tc_phy_is_owned(dig_port);
487 
488 	return icl_tc_phy_is_owned(dig_port);
489 }
490 
491 /*
492  * This function implements the first part of the Connect Flow described by our
493  * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
494  * lanes, EDID, etc.) is done as needed in the typical places.
495  *
496  * Unlike the other ports, type-C ports are not available for use as soon as we
497  * get a hotplug. The type-C PHYs can be shared between multiple controllers:
498  * display, USB, etc. As a result, handshaking through FIA is required around
499  * connect and disconnect to cleanly transfer ownership between the controllers and
500  * set the type-C power state.
501  */
502 static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
503 			       int required_lanes)
504 {
505 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
506 	u32 live_status_mask;
507 	int max_lanes;
508 
509 	if (!tc_phy_status_complete(dig_port)) {
510 		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
511 			    dig_port->tc_port_name);
512 		goto out_set_tbt_alt_mode;
513 	}
514 
515 	live_status_mask = tc_port_live_status_mask(dig_port);
516 	if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY))) &&
517 	    !dig_port->tc_legacy_port) {
518 		drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
519 			    dig_port->tc_port_name, live_status_mask);
520 		goto out_set_tbt_alt_mode;
521 	}
522 
523 	if (!tc_phy_take_ownership(dig_port, true) &&
524 	    !drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
525 		goto out_set_tbt_alt_mode;
526 
527 	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
528 	if (dig_port->tc_legacy_port) {
529 		drm_WARN_ON(&i915->drm, max_lanes != 4);
530 		dig_port->tc_mode = TC_PORT_LEGACY;
531 
532 		return;
533 	}
534 
535 	/*
536 	 * Now we have to re-check the live state, in case the port recently
537 	 * became disconnected. Not necessary for legacy mode.
538 	 */
539 	if (!(tc_port_live_status_mask(dig_port) & BIT(TC_PORT_DP_ALT))) {
540 		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
541 			    dig_port->tc_port_name);
542 		goto out_release_phy;
543 	}
544 
545 	if (max_lanes < required_lanes) {
546 		drm_dbg_kms(&i915->drm,
547 			    "Port %s: PHY max lanes %d < required lanes %d\n",
548 			    dig_port->tc_port_name,
549 			    max_lanes, required_lanes);
550 		goto out_release_phy;
551 	}
552 
553 	dig_port->tc_mode = TC_PORT_DP_ALT;
554 
555 	return;
556 
557 out_release_phy:
558 	tc_phy_take_ownership(dig_port, false);
559 out_set_tbt_alt_mode:
560 	dig_port->tc_mode = TC_PORT_TBT_ALT;
561 }
562 
563 /*
564  * See the comment at the connect function. This implements the Disconnect
565  * Flow.
566  */
567 static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
568 {
569 	switch (dig_port->tc_mode) {
570 	case TC_PORT_LEGACY:
571 	case TC_PORT_DP_ALT:
572 		tc_phy_take_ownership(dig_port, false);
573 		fallthrough;
574 	case TC_PORT_TBT_ALT:
575 		dig_port->tc_mode = TC_PORT_DISCONNECTED;
576 		fallthrough;
577 	case TC_PORT_DISCONNECTED:
578 		break;
579 	default:
580 		MISSING_CASE(dig_port->tc_mode);
581 	}
582 }
583 
584 static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
585 {
586 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
587 
588 	if (!tc_phy_status_complete(dig_port)) {
589 		drm_dbg_kms(&i915->drm, "Port %s: PHY status not complete\n",
590 			    dig_port->tc_port_name);
591 		return dig_port->tc_mode == TC_PORT_TBT_ALT;
592 	}
593 
594 	/* On ADL-P the PHY complete flag is set in TBT mode as well. */
595 	if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
596 		return true;
597 
598 	if (!tc_phy_is_owned(dig_port)) {
599 		drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
600 			    dig_port->tc_port_name);
601 
602 		return false;
603 	}
604 
605 	return dig_port->tc_mode == TC_PORT_DP_ALT ||
606 	       dig_port->tc_mode == TC_PORT_LEGACY;
607 }
608 
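/*
 * Derive the port's current mode from the HW state during readout: TBT-alt if
 * display doesn't own the PHY, otherwise legacy or DP-alt based on the legacy
 * port flag, overridden by the live status when one is reported.
 */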
609 static enum tc_port_mode
610 intel_tc_port_get_current_mode(struct intel_digital_port *dig_port)
611 {
612 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
613 	u32 live_status_mask = tc_port_live_status_mask(dig_port);
614 	enum tc_port_mode mode;
615 
616 	if (!tc_phy_is_owned(dig_port) ||
617 	    drm_WARN_ON(&i915->drm, !tc_phy_status_complete(dig_port)))
618 		return TC_PORT_TBT_ALT;
619 
620 	mode = dig_port->tc_legacy_port ? TC_PORT_LEGACY : TC_PORT_DP_ALT;
621 	if (live_status_mask) {
622 		enum tc_port_mode live_mode = fls(live_status_mask) - 1;
623 
624 		if (!drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT))
625 			mode = live_mode;
626 	}
627 
628 	return mode;
629 }
630 
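/*
 * The target mode is the highest mode bit set in the live status; with
 * nothing connected the port falls back to TBT-alt mode.
 */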
631 static enum tc_port_mode
632 intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
633 {
634 	u32 live_status_mask = tc_port_live_status_mask(dig_port);
635 
636 	if (live_status_mask)
637 		return fls(live_status_mask) - 1;
638 
639 	return TC_PORT_TBT_ALT;
640 }
641 
642 static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
643 				     int required_lanes, bool force_disconnect)
644 {
645 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
646 	enum tc_port_mode old_tc_mode = dig_port->tc_mode;
647 
648 	intel_display_power_flush_work(i915);
649 	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
650 		enum intel_display_power_domain aux_domain;
651 		bool aux_powered;
652 
653 		aux_domain = intel_aux_power_domain(dig_port);
654 		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
655 		drm_WARN_ON(&i915->drm, aux_powered);
656 	}
657 
658 	icl_tc_phy_disconnect(dig_port);
659 	if (!force_disconnect)
660 		icl_tc_phy_connect(dig_port, required_lanes);
661 
662 	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
663 		    dig_port->tc_port_name,
664 		    tc_port_mode_name(old_tc_mode),
665 		    tc_port_mode_name(dig_port->tc_mode));
666 }
667 
668 static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
669 {
670 	return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
671 }
672 
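/*
 * Reset the port's mode if the target mode differs from the current one (or
 * if a disconnect is forced), re-acquiring the TC-cold blocking power domain
 * that matches the resulting mode.
 */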
673 static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
674 				      int required_lanes, bool force_disconnect)
675 {
676 	enum intel_display_power_domain domain;
677 	intel_wakeref_t wref;
678 	bool needs_reset = force_disconnect;
679 
680 	if (!needs_reset) {
681 		/* Get power domain required to check the hotplug live status. */
682 		wref = tc_cold_block(dig_port, &domain);
683 		needs_reset = intel_tc_port_needs_reset(dig_port);
684 		tc_cold_unblock(dig_port, domain, wref);
685 	}
686 
687 	if (!needs_reset)
688 		return;
689 
690 	/* Get power domain required for resetting the mode. */
691 	wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);
692 
693 	intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);
694 
695 	/* Get power domain matching the new mode after reset. */
696 	tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
697 			fetch_and_zero(&dig_port->tc_lock_wakeref));
698 	if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
699 		dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
700 							  &dig_port->tc_lock_power_domain);
701 
702 	tc_cold_unblock(dig_port, domain, wref);
703 }
704 
705 static void __intel_tc_port_get_link(struct intel_digital_port *dig_port)
706 {
707 	dig_port->tc_link_refcount++;
708 }
709 
710 static void __intel_tc_port_put_link(struct intel_digital_port *dig_port)
711 {
712 	dig_port->tc_link_refcount--;
713 }
714 
715 static bool tc_port_is_enabled(struct intel_digital_port *dig_port)
716 {
717 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
718 
719 	assert_tc_port_power_enabled(dig_port);
720 
721 	return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
722 	       DDI_BUF_CTL_ENABLE;
723 }
724 
725 /**
726  * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
727  * @dig_port: digital port
728  *
729  * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
730  * will be locked until intel_tc_port_sanitize_mode() is called.
731  */
732 void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
733 {
734 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
735 	intel_wakeref_t tc_cold_wref;
736 	enum intel_display_power_domain domain;
737 
738 	mutex_lock(&dig_port->tc_lock);
739 
740 	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
741 	drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
742 	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount);
743 
744 	tc_cold_wref = tc_cold_block(dig_port, &domain);
745 
746 	dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
747 	/*
748 	 * Save the initial mode for the state check in
749 	 * intel_tc_port_sanitize_mode().
750 	 */
751 	dig_port->tc_init_mode = dig_port->tc_mode;
752 	dig_port->tc_lock_wakeref = tc_cold_block(dig_port, &dig_port->tc_lock_power_domain);
753 
754 	/*
755 	 * The PHY needs to be connected for AUX to work during HW readout and
756 	 * MST topology resume, but the PHY mode can only be changed if the
757 	 * port is disabled.
758 	 */
759 	if (!tc_port_is_enabled(dig_port))
760 		intel_tc_port_update_mode(dig_port, 1, false);
761 
762 	/* Prevent changing dig_port->tc_mode until intel_tc_port_sanitize_mode() is called. */
763 	__intel_tc_port_get_link(dig_port);
764 
765 	tc_cold_unblock(dig_port, domain, tc_cold_wref);
766 
767 	drm_dbg_kms(&i915->drm, "Port %s: init mode (%s)\n",
768 		    dig_port->tc_port_name,
769 		    tc_port_mode_name(dig_port->tc_mode));
770 
771 	mutex_unlock(&dig_port->tc_lock);
772 }
773 
774 /**
775  * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
776  * @dig_port: digital port
777  *
778  * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
779  * loading and system resume:
780  * If the encoder is enabled, keep the TypeC mode/PHY connected state locked
781  * until the encoder is disabled.
782  * If the encoder is disabled, make sure the PHY is disconnected.
783  */
784 void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port)
785 {
786 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
787 	struct intel_encoder *encoder = &dig_port->base;
788 	int active_links = 0;
789 
790 	mutex_lock(&dig_port->tc_lock);
791 
792 	if (dig_port->dp.is_mst)
793 		active_links = intel_dp_mst_encoder_active_links(dig_port);
794 	else if (encoder->base.crtc)
795 		active_links = to_intel_crtc(encoder->base.crtc)->active;
796 
797 	drm_WARN_ON(&i915->drm, dig_port->tc_link_refcount != 1);
798 	if (active_links) {
799 		if (!icl_tc_phy_is_connected(dig_port))
800 			drm_dbg_kms(&i915->drm,
801 				    "Port %s: PHY disconnected with %d active link(s)\n",
802 				    dig_port->tc_port_name, active_links);
803 	} else {
804 		/*
805 		 * TBT-alt is the default mode in any case where the PHY ownership
806 		 * is not held (regardless of the sink's live connection state), so
807 		 * we'll just switch from it to disconnected mode here without
808 		 * a note.
809 		 */
810 		if (dig_port->tc_init_mode != TC_PORT_TBT_ALT)
811 			drm_dbg_kms(&i915->drm,
812 				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
813 				    dig_port->tc_port_name,
814 				    tc_port_mode_name(dig_port->tc_init_mode));
815 		icl_tc_phy_disconnect(dig_port);
816 		__intel_tc_port_put_link(dig_port);
817 
818 		tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
819 				fetch_and_zero(&dig_port->tc_lock_wakeref));
820 	}
821 
822 	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
823 		    dig_port->tc_port_name,
824 		    tc_port_mode_name(dig_port->tc_mode));
825 
826 	mutex_unlock(&dig_port->tc_lock);
827 }
828 
829 /*
830  * The type-C ports are different because even when they are connected, they may
831  * not be available/usable by the graphics driver: see the comment on
832  * icl_tc_phy_connect(). So in our driver, instead of adding the additional
833  * concept of "usable" and making everything check for "connected and usable", we
834  * define a port as "connected" when it is not only connected, but also when it
835  * is usable by the rest of the driver. That maintains the old assumption that
836  * connected ports are usable, and avoids exposing to the users objects they
837  * can't really use.
838  */
839 bool intel_tc_port_connected(struct intel_encoder *encoder)
840 {
841 	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
842 	bool is_connected;
843 
844 	intel_tc_port_lock(dig_port);
845 
846 	is_connected = tc_port_live_status_mask(dig_port) &
847 		       BIT(dig_port->tc_mode);
848 
849 	intel_tc_port_unlock(dig_port);
850 
851 	return is_connected;
852 }
853 
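/*
 * Lock the port and, unless a link reference is holding the current mode,
 * update the mode to match the live status before use.
 */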
854 static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
855 				 int required_lanes)
856 {
857 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
858 
859 	mutex_lock(&dig_port->tc_lock);
860 
861 	cancel_delayed_work(&dig_port->tc_disconnect_phy_work);
862 
863 	if (!dig_port->tc_link_refcount)
864 		intel_tc_port_update_mode(dig_port, required_lanes,
865 					  false);
866 
867 	drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
868 	drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
869 				!tc_phy_is_owned(dig_port));
870 }
871 
872 void intel_tc_port_lock(struct intel_digital_port *dig_port)
873 {
874 	__intel_tc_port_lock(dig_port, 1);
875 }
876 
877 /**
878  * intel_tc_port_disconnect_phy_work: disconnect TypeC PHY from display port
879  * @dig_port: digital port
880  *
881  * Disconnect the given digital port from its TypeC PHY (handing back the
882  * control of the PHY to the TypeC subsystem). This will happen in a delayed
883  * manner after each AUX transaction and modeset disable.
884  */
885 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
886 {
887 	struct intel_digital_port *dig_port =
888 		container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);
889 
890 	mutex_lock(&dig_port->tc_lock);
891 
892 	if (!dig_port->tc_link_refcount)
893 		intel_tc_port_update_mode(dig_port, 1, true);
894 
895 	mutex_unlock(&dig_port->tc_lock);
896 }
897 
898 /**
899  * intel_tc_port_flush_work: flush the work disconnecting the PHY
900  * @dig_port: digital port
901  *
902  * Flush the delayed work disconnecting an idle PHY.
903  */
904 void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
905 {
906 	flush_delayed_work(&dig_port->tc_disconnect_phy_work);
907 }
908 
909 void intel_tc_port_unlock(struct intel_digital_port *dig_port)
910 {
911 	if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
912 		queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
913 				   msecs_to_jiffies(1000));
914 
915 	mutex_unlock(&dig_port->tc_lock);
916 }
917 
918 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
919 {
920 	return mutex_is_locked(&dig_port->tc_lock) ||
921 	       dig_port->tc_link_refcount;
922 }
923 
924 void intel_tc_port_get_link(struct intel_digital_port *dig_port,
925 			    int required_lanes)
926 {
927 	__intel_tc_port_lock(dig_port, required_lanes);
928 	__intel_tc_port_get_link(dig_port);
929 	intel_tc_port_unlock(dig_port);
930 }
931 
932 void intel_tc_port_put_link(struct intel_digital_port *dig_port)
933 {
934 	intel_tc_port_lock(dig_port);
935 	__intel_tc_port_put_link(dig_port);
936 	intel_tc_port_unlock(dig_port);
937 
938 	/*
939 	 * Disconnecting the PHY after the PHY's PLL gets disabled may
940 	 * hang the system on ADL-P, so disconnect the PHY here synchronously.
941 	 * TODO: remove this once the root cause of the ordering requirement
942 	 * is found/fixed.
943 	 */
944 	intel_tc_port_flush_work(dig_port);
945 }
946 
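/*
 * Check whether the platform routes its TC ports through Modular FIA
 * instances, based on the MODULAR_FIA_MASK bit in FIA1's DFLEXDPSP register.
 */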
947 static bool
948 tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
949 {
950 	enum intel_display_power_domain domain;
951 	intel_wakeref_t wakeref;
952 	u32 val;
953 
954 	if (!INTEL_INFO(i915)->display.has_modular_fia)
955 		return false;
956 
957 	mutex_lock(&dig_port->tc_lock);
958 	wakeref = tc_cold_block(dig_port, &domain);
959 	val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
960 	tc_cold_unblock(dig_port, domain, wakeref);
961 	mutex_unlock(&dig_port->tc_lock);
962 
963 	drm_WARN_ON(&i915->drm, val == 0xffffffff);
964 
965 	return val & MODULAR_FIA_MASK;
966 }
967 
968 static void
969 tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
970 {
971 	enum port port = dig_port->base.port;
972 	enum tc_port tc_port = intel_port_to_tc(i915, port);
973 
974 	/*
975 	 * Each Modular FIA instance houses 2 TC ports. In SoCs with more than
976 	 * two TC ports, there are multiple instances of Modular FIA.
977 	 */
978 	if (tc_has_modular_fia(i915, dig_port)) {
979 		dig_port->tc_phy_fia = tc_port / 2;
980 		dig_port->tc_phy_fia_idx = tc_port % 2;
981 	} else {
982 		dig_port->tc_phy_fia = FIA1;
983 		dig_port->tc_phy_fia_idx = tc_port;
984 	}
985 }
986 
987 void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
988 {
989 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
990 	enum port port = dig_port->base.port;
991 	enum tc_port tc_port = intel_port_to_tc(i915, port);
992 
993 	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
994 		return;
995 
996 	snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
997 		 "%c/TC#%d", port_name(port), tc_port + 1);
998 
999 	rw_init(&dig_port->tc_lock, "itcp");
1000 	INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
1001 	dig_port->tc_legacy_port = is_legacy;
1002 	dig_port->tc_mode = TC_PORT_DISCONNECTED;
1003 	dig_port->tc_link_refcount = 0;
1004 	tc_port_load_fia_params(i915, dig_port);
1005 
1006 	intel_tc_port_init_mode(dig_port);
1007 }
1008