/*	$NetBSD: drm_dp_mst_topology.c,v 1.11 2021/12/19 09:45:10 riastradh Exp $	*/

/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.11 2021/12/19 09:45:10 riastradh Exp $");

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_crtc_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

#include <linux/nbsd-namespace.h>

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
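/*
 * Illustrative usage sketch (not part of the upstream file): a driver
 * typically initializes the manager once, enables MST when the sink
 * advertises it, and forwards short HPD pulses to the helper, roughly:
 *
 *	drm_dp_mst_topology_mgr_init(mgr, dev, aux, max_dpcd_transaction_bytes,
 *				     max_payloads, conn_base_id);
 *	drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *
 * See drm_dp_mst_helper.h for the authoritative prototypes.
 */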
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
#endif

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];
	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}

static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
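/*
 * Summary of the on-wire sideband message header handled by the encode and
 * decode helpers above (descriptive comment added for clarity, derived from
 * the code itself):
 *
 *	byte 0:        LCT (bits 7:4) | LCR (bits 3:0)
 *	next LCT/2:    RAD bytes, one nibble per hop
 *	next byte:     broadcast (bit 7) | path_msg (bit 6) | msg_len (bits 5:0)
 *	last byte:     SOMT (bit 7) | EOMT (bit 6) | seqno (bit 4) | CRC4 (bits 3:0)
 */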

static void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
static int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
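/*
 * Note (descriptive comment added for clarity): for DP_REMOTE_DPCD_WRITE,
 * DP_REMOTE_I2C_READ and DP_REMOTE_I2C_WRITE the decoded body above holds
 * kmemdup()ed byte buffers, so callers such as
 * drm_dp_mst_dump_sideband_msg_tx() below must kfree() them when done.
 */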

static void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4 __unused;	/* XXX Mistake? */

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i] &&
		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
			mgr->proposed_vcpis[i] = NULL;
			clear_bit(i + 1, &mgr->payload_mask);
		}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

#ifdef __NetBSD__
	mutex_lock(&mstb->mgr->qlock);
	DRM_TIMED_WAIT_UNTIL(ret, &mgr->tx_waitq, &mstb->mgr->qlock, 4*HZ,
	    check_txmsg_state(mgr, txmsg));
#else
	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
#endif
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
		mgr->is_waiting_for_dwn_reply = false;

	}
out:
	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
		struct drm_printer p = drm_debug_printer(DBG_PREFIX);

		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
	}
	mutex_unlock(&mgr->qlock);

	drm_dp_mst_kick_tx(mgr);
	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}

/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 * An example of topology and malloc refs in a DP MST topology with two
 * active payloads. Topology refcount increments are indicated by solid
 * lines, and malloc refcount increments are indicated by dashed lines.
 * Each starts from the branch which incremented the refcount, and ends at
 * the branch to which the refcount belongs, i.e. the arrow points the
 * same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 * Ports and branch devices which have been released from memory are
 * colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
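/*
 * Illustrative sketch (not part of the upstream file): a driver that caches a
 * port pointer from its &drm_dp_mst_topology_cbs.add_connector callback would
 * typically pair the malloc reference calls like so:
 *
 *	drm_dp_mst_get_port_malloc(port);	in ->add_connector()
 *	drm_dp_mst_put_port_malloc(port);	in the connector's ->destroy()
 *
 * keeping the &struct drm_dp_mst_port allocation valid for the connector's
 * lifetime even after the port has left the topology.
 */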

/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}

/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}

/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);

/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)

#define STACK_DEPTH 8

static noinline void
__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
		    struct drm_dp_mst_topology_ref_history *history,
		    enum drm_dp_mst_topology_ref_type type)
{
	struct drm_dp_mst_topology_ref_entry *entry = NULL;
	depot_stack_handle_t backtrace;
	ulong stack_entries[STACK_DEPTH];
	uint n;
	int i;

	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
	if (!backtrace)
		return;

	/* Try to find an existing entry for this backtrace */
	for (i = 0; i < history->len; i++) {
		if (history->entries[i].backtrace == backtrace) {
			entry = &history->entries[i];
			break;
		}
	}

	/* Otherwise add one */
	if (!entry) {
		struct drm_dp_mst_topology_ref_entry *new;
		int new_len = history->len + 1;

		new = krealloc(history->entries, sizeof(*new) * new_len,
			       GFP_KERNEL);
		if (!new)
			return;

		entry = &new[history->len];
		history->len = new_len;
		history->entries = new;

		entry->backtrace = backtrace;
		entry->type = type;
		entry->count = 0;
	}
	entry->count++;
	entry->ts_nsec = ktime_get_ns();
}

static int
topology_ref_history_cmp(const void *a, const void *b)
{
	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;

	if (entry_a->ts_nsec > entry_b->ts_nsec)
		return 1;
	else if (entry_a->ts_nsec < entry_b->ts_nsec)
		return -1;
	else
		return 0;
}

static inline const char *
topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
{
	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
		return "get";
	else
		return "put";
}

static void
__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
			    void *ptr, const char *type_str)
{
	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	int i;

	if (!buf)
		return;

	if (!history->len)
		goto out;

	/* First, sort the list so that it goes from oldest to newest
	 * reference entry
	 */
	sort(history->entries, history->len, sizeof(*history->entries),
	     topology_ref_history_cmp, NULL);

	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
		   type_str, ptr);

	for (i = 0; i < history->len; i++) {
		const struct drm_dp_mst_topology_ref_entry *entry =
			&history->entries[i];
		ulong *entries;
		uint nr_entries;
		u64 ts_nsec = entry->ts_nsec;
		u32 rem_nsec = do_div(ts_nsec, 1000000000);

		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);

		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
			   entry->count,
			   topology_ref_type_to_str(entry->type),
			   ts_nsec, rem_nsec / 1000, buf);
	}

	/* Now free the history, since this is the only time we expose it */
	kfree(history->entries);
out:
	kfree(buf);
}

static __always_inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
{
	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
				    "MSTB");
}

static __always_inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
{
	__dump_topology_ref_history(&port->topology_ref_history, port,
				    "Port");
}

static __always_inline void
save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
}

static __always_inline void
save_port_topology_ref(struct drm_dp_mst_port *port,
		       enum drm_dp_mst_topology_ref_type type)
{
	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
}

static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->topology_ref_history_lock);
}

static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_unlock(&mgr->topology_ref_history_lock);
}
#else
static inline void
topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
static inline void
drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
static inline void
drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
#define save_mstb_topology_ref(mstb, type)
#define save_port_topology_ref(port, type)
#endif

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;

	drm_dp_mst_dump_mstb_topology_history(mstb);

	INIT_LIST_HEAD(&mstb->destroy_next);

	/*
	 * This can get called under mgr->mutex, so we need to perform the
	 * actual destruction of the mstb in another worker
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	schedule_work(&mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret;

	topology_ref_history_lock(mstb->mgr);
	ret = kref_get_unless_zero(&mstb->topology_kref);
	if (ret) {
		DRM_DEBUG("mstb %p (%d)\n",
			  mstb, kref_read(&mstb->topology_kref));
		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(mstb->mgr);

	return ret;
}

/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));

	topology_ref_history_unlock(mstb->mgr);
}

/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	topology_ref_history_lock(mstb->mgr);

	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);

	topology_ref_history_unlock(mstb->mgr);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	drm_dp_mst_dump_port_topology_history(port);

	/* There's nothing that needs locking to destroy an input port yet */
	if (port->input) {
		drm_dp_mst_put_port_malloc(port);
		return;
	}

	kfree(port->cached_edid);

	/*
	 * we can't destroy the connector here, as we might be holding the
	 * mode_config.mutex from an EDID retrieval
	 */
	mutex_lock(&mgr->delayed_destroy_lock);
	list_add(&port->next, &mgr->destroy_port_list);
	mutex_unlock(&mgr->delayed_destroy_lock);
	schedule_work(&mgr->delayed_destroy_work);
}

/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret;

	topology_ref_history_lock(port->mgr);
	ret = kref_get_unless_zero(&port->topology_kref);
	if (ret) {
		DRM_DEBUG("port %p (%d)\n",
			  port, kref_read(&port->topology_kref));
		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
	}

	topology_ref_history_unlock(port->mgr);
	return ret;
}

/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_refcount without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1800 * 1801 * See also: 1802 * drm_dp_mst_topology_try_get_port() 1803 * drm_dp_mst_topology_put_port() 1804 */ 1805 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) 1806 { 1807 topology_ref_history_lock(port->mgr); 1808 1809 WARN_ON(kref_read(&port->topology_kref) == 0); 1810 kref_get(&port->topology_kref); 1811 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); 1812 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); 1813 1814 topology_ref_history_unlock(port->mgr); 1815 } 1816 1817 /** 1818 * drm_dp_mst_topology_put_port() - release a topology reference to a port 1819 * @port: The &struct drm_dp_mst_port to release the topology reference from 1820 * 1821 * Releases a topology reference from @port by decrementing 1822 * &drm_dp_mst_port.topology_kref. 1823 * 1824 * See also: 1825 * drm_dp_mst_topology_try_get_port() 1826 * drm_dp_mst_topology_get_port() 1827 */ 1828 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) 1829 { 1830 topology_ref_history_lock(port->mgr); 1831 1832 DRM_DEBUG("port %p (%d)\n", 1833 port, kref_read(&port->topology_kref) - 1); 1834 save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); 1835 1836 topology_ref_history_unlock(port->mgr); 1837 kref_put(&port->topology_kref, drm_dp_destroy_port); 1838 } 1839 1840 static struct drm_dp_mst_branch * 1841 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb, 1842 struct drm_dp_mst_branch *to_find) 1843 { 1844 struct drm_dp_mst_port *port; 1845 struct drm_dp_mst_branch *rmstb; 1846 1847 if (to_find == mstb) 1848 return mstb; 1849 1850 list_for_each_entry(port, &mstb->ports, next) { 1851 if (port->mstb) { 1852 rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1853 port->mstb, to_find); 1854 if (rmstb) 1855 return rmstb; 1856 } 1857 } 1858 return NULL; 1859 } 1860 1861 static struct drm_dp_mst_branch * 1862 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr, 1863 struct drm_dp_mst_branch *mstb) 1864 { 1865 struct drm_dp_mst_branch *rmstb = NULL; 1866 1867 mutex_lock(&mgr->lock); 1868 if (mgr->mst_primary) { 1869 rmstb = drm_dp_mst_topology_get_mstb_validated_locked( 1870 mgr->mst_primary, mstb); 1871 1872 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb)) 1873 rmstb = NULL; 1874 } 1875 mutex_unlock(&mgr->lock); 1876 return rmstb; 1877 } 1878 1879 static struct drm_dp_mst_port * 1880 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb, 1881 struct drm_dp_mst_port *to_find) 1882 { 1883 struct drm_dp_mst_port *port, *mport; 1884 1885 list_for_each_entry(port, &mstb->ports, next) { 1886 if (port == to_find) 1887 return port; 1888 1889 if (port->mstb) { 1890 mport = drm_dp_mst_topology_get_port_validated_locked( 1891 port->mstb, to_find); 1892 if (mport) 1893 return mport; 1894 } 1895 } 1896 return NULL; 1897 } 1898 1899 static struct drm_dp_mst_port * 1900 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr, 1901 struct drm_dp_mst_port *port) 1902 { 1903 struct drm_dp_mst_port *rport = NULL; 1904 1905 mutex_lock(&mgr->lock); 1906 if (mgr->mst_primary) { 1907 rport = drm_dp_mst_topology_get_port_validated_locked( 1908 mgr->mst_primary, port); 1909 1910 if (rport && !drm_dp_mst_topology_try_get_port(rport)) 1911 rport = NULL; 1912 } 1913 mutex_unlock(&mgr->lock); 1914 return rport; 1915 } 1916 1917 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) 1918 { 1919 struct drm_dp_mst_port *port; 1920 int ret; 1921 1922 
list_for_each_entry(port, &mstb->ports, next) { 1923 if (port->port_num == port_num) { 1924 ret = drm_dp_mst_topology_try_get_port(port); 1925 return ret ? port : NULL; 1926 } 1927 } 1928 1929 return NULL; 1930 } 1931 1932 /* 1933 * calculate a new RAD for this MST branch device 1934 * if parent has an LCT of 2 then it has 1 nibble of RAD, 1935 * if parent has an LCT of 3 then it has 2 nibbles of RAD, 1936 */ 1937 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, 1938 u8 *rad) 1939 { 1940 int parent_lct = port->parent->lct; 1941 int shift = 4; 1942 int idx = (parent_lct - 1) / 2; 1943 if (parent_lct > 1) { 1944 memcpy(rad, port->parent->rad, idx + 1); 1945 shift = (parent_lct % 2) ? 4 : 0; 1946 } else 1947 rad[0] = 0; 1948 1949 rad[idx] |= port->port_num << shift; 1950 return parent_lct + 1; 1951 } 1952 1953 static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs) 1954 { 1955 switch (pdt) { 1956 case DP_PEER_DEVICE_DP_LEGACY_CONV: 1957 case DP_PEER_DEVICE_SST_SINK: 1958 return true; 1959 case DP_PEER_DEVICE_MST_BRANCHING: 1960 /* For sst branch device */ 1961 if (!mcs) 1962 return true; 1963 1964 return false; 1965 } 1966 return true; 1967 } 1968 1969 static int 1970 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, 1971 bool new_mcs) 1972 { 1973 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 1974 struct drm_dp_mst_branch *mstb; 1975 u8 rad[8], lct; 1976 int ret = 0; 1977 1978 if (port->pdt == new_pdt && port->mcs == new_mcs) 1979 return 0; 1980 1981 /* Teardown the old pdt, if there is one */ 1982 if (port->pdt != DP_PEER_DEVICE_NONE) { 1983 if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { 1984 /* 1985 * If the new PDT would also have an i2c bus, 1986 * don't bother with reregistering it 1987 */ 1988 if (new_pdt != DP_PEER_DEVICE_NONE && 1989 drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) { 1990 port->pdt = new_pdt; 1991 port->mcs = new_mcs; 1992 return 0; 1993 } 1994 1995 /* remove i2c over sideband */ 1996 drm_dp_mst_unregister_i2c_bus(&port->aux); 1997 } else { 1998 mutex_lock(&mgr->lock); 1999 drm_dp_mst_topology_put_mstb(port->mstb); 2000 port->mstb = NULL; 2001 mutex_unlock(&mgr->lock); 2002 } 2003 } 2004 2005 port->pdt = new_pdt; 2006 port->mcs = new_mcs; 2007 2008 if (port->pdt != DP_PEER_DEVICE_NONE) { 2009 if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { 2010 /* add i2c over sideband */ 2011 ret = drm_dp_mst_register_i2c_bus(&port->aux); 2012 } else { 2013 lct = drm_dp_calculate_rad(port, rad); 2014 mstb = drm_dp_add_mst_branch_device(lct, rad); 2015 if (!mstb) { 2016 ret = -ENOMEM; 2017 DRM_ERROR("Failed to create MSTB for port %p", 2018 port); 2019 goto out; 2020 } 2021 2022 mutex_lock(&mgr->lock); 2023 port->mstb = mstb; 2024 mstb->mgr = port->mgr; 2025 mstb->port_parent = port; 2026 2027 /* 2028 * Make sure this port's memory allocation stays 2029 * around until its child MSTB releases it 2030 */ 2031 drm_dp_mst_get_port_malloc(port); 2032 mutex_unlock(&mgr->lock); 2033 2034 /* And make sure we send a link address for this */ 2035 ret = 1; 2036 } 2037 } 2038 2039 out: 2040 if (ret < 0) 2041 port->pdt = DP_PEER_DEVICE_NONE; 2042 return ret; 2043 } 2044 2045 /** 2046 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband 2047 * @aux: Fake sideband AUX CH 2048 * @offset: address of the (first) register to read 2049 * @buffer: buffer to store the register values 2050 * @size: number of bytes in @buffer 2051 * 2052 * Performs the same functionality for remote devices via 2053 * sideband messaging as 
drm_dp_dpcd_read() does for local 2054 * devices via actual AUX CH. 2055 * 2056 * Return: Number of bytes read, or negative error code on failure. 2057 */ 2058 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, 2059 unsigned int offset, void *buffer, size_t size) 2060 { 2061 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, 2062 aux); 2063 2064 return drm_dp_send_dpcd_read(port->mgr, port, 2065 offset, size, buffer); 2066 } 2067 2068 /** 2069 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband 2070 * @aux: Fake sideband AUX CH 2071 * @offset: address of the (first) register to write 2072 * @buffer: buffer containing the values to write 2073 * @size: number of bytes in @buffer 2074 * 2075 * Performs the same functionality for remote devices via 2076 * sideband messaging as drm_dp_dpcd_write() does for local 2077 * devices via actual AUX CH. 2078 * 2079 * Return: 0 on success, negative error code on failure. 2080 */ 2081 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, 2082 unsigned int offset, void *buffer, size_t size) 2083 { 2084 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, 2085 aux); 2086 2087 return drm_dp_send_dpcd_write(port->mgr, port, 2088 offset, size, buffer); 2089 } 2090 2091 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) 2092 { 2093 int ret __unused; 2094 2095 memcpy(mstb->guid, guid, 16); 2096 2097 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { 2098 if (mstb->port_parent) { 2099 ret = drm_dp_send_dpcd_write( 2100 mstb->mgr, 2101 mstb->port_parent, 2102 DP_GUID, 2103 16, 2104 mstb->guid); 2105 } else { 2106 2107 ret = drm_dp_dpcd_write( 2108 mstb->mgr->aux, 2109 DP_GUID, 2110 mstb->guid, 2111 16); 2112 } 2113 } 2114 } 2115 2116 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, 2117 int pnum, 2118 char *proppath, 2119 size_t proppath_size) 2120 { 2121 int i; 2122 char temp[8]; 2123 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 2124 for (i = 0; i < (mstb->lct - 1); i++) { 2125 int shift = (i % 2) ? 0 : 4; 2126 int port_num = (mstb->rad[i / 2] >> shift) & 0xf; 2127 snprintf(temp, sizeof(temp), "-%d", port_num); 2128 strlcat(proppath, temp, proppath_size); 2129 } 2130 snprintf(temp, sizeof(temp), "-%d", pnum); 2131 strlcat(proppath, temp, proppath_size); 2132 } 2133 2134 /** 2135 * drm_dp_mst_connector_late_register() - Late MST connector registration 2136 * @connector: The MST connector 2137 * @port: The MST port for this connector 2138 * 2139 * Helper to register the remote aux device for this MST port. Drivers should 2140 * call this from their mst connector's late_register hook to enable MST aux 2141 * devices. 2142 * 2143 * Return: 0 on success, negative error code on failure. 2144 */ 2145 int drm_dp_mst_connector_late_register(struct drm_connector *connector, 2146 struct drm_dp_mst_port *port) 2147 { 2148 DRM_DEBUG_KMS("registering %s remote bus for %s\n", 2149 port->aux.name, device_xname(connector->dev->dev)); 2150 2151 port->aux.dev = connector->kdev; 2152 return drm_dp_aux_register_devnode(&port->aux); 2153 } 2154 EXPORT_SYMBOL(drm_dp_mst_connector_late_register); 2155 2156 /** 2157 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration 2158 * @connector: The MST connector 2159 * @port: The MST port for this connector 2160 * 2161 * Helper to unregister the remote aux device for this MST port, registered by 2162 * drm_dp_mst_connector_late_register(). 
Drivers should call this from their mst 2163 * connector's early_unregister hook. 2164 */ 2165 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, 2166 struct drm_dp_mst_port *port) 2167 { 2168 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n", 2169 port->aux.name, device_xname(connector->dev->dev)); 2170 drm_dp_aux_unregister_devnode(&port->aux); 2171 } 2172 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); 2173 2174 static void 2175 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, 2176 struct drm_dp_mst_port *port) 2177 { 2178 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 2179 char proppath[255]; 2180 int ret; 2181 2182 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); 2183 port->connector = mgr->cbs->add_connector(mgr, port, proppath); 2184 if (!port->connector) { 2185 ret = -ENOMEM; 2186 goto error; 2187 } 2188 2189 if (port->pdt != DP_PEER_DEVICE_NONE && 2190 drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { 2191 port->cached_edid = drm_get_edid(port->connector, 2192 &port->aux.ddc); 2193 drm_connector_set_tile_property(port->connector); 2194 } 2195 2196 mgr->cbs->register_connector(port->connector); 2197 return; 2198 2199 error: 2200 DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret); 2201 } 2202 2203 /* 2204 * Drop a topology reference, and unlink the port from the in-memory topology 2205 * layout 2206 */ 2207 static void 2208 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, 2209 struct drm_dp_mst_port *port) 2210 { 2211 mutex_lock(&mgr->lock); 2212 port->parent->num_ports--; 2213 list_del(&port->next); 2214 mutex_unlock(&mgr->lock); 2215 drm_dp_mst_topology_put_port(port); 2216 } 2217 2218 static struct drm_dp_mst_port * 2219 drm_dp_mst_add_port(struct drm_device *dev, 2220 struct drm_dp_mst_topology_mgr *mgr, 2221 struct drm_dp_mst_branch *mstb, u8 port_number) 2222 { 2223 struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); 2224 2225 if (!port) 2226 return NULL; 2227 2228 kref_init(&port->topology_kref); 2229 kref_init(&port->malloc_kref); 2230 port->parent = mstb; 2231 port->port_num = port_number; 2232 port->mgr = mgr; 2233 port->aux.name = "DPMST"; 2234 port->aux.dev = dev->dev; 2235 port->aux.is_remote = true; 2236 2237 /* initialize the MST downstream port's AUX crc work queue */ 2238 drm_dp_remote_aux_init(&port->aux); 2239 2240 /* 2241 * Make sure the memory allocation for our parent branch stays 2242 * around until our own memory allocation is released 2243 */ 2244 drm_dp_mst_get_mstb_malloc(mstb); 2245 2246 return port; 2247 } 2248 2249 static int 2250 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, 2251 struct drm_device *dev, 2252 struct drm_dp_link_addr_reply_port *port_msg) 2253 { 2254 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 2255 struct drm_dp_mst_port *port; 2256 int old_ddps = 0, ret; 2257 u8 new_pdt = DP_PEER_DEVICE_NONE; 2258 bool new_mcs = 0; 2259 bool created = false, send_link_addr = false, changed = false; 2260 2261 port = drm_dp_get_port(mstb, port_msg->port_number); 2262 if (!port) { 2263 port = drm_dp_mst_add_port(dev, mgr, mstb, 2264 port_msg->port_number); 2265 if (!port) 2266 return -ENOMEM; 2267 created = true; 2268 changed = true; 2269 } else if (!port->input && port_msg->input_port && port->connector) { 2270 /* Since port->connector can't be changed here, we create a 2271 * new port if input_port changes from 0 to 1 2272 */ 2273 drm_dp_mst_topology_unlink_port(mgr, port); 2274 
drm_dp_mst_topology_put_port(port); 2275 port = drm_dp_mst_add_port(dev, mgr, mstb, 2276 port_msg->port_number); 2277 if (!port) 2278 return -ENOMEM; 2279 changed = true; 2280 created = true; 2281 } else if (port->input && !port_msg->input_port) { 2282 changed = true; 2283 } else if (port->connector) { 2284 /* We're updating a port that's exposed to userspace, so do it 2285 * under lock 2286 */ 2287 drm_modeset_lock(&mgr->base.lock, NULL); 2288 2289 old_ddps = port->ddps; 2290 changed = port->ddps != port_msg->ddps || 2291 (port->ddps && 2292 (port->ldps != port_msg->legacy_device_plug_status || 2293 port->dpcd_rev != port_msg->dpcd_revision || 2294 port->mcs != port_msg->mcs || 2295 port->pdt != port_msg->peer_device_type || 2296 port->num_sdp_stream_sinks != 2297 port_msg->num_sdp_stream_sinks)); 2298 } 2299 2300 port->input = port_msg->input_port; 2301 if (!port->input) 2302 new_pdt = port_msg->peer_device_type; 2303 new_mcs = port_msg->mcs; 2304 port->ddps = port_msg->ddps; 2305 port->ldps = port_msg->legacy_device_plug_status; 2306 port->dpcd_rev = port_msg->dpcd_revision; 2307 port->num_sdp_streams = port_msg->num_sdp_streams; 2308 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 2309 2310 /* manage mstb port lists with mgr lock - take a reference 2311 for this list */ 2312 if (created) { 2313 mutex_lock(&mgr->lock); 2314 drm_dp_mst_topology_get_port(port); 2315 list_add(&port->next, &mstb->ports); 2316 mstb->num_ports++; 2317 mutex_unlock(&mgr->lock); 2318 } 2319 2320 if (old_ddps != port->ddps) { 2321 if (port->ddps) { 2322 if (!port->input) { 2323 drm_dp_send_enum_path_resources(mgr, mstb, 2324 port); 2325 } 2326 } else { 2327 port->available_pbn = 0; 2328 } 2329 } 2330 2331 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); 2332 if (ret == 1) { 2333 send_link_addr = true; 2334 } else if (ret < 0) { 2335 DRM_ERROR("Failed to change PDT on port %p: %d\n", 2336 port, ret); 2337 goto fail; 2338 } 2339 2340 /* 2341 * If this port wasn't just created, then we're reprobing because 2342 * we're coming out of suspend. 
In this case, always resend the link 2343 * address if there's an MSTB on this port 2344 */ 2345 if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && 2346 port->mcs) 2347 send_link_addr = true; 2348 2349 if (port->connector) 2350 drm_modeset_unlock(&mgr->base.lock); 2351 else if (!port->input) 2352 drm_dp_mst_port_add_connector(mstb, port); 2353 2354 if (send_link_addr && port->mstb) { 2355 ret = drm_dp_send_link_address(mgr, port->mstb); 2356 if (ret == 1) /* MSTB below us changed */ 2357 changed = true; 2358 else if (ret < 0) 2359 goto fail_put; 2360 } 2361 2362 /* put reference to this port */ 2363 drm_dp_mst_topology_put_port(port); 2364 return changed; 2365 2366 fail: 2367 drm_dp_mst_topology_unlink_port(mgr, port); 2368 if (port->connector) 2369 drm_modeset_unlock(&mgr->base.lock); 2370 fail_put: 2371 drm_dp_mst_topology_put_port(port); 2372 return ret; 2373 } 2374 2375 static void 2376 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, 2377 struct drm_dp_connection_status_notify *conn_stat) 2378 { 2379 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 2380 struct drm_dp_mst_port *port; 2381 int old_ddps, old_input, ret, i; 2382 u8 new_pdt; 2383 bool new_mcs; 2384 bool dowork = false, create_connector = false; 2385 2386 port = drm_dp_get_port(mstb, conn_stat->port_number); 2387 if (!port) 2388 return; 2389 2390 if (port->connector) { 2391 if (!port->input && conn_stat->input_port) { 2392 /* 2393 * We can't remove a connector from an already exposed 2394 * port, so just throw the port out and make sure we 2395 * reprobe the link address of its parent MSTB 2396 */ 2397 drm_dp_mst_topology_unlink_port(mgr, port); 2398 mstb->link_address_sent = false; 2399 dowork = true; 2400 goto out; 2401 } 2402 2403 /* Locking is only needed if the port's exposed to userspace */ 2404 drm_modeset_lock(&mgr->base.lock, NULL); 2405 } else if (port->input && !conn_stat->input_port) { 2406 create_connector = true; 2407 /* Reprobe link address so we get num_sdp_streams */ 2408 mstb->link_address_sent = false; 2409 dowork = true; 2410 } 2411 2412 old_ddps = port->ddps; 2413 old_input = port->input; 2414 port->input = conn_stat->input_port; 2415 port->ldps = conn_stat->legacy_device_plug_status; 2416 port->ddps = conn_stat->displayport_device_plug_status; 2417 2418 if (old_ddps != port->ddps) { 2419 if (port->ddps) { 2420 dowork = true; 2421 } else { 2422 port->available_pbn = 0; 2423 } 2424 } 2425 2426 new_pdt = port->input ?
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; 2427 new_mcs = conn_stat->message_capability_status; 2428 ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); 2429 if (ret == 1) { 2430 dowork = true; 2431 } else if (ret < 0) { 2432 DRM_ERROR("Failed to change PDT for port %p: %d\n", 2433 port, ret); 2434 dowork = false; 2435 } 2436 2437 if (!old_input && old_ddps != port->ddps && !port->ddps) { 2438 for (i = 0; i < mgr->max_payloads; i++) { 2439 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; 2440 struct drm_dp_mst_port *port_validated; 2441 2442 if (!vcpi) 2443 continue; 2444 2445 port_validated = 2446 container_of(vcpi, struct drm_dp_mst_port, vcpi); 2447 port_validated = 2448 drm_dp_mst_topology_get_port_validated(mgr, port_validated); 2449 if (!port_validated) { 2450 mutex_lock(&mgr->payload_lock); 2451 vcpi->num_slots = 0; 2452 mutex_unlock(&mgr->payload_lock); 2453 } else { 2454 drm_dp_mst_topology_put_port(port_validated); 2455 } 2456 } 2457 } 2458 2459 if (port->connector) 2460 drm_modeset_unlock(&mgr->base.lock); 2461 else if (create_connector) 2462 drm_dp_mst_port_add_connector(mstb, port); 2463 2464 out: 2465 drm_dp_mst_topology_put_port(port); 2466 if (dowork) 2467 queue_work(system_long_wq, &mstb->mgr->work); 2468 } 2469 2470 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, 2471 u8 lct, u8 *rad) 2472 { 2473 struct drm_dp_mst_branch *mstb; 2474 struct drm_dp_mst_port *port; 2475 int i, ret; 2476 /* find the port by iterating down */ 2477 2478 mutex_lock(&mgr->lock); 2479 mstb = mgr->mst_primary; 2480 2481 if (!mstb) 2482 goto out; 2483 2484 for (i = 0; i < lct - 1; i++) { 2485 int shift = (i % 2) ? 0 : 4; 2486 int port_num = (rad[i / 2] >> shift) & 0xf; 2487 2488 list_for_each_entry(port, &mstb->ports, next) { 2489 if (port->port_num == port_num) { 2490 mstb = port->mstb; 2491 if (!mstb) { 2492 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); 2493 goto out; 2494 } 2495 2496 break; 2497 } 2498 } 2499 } 2500 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2501 if (!ret) 2502 mstb = NULL; 2503 out: 2504 mutex_unlock(&mgr->lock); 2505 return mstb; 2506 } 2507 2508 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( 2509 struct drm_dp_mst_branch *mstb, 2510 const uint8_t *guid) 2511 { 2512 struct drm_dp_mst_branch *found_mstb; 2513 struct drm_dp_mst_port *port; 2514 2515 if (memcmp(mstb->guid, guid, 16) == 0) 2516 return mstb; 2517 2518 2519 list_for_each_entry(port, &mstb->ports, next) { 2520 if (!port->mstb) 2521 continue; 2522 2523 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); 2524 2525 if (found_mstb) 2526 return found_mstb; 2527 } 2528 2529 return NULL; 2530 } 2531 2532 static struct drm_dp_mst_branch * 2533 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, 2534 const uint8_t *guid) 2535 { 2536 struct drm_dp_mst_branch *mstb; 2537 int ret; 2538 2539 /* find the port by iterating down */ 2540 mutex_lock(&mgr->lock); 2541 2542 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); 2543 if (mstb) { 2544 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2545 if (!ret) 2546 mstb = NULL; 2547 } 2548 2549 mutex_unlock(&mgr->lock); 2550 return mstb; 2551 } 2552 2553 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 2554 struct drm_dp_mst_branch *mstb) 2555 { 2556 struct drm_dp_mst_port *port; 2557 int ret; 2558 bool changed = false; 2559 2560 if (!mstb->link_address_sent) { 2561 ret = 
drm_dp_send_link_address(mgr, mstb); 2562 if (ret == 1) 2563 changed = true; 2564 else if (ret < 0) 2565 return ret; 2566 } 2567 2568 list_for_each_entry(port, &mstb->ports, next) { 2569 struct drm_dp_mst_branch *mstb_child = NULL; 2570 2571 if (port->input || !port->ddps) 2572 continue; 2573 2574 if (!port->available_pbn) { 2575 drm_modeset_lock(&mgr->base.lock, NULL); 2576 drm_dp_send_enum_path_resources(mgr, mstb, port); 2577 drm_modeset_unlock(&mgr->base.lock); 2578 changed = true; 2579 } 2580 2581 if (port->mstb) 2582 mstb_child = drm_dp_mst_topology_get_mstb_validated( 2583 mgr, port->mstb); 2584 2585 if (mstb_child) { 2586 ret = drm_dp_check_and_send_link_address(mgr, 2587 mstb_child); 2588 drm_dp_mst_topology_put_mstb(mstb_child); 2589 if (ret == 1) 2590 changed = true; 2591 else if (ret < 0) 2592 return ret; 2593 } 2594 } 2595 2596 return changed; 2597 } 2598 2599 static void drm_dp_mst_link_probe_work(struct work_struct *work) 2600 { 2601 struct drm_dp_mst_topology_mgr *mgr = 2602 container_of(work, struct drm_dp_mst_topology_mgr, work); 2603 struct drm_device *dev = mgr->dev; 2604 struct drm_dp_mst_branch *mstb; 2605 int ret; 2606 bool clear_payload_id_table; 2607 2608 mutex_lock(&mgr->probe_lock); 2609 2610 mutex_lock(&mgr->lock); 2611 clear_payload_id_table = !mgr->payload_id_table_cleared; 2612 mgr->payload_id_table_cleared = true; 2613 2614 mstb = mgr->mst_primary; 2615 if (mstb) { 2616 ret = drm_dp_mst_topology_try_get_mstb(mstb); 2617 if (!ret) 2618 mstb = NULL; 2619 } 2620 mutex_unlock(&mgr->lock); 2621 if (!mstb) { 2622 mutex_unlock(&mgr->probe_lock); 2623 return; 2624 } 2625 2626 /* 2627 * Certain branch devices seem to incorrectly report an available_pbn 2628 * of 0 on downstream sinks, even after clearing the 2629 * DP_PAYLOAD_ALLOCATE_* registers in 2630 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C 2631 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make 2632 * things work again. 2633 */ 2634 if (clear_payload_id_table) { 2635 DRM_DEBUG_KMS("Clearing payload ID table\n"); 2636 drm_dp_send_clear_payload_id_table(mgr, mstb); 2637 } 2638 2639 ret = drm_dp_check_and_send_link_address(mgr, mstb); 2640 drm_dp_mst_topology_put_mstb(mstb); 2641 2642 mutex_unlock(&mgr->probe_lock); 2643 if (ret) 2644 drm_kms_helper_hotplug_event(dev); 2645 } 2646 2647 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 2648 u8 *guid) 2649 { 2650 u64 salt; 2651 2652 if (memchr_inv(guid, 0, 16)) 2653 return true; 2654 2655 salt = get_jiffies_64(); 2656 2657 memcpy(&guid[0], &salt, sizeof(u64)); 2658 memcpy(&guid[8], &salt, sizeof(u64)); 2659 2660 return false; 2661 } 2662 2663 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) 2664 { 2665 struct drm_dp_sideband_msg_req_body req; 2666 2667 req.req_type = DP_REMOTE_DPCD_READ; 2668 req.u.dpcd_read.port_number = port_num; 2669 req.u.dpcd_read.dpcd_address = offset; 2670 req.u.dpcd_read.num_bytes = num_bytes; 2671 drm_dp_encode_sideband_req(&req, msg); 2672 2673 return 0; 2674 } 2675 2676 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, 2677 bool up, u8 *msg, int len) 2678 { 2679 int ret; 2680 int regbase = up ? 
DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; 2681 int tosend, total, offset; 2682 int retries = 0; 2683 2684 retry: 2685 total = len; 2686 offset = 0; 2687 do { 2688 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); 2689 2690 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, 2691 &msg[offset], 2692 tosend); 2693 if (ret != tosend) { 2694 if (ret == -EIO && retries < 5) { 2695 retries++; 2696 goto retry; 2697 } 2698 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 2699 2700 return -EIO; 2701 } 2702 offset += tosend; 2703 total -= tosend; 2704 } while (total > 0); 2705 return 0; 2706 } 2707 2708 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, 2709 struct drm_dp_sideband_msg_tx *txmsg) 2710 { 2711 struct drm_dp_mst_branch *mstb = txmsg->dst; 2712 u8 req_type; 2713 2714 /* both msg slots are full */ 2715 if (txmsg->seqno == -1) { 2716 if (mstb->tx_slots[0] && mstb->tx_slots[1]) { 2717 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__); 2718 return -EAGAIN; 2719 } 2720 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) { 2721 txmsg->seqno = mstb->last_seqno; 2722 mstb->last_seqno ^= 1; 2723 } else if (mstb->tx_slots[0] == NULL) 2724 txmsg->seqno = 0; 2725 else 2726 txmsg->seqno = 1; 2727 mstb->tx_slots[txmsg->seqno] = txmsg; 2728 } 2729 2730 req_type = txmsg->msg[0] & 0x7f; 2731 if (req_type == DP_CONNECTION_STATUS_NOTIFY || 2732 req_type == DP_RESOURCE_STATUS_NOTIFY) 2733 hdr->broadcast = 1; 2734 else 2735 hdr->broadcast = 0; 2736 hdr->path_msg = txmsg->path_msg; 2737 hdr->lct = mstb->lct; 2738 hdr->lcr = mstb->lct - 1; 2739 if (mstb->lct > 1) 2740 memcpy(hdr->rad, mstb->rad, mstb->lct / 2); 2741 hdr->seqno = txmsg->seqno; 2742 return 0; 2743 } 2744 /* 2745 * process a single block of the next message in the sideband queue 2746 */ 2747 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 2748 struct drm_dp_sideband_msg_tx *txmsg, 2749 bool up) 2750 { 2751 u8 chunk[48]; 2752 struct drm_dp_sideband_msg_hdr hdr; 2753 int len, space, idx, tosend; 2754 int ret; 2755 2756 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); 2757 2758 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { 2759 txmsg->seqno = -1; 2760 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; 2761 } 2762 2763 /* make hdr from dst mst - for replies use seqno 2764 otherwise assign one */ 2765 ret = set_hdr_from_dst_qlock(&hdr, txmsg); 2766 if (ret < 0) 2767 return ret; 2768 2769 /* amount left to send in this message */ 2770 len = txmsg->cur_len - txmsg->cur_offset; 2771 2772 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ 2773 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); 2774 2775 tosend = min(len, space); 2776 if (len == txmsg->cur_len) 2777 hdr.somt = 1; 2778 if (space >= len) 2779 hdr.eomt = 1; 2780 2781 2782 hdr.msg_len = tosend + 1; 2783 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); 2784 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); 2785 /* add crc at end */ 2786 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); 2787 idx += tosend + 1; 2788 2789 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); 2790 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) { 2791 struct drm_printer p = drm_debug_printer(DBG_PREFIX); 2792 2793 drm_printf(&p, "sideband msg failed to send\n"); 2794 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 2795 return ret; 2796 } 2797 2798 txmsg->cur_offset += tosend; 2799 if (txmsg->cur_offset == txmsg->cur_len) { 2800 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; 2801 return 1; 2802 } 
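	/*
	 * A return of 1 above signals that the final chunk of this message
	 * has been written out; a return of 0 below means more chunks remain,
	 * so for down requests the message is left at the head of
	 * tx_msg_downq and transmission continues on a later pass.
	 */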
2803 return 0; 2804 } 2805 2806 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 2807 { 2808 struct drm_dp_sideband_msg_tx *txmsg; 2809 int ret; 2810 2811 WARN_ON(!mutex_is_locked(&mgr->qlock)); 2812 2813 /* construct a chunk from the first msg in the tx_msg queue */ 2814 if (list_empty(&mgr->tx_msg_downq)) 2815 return; 2816 2817 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); 2818 ret = process_single_tx_qlock(mgr, txmsg, false); 2819 if (ret == 1) { 2820 /* txmsg is sent it should be in the slots now */ 2821 mgr->is_waiting_for_dwn_reply = true; 2822 list_del(&txmsg->next); 2823 } else if (ret) { 2824 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 2825 mgr->is_waiting_for_dwn_reply = false; 2826 list_del(&txmsg->next); 2827 if (txmsg->seqno != -1) 2828 txmsg->dst->tx_slots[txmsg->seqno] = NULL; 2829 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 2830 #ifdef __NetBSD__ 2831 DRM_WAKEUP_ALL(&mgr->tx_waitq, &mgr->qlock); 2832 #else 2833 wake_up_all(&mgr->tx_waitq); 2834 #endif 2835 } 2836 } 2837 2838 /* called holding qlock */ 2839 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 2840 struct drm_dp_sideband_msg_tx *txmsg) 2841 { 2842 int ret; 2843 2844 /* construct a chunk from the first msg in the tx_msg queue */ 2845 ret = process_single_tx_qlock(mgr, txmsg, true); 2846 2847 if (ret != 1) 2848 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 2849 2850 if (txmsg->seqno != -1) { 2851 WARN_ON((unsigned int)txmsg->seqno > 2852 ARRAY_SIZE(txmsg->dst->tx_slots)); 2853 txmsg->dst->tx_slots[txmsg->seqno] = NULL; 2854 } 2855 } 2856 2857 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, 2858 struct drm_dp_sideband_msg_tx *txmsg) 2859 { 2860 mutex_lock(&mgr->qlock); 2861 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); 2862 2863 if (drm_debug_enabled(DRM_UT_DP)) { 2864 struct drm_printer p = drm_debug_printer(DBG_PREFIX); 2865 2866 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); 2867 } 2868 2869 if (list_is_singular(&mgr->tx_msg_downq) && 2870 !mgr->is_waiting_for_dwn_reply) 2871 process_single_down_tx_qlock(mgr); 2872 mutex_unlock(&mgr->qlock); 2873 } 2874 2875 static void 2876 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) 2877 { 2878 struct drm_dp_link_addr_reply_port *port_reply; 2879 int i; 2880 2881 for (i = 0; i < reply->nports; i++) { 2882 port_reply = &reply->ports[i]; 2883 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", 2884 i, 2885 port_reply->input_port, 2886 port_reply->peer_device_type, 2887 port_reply->port_number, 2888 port_reply->dpcd_revision, 2889 port_reply->mcs, 2890 port_reply->ddps, 2891 port_reply->legacy_device_plug_status, 2892 port_reply->num_sdp_streams, 2893 port_reply->num_sdp_stream_sinks); 2894 } 2895 } 2896 2897 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 2898 struct drm_dp_mst_branch *mstb) 2899 { 2900 struct drm_dp_sideband_msg_tx *txmsg; 2901 struct drm_dp_link_address_ack_reply *reply; 2902 struct drm_dp_mst_port *port, *tmp; 2903 int i, len __unused, ret, port_mask = 0; 2904 bool changed = false; 2905 2906 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 2907 if (!txmsg) 2908 return -ENOMEM; 2909 2910 txmsg->dst = mstb; 2911 len = build_link_address(txmsg); 2912 2913 mstb->link_address_sent = true; 2914 drm_dp_queue_down_tx(mgr, txmsg); 2915 2916 /* FIXME: Actually do some real error handling here */ 2917 ret = drm_dp_mst_wait_tx_reply(mstb, 
txmsg); 2918 if (ret <= 0) { 2919 DRM_ERROR("Sending link address failed with %d\n", ret); 2920 goto out; 2921 } 2922 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 2923 DRM_ERROR("link address NAK received\n"); 2924 ret = -EIO; 2925 goto out; 2926 } 2927 2928 reply = &txmsg->reply.u.link_addr; 2929 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports); 2930 drm_dp_dump_link_address(reply); 2931 2932 drm_dp_check_mstb_guid(mstb, reply->guid); 2933 2934 for (i = 0; i < reply->nports; i++) { 2935 port_mask |= BIT(reply->ports[i].port_number); 2936 ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, 2937 &reply->ports[i]); 2938 if (ret == 1) 2939 changed = true; 2940 else if (ret < 0) 2941 goto out; 2942 } 2943 2944 /* Prune any ports that are currently a part of mstb in our in-memory 2945 * topology, but were not seen in this link address. Usually this 2946 * means that they were removed while the topology was out of sync, 2947 * e.g. during suspend/resume 2948 */ 2949 mutex_lock(&mgr->lock); 2950 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 2951 if (port_mask & BIT(port->port_num)) 2952 continue; 2953 2954 DRM_DEBUG_KMS("port %d was not in link address, removing\n", 2955 port->port_num); 2956 list_del(&port->next); 2957 drm_dp_mst_topology_put_port(port); 2958 changed = true; 2959 } 2960 mutex_unlock(&mgr->lock); 2961 2962 out: 2963 if (ret <= 0) 2964 mstb->link_address_sent = false; 2965 kfree(txmsg); 2966 return ret < 0 ? ret : changed; 2967 } 2968 2969 void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, 2970 struct drm_dp_mst_branch *mstb) 2971 { 2972 struct drm_dp_sideband_msg_tx *txmsg; 2973 int len __unused, ret; 2974 2975 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 2976 if (!txmsg) 2977 return; 2978 2979 txmsg->dst = mstb; 2980 len = build_clear_payload_id_table(txmsg); 2981 2982 drm_dp_queue_down_tx(mgr, txmsg); 2983 2984 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 2985 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 2986 DRM_DEBUG_KMS("clear payload table id nak received\n"); 2987 2988 kfree(txmsg); 2989 } 2990 2991 static int 2992 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 2993 struct drm_dp_mst_branch *mstb, 2994 struct drm_dp_mst_port *port) 2995 { 2996 struct drm_dp_enum_path_resources_ack_reply *path_res; 2997 struct drm_dp_sideband_msg_tx *txmsg; 2998 int len __unused; 2999 int ret; 3000 3001 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3002 if (!txmsg) 3003 return -ENOMEM; 3004 3005 txmsg->dst = mstb; 3006 len = build_enum_path_resources(txmsg, port->port_num); 3007 3008 drm_dp_queue_down_tx(mgr, txmsg); 3009 3010 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3011 if (ret > 0) { 3012 path_res = &txmsg->reply.u.path_resources; 3013 3014 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 3015 DRM_DEBUG_KMS("enum path resources nak received\n"); 3016 } else { 3017 if (port->port_num != path_res->port_number) 3018 DRM_ERROR("got incorrect port in response\n"); 3019 3020 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", 3021 path_res->port_number, 3022 path_res->full_payload_bw_number, 3023 path_res->avail_payload_bw_number); 3024 port->available_pbn = 3025 path_res->avail_payload_bw_number; 3026 port->fec_capable = path_res->fec_capable; 3027 } 3028 } 3029 3030 kfree(txmsg); 3031 return 0; 3032 } 3033 3034 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) 3035 { 3036 if (!mstb->port_parent) 3037 return NULL; 3038 3039 if 
(mstb->port_parent->mstb != mstb) 3040 return mstb->port_parent; 3041 3042 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); 3043 } 3044 3045 /* 3046 * Searches upwards in the topology starting from mstb to try to find the 3047 * closest available parent of mstb that's still connected to the rest of the 3048 * topology. This can be used in order to perform operations like releasing 3049 * payloads, where the branch device which owned the payload may no longer be 3050 * around and thus would require that the payload on the last living relative 3051 * be freed instead. 3052 */ 3053 static struct drm_dp_mst_branch * 3054 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, 3055 struct drm_dp_mst_branch *mstb, 3056 int *port_num) 3057 { 3058 struct drm_dp_mst_branch *rmstb = NULL; 3059 struct drm_dp_mst_port *found_port; 3060 3061 mutex_lock(&mgr->lock); 3062 if (!mgr->mst_primary) 3063 goto out; 3064 3065 do { 3066 found_port = drm_dp_get_last_connected_port_to_mstb(mstb); 3067 if (!found_port) 3068 break; 3069 3070 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) { 3071 rmstb = found_port->parent; 3072 *port_num = found_port->port_num; 3073 } else { 3074 /* Search again, starting from this parent */ 3075 mstb = found_port->parent; 3076 } 3077 } while (!rmstb); 3078 out: 3079 mutex_unlock(&mgr->lock); 3080 return rmstb; 3081 } 3082 3083 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 3084 struct drm_dp_mst_port *port, 3085 int id, 3086 int pbn) 3087 { 3088 struct drm_dp_sideband_msg_tx *txmsg; 3089 struct drm_dp_mst_branch *mstb; 3090 int len __unused, ret, port_num; 3091 u8 sinks[DRM_DP_MAX_SDP_STREAMS]; 3092 int i; 3093 3094 port_num = port->port_num; 3095 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3096 if (!mstb) { 3097 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, 3098 port->parent, 3099 &port_num); 3100 3101 if (!mstb) 3102 return -EINVAL; 3103 } 3104 3105 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3106 if (!txmsg) { 3107 ret = -ENOMEM; 3108 goto fail_put; 3109 } 3110 3111 for (i = 0; i < port->num_sdp_streams; i++) 3112 sinks[i] = i; 3113 3114 txmsg->dst = mstb; 3115 len = build_allocate_payload(txmsg, port_num, 3116 id, 3117 pbn, port->num_sdp_streams, sinks); 3118 3119 drm_dp_queue_down_tx(mgr, txmsg); 3120 3121 /* 3122 * FIXME: there is a small chance that between getting the last 3123 * connected mstb and sending the payload message, the last connected 3124 * mstb could also be removed from the topology. In the future, this 3125 * needs to be fixed by restarting the 3126 * drm_dp_get_last_connected_port_and_mstb() search in the event of a 3127 * timeout if the topology is still connected to the system. 
3128 */ 3129 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3130 if (ret > 0) { 3131 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3132 ret = -EINVAL; 3133 else 3134 ret = 0; 3135 } 3136 kfree(txmsg); 3137 fail_put: 3138 drm_dp_mst_topology_put_mstb(mstb); 3139 return ret; 3140 } 3141 3142 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, 3143 struct drm_dp_mst_port *port, bool power_up) 3144 { 3145 struct drm_dp_sideband_msg_tx *txmsg; 3146 int len __unused, ret; 3147 3148 port = drm_dp_mst_topology_get_port_validated(mgr, port); 3149 if (!port) 3150 return -EINVAL; 3151 3152 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3153 if (!txmsg) { 3154 drm_dp_mst_topology_put_port(port); 3155 return -ENOMEM; 3156 } 3157 3158 txmsg->dst = port->parent; 3159 len = build_power_updown_phy(txmsg, port->port_num, power_up); 3160 drm_dp_queue_down_tx(mgr, txmsg); 3161 3162 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg); 3163 if (ret > 0) { 3164 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3165 ret = -EINVAL; 3166 else 3167 ret = 0; 3168 } 3169 kfree(txmsg); 3170 drm_dp_mst_topology_put_port(port); 3171 3172 return ret; 3173 } 3174 EXPORT_SYMBOL(drm_dp_send_power_updown_phy); 3175 3176 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 3177 int id, 3178 struct drm_dp_payload *payload) 3179 { 3180 int ret; 3181 3182 ret = drm_dp_dpcd_write_payload(mgr, id, payload); 3183 if (ret < 0) { 3184 payload->payload_state = 0; 3185 return ret; 3186 } 3187 payload->payload_state = DP_PAYLOAD_LOCAL; 3188 return 0; 3189 } 3190 3191 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 3192 struct drm_dp_mst_port *port, 3193 int id, 3194 struct drm_dp_payload *payload) 3195 { 3196 int ret; 3197 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); 3198 if (ret < 0) 3199 return ret; 3200 payload->payload_state = DP_PAYLOAD_REMOTE; 3201 return ret; 3202 } 3203 3204 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 3205 struct drm_dp_mst_port *port, 3206 int id, 3207 struct drm_dp_payload *payload) 3208 { 3209 DRM_DEBUG_KMS("\n"); 3210 /* it's okay for these to fail */ 3211 if (port) { 3212 drm_dp_payload_send_msg(mgr, port, id, 0); 3213 } 3214 3215 drm_dp_dpcd_write_payload(mgr, id, payload); 3216 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL; 3217 return 0; 3218 } 3219 3220 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 3221 int id, 3222 struct drm_dp_payload *payload) 3223 { 3224 payload->payload_state = 0; 3225 return 0; 3226 } 3227 3228 /** 3229 * drm_dp_update_payload_part1() - Execute payload update part 1 3230 * @mgr: manager to use. 3231 * 3232 * This iterates over all proposed virtual channels, and tries to 3233 * allocate space in the link for them. For 0->slots transitions, 3234 * this step just writes the VCPI to the MST device. For slots->0 3235 * transitions, this writes the updated VCPIs and removes the 3236 * remote VC payloads. 3237 * 3238 * after calling this the driver should generate ACT and payload 3239 * packets. 
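 *
 * Illustrative call order (an editorial sketch, not code from this file),
 * assuming the driver triggers the ACT in hardware after part 1 and waits
 * for it with drm_dp_check_act_status():
 *
 *	drm_dp_update_payload_part1(mgr);
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);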
3240 */ 3241 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 3242 { 3243 struct drm_dp_payload req_payload; 3244 struct drm_dp_mst_port *port; 3245 int i, j; 3246 int cur_slots = 1; 3247 3248 mutex_lock(&mgr->payload_lock); 3249 for (i = 0; i < mgr->max_payloads; i++) { 3250 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; 3251 struct drm_dp_payload *payload = &mgr->payloads[i]; 3252 bool put_port = false; 3253 3254 /* solve the current payloads - compare to the hw ones 3255 - update the hw view */ 3256 req_payload.start_slot = cur_slots; 3257 if (vcpi) { 3258 port = container_of(vcpi, struct drm_dp_mst_port, 3259 vcpi); 3260 3261 /* Validated ports don't matter if we're releasing 3262 * VCPI 3263 */ 3264 if (vcpi->num_slots) { 3265 port = drm_dp_mst_topology_get_port_validated( 3266 mgr, port); 3267 if (!port) { 3268 mutex_unlock(&mgr->payload_lock); 3269 return -EINVAL; 3270 } 3271 put_port = true; 3272 } 3273 3274 req_payload.num_slots = vcpi->num_slots; 3275 req_payload.vcpi = vcpi->vcpi; 3276 } else { 3277 port = NULL; 3278 req_payload.num_slots = 0; 3279 } 3280 3281 payload->start_slot = req_payload.start_slot; 3282 /* work out what is required to happen with this payload */ 3283 if (payload->num_slots != req_payload.num_slots) { 3284 3285 /* need to push an update for this payload */ 3286 if (req_payload.num_slots) { 3287 drm_dp_create_payload_step1(mgr, vcpi->vcpi, 3288 &req_payload); 3289 payload->num_slots = req_payload.num_slots; 3290 payload->vcpi = req_payload.vcpi; 3291 3292 } else if (payload->num_slots) { 3293 payload->num_slots = 0; 3294 drm_dp_destroy_payload_step1(mgr, port, 3295 payload->vcpi, 3296 payload); 3297 req_payload.payload_state = 3298 payload->payload_state; 3299 payload->start_slot = 0; 3300 } 3301 payload->payload_state = req_payload.payload_state; 3302 } 3303 cur_slots += req_payload.num_slots; 3304 3305 if (put_port) 3306 drm_dp_mst_topology_put_port(port); 3307 } 3308 3309 for (i = 0; i < mgr->max_payloads; /* do nothing */) { 3310 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) { 3311 i++; 3312 continue; 3313 } 3314 3315 DRM_DEBUG_KMS("removing payload %d\n", i); 3316 for (j = i; j < mgr->max_payloads - 1; j++) { 3317 mgr->payloads[j] = mgr->payloads[j + 1]; 3318 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; 3319 3320 if (mgr->proposed_vcpis[j] && 3321 mgr->proposed_vcpis[j]->num_slots) { 3322 set_bit(j + 1, &mgr->payload_mask); 3323 } else { 3324 clear_bit(j + 1, &mgr->payload_mask); 3325 } 3326 } 3327 3328 memset(&mgr->payloads[mgr->max_payloads - 1], 0, 3329 sizeof(struct drm_dp_payload)); 3330 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; 3331 clear_bit(mgr->max_payloads, &mgr->payload_mask); 3332 } 3333 mutex_unlock(&mgr->payload_lock); 3334 3335 return 0; 3336 } 3337 EXPORT_SYMBOL(drm_dp_update_payload_part1); 3338 3339 /** 3340 * drm_dp_update_payload_part2() - Execute payload update part 2 3341 * @mgr: manager to use. 3342 * 3343 * This iterates over all proposed virtual channels, and tries to 3344 * allocate space in the link for them. For 0->slots transitions, 3345 * this step writes the remote VC payload commands. For slots->0 3346 * this just resets some internal state. 
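 *
 * Note: the 0->slots case is carried out by drm_dp_create_payload_step2(),
 * which issues an ALLOCATE_PAYLOAD sideband request to the branch device
 * through drm_dp_payload_send_msg().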
3347 */ 3348 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) 3349 { 3350 struct drm_dp_mst_port *port; 3351 int i; 3352 int ret = 0; 3353 mutex_lock(&mgr->payload_lock); 3354 for (i = 0; i < mgr->max_payloads; i++) { 3355 3356 if (!mgr->proposed_vcpis[i]) 3357 continue; 3358 3359 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 3360 3361 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); 3362 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { 3363 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 3364 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 3365 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); 3366 } 3367 if (ret) { 3368 mutex_unlock(&mgr->payload_lock); 3369 return ret; 3370 } 3371 } 3372 mutex_unlock(&mgr->payload_lock); 3373 return 0; 3374 } 3375 EXPORT_SYMBOL(drm_dp_update_payload_part2); 3376 3377 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, 3378 struct drm_dp_mst_port *port, 3379 int offset, int size, u8 *bytes) 3380 { 3381 int len __unused; 3382 int ret = 0; 3383 struct drm_dp_sideband_msg_tx *txmsg; 3384 struct drm_dp_mst_branch *mstb; 3385 3386 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3387 if (!mstb) 3388 return -EINVAL; 3389 3390 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3391 if (!txmsg) { 3392 ret = -ENOMEM; 3393 goto fail_put; 3394 } 3395 3396 len = build_dpcd_read(txmsg, port->port_num, offset, size); 3397 txmsg->dst = port->parent; 3398 3399 drm_dp_queue_down_tx(mgr, txmsg); 3400 3401 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3402 if (ret < 0) 3403 goto fail_free; 3404 3405 /* DPCD read should never be NACKed */ 3406 if (txmsg->reply.reply_type == 1) { 3407 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n", 3408 mstb, port->port_num, offset, size); 3409 ret = -EIO; 3410 goto fail_free; 3411 } 3412 3413 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) { 3414 ret = -EPROTO; 3415 goto fail_free; 3416 } 3417 3418 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes, 3419 size); 3420 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret); 3421 3422 fail_free: 3423 kfree(txmsg); 3424 fail_put: 3425 drm_dp_mst_topology_put_mstb(mstb); 3426 3427 return ret; 3428 } 3429 3430 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 3431 struct drm_dp_mst_port *port, 3432 int offset, int size, u8 *bytes) 3433 { 3434 int len __unused; 3435 int ret; 3436 struct drm_dp_sideband_msg_tx *txmsg; 3437 struct drm_dp_mst_branch *mstb; 3438 3439 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 3440 if (!mstb) 3441 return -EINVAL; 3442 3443 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3444 if (!txmsg) { 3445 ret = -ENOMEM; 3446 goto fail_put; 3447 } 3448 3449 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); 3450 txmsg->dst = mstb; 3451 3452 drm_dp_queue_down_tx(mgr, txmsg); 3453 3454 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 3455 if (ret > 0) { 3456 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3457 ret = -EIO; 3458 else 3459 ret = 0; 3460 } 3461 kfree(txmsg); 3462 fail_put: 3463 drm_dp_mst_topology_put_mstb(mstb); 3464 return ret; 3465 } 3466 3467 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) 3468 { 3469 struct drm_dp_sideband_msg_reply_body reply; 3470 3471 reply.reply_type = DP_SIDEBAND_REPLY_ACK; 
3472 reply.req_type = req_type; 3473 drm_dp_encode_sideband_reply(&reply, msg); 3474 return 0; 3475 } 3476 3477 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, 3478 struct drm_dp_mst_branch *mstb, 3479 int req_type, int seqno, bool broadcast) 3480 { 3481 struct drm_dp_sideband_msg_tx *txmsg; 3482 3483 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 3484 if (!txmsg) 3485 return -ENOMEM; 3486 3487 txmsg->dst = mstb; 3488 txmsg->seqno = seqno; 3489 drm_dp_encode_up_ack_reply(txmsg, req_type); 3490 3491 mutex_lock(&mgr->qlock); 3492 3493 process_single_up_tx_qlock(mgr, txmsg); 3494 3495 mutex_unlock(&mgr->qlock); 3496 3497 kfree(txmsg); 3498 return 0; 3499 } 3500 3501 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) 3502 { 3503 if (dp_link_bw == 0 || dp_link_count == 0) 3504 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", 3505 dp_link_bw, dp_link_count); 3506 3507 return dp_link_bw * dp_link_count / 2; 3508 } 3509 3510 /** 3511 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager 3512 * @mgr: manager to set state for 3513 * @mst_state: true to enable MST on this connector - false to disable. 3514 * 3515 * This is called by the driver when it detects an MST capable device plugged 3516 * into a DP MST capable port, or when a DP MST capable device is unplugged. 3517 */ 3518 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) 3519 { 3520 int ret = 0; 3521 int i = 0; 3522 struct drm_dp_mst_branch *mstb = NULL; 3523 3524 mutex_lock(&mgr->lock); 3525 if (mst_state == mgr->mst_state) 3526 goto out_unlock; 3527 3528 mgr->mst_state = mst_state; 3529 /* set the device into MST mode */ 3530 if (mst_state) { 3531 WARN_ON(mgr->mst_primary); 3532 3533 /* get dpcd info */ 3534 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 3535 if (ret != DP_RECEIVER_CAP_SIZE) { 3536 DRM_DEBUG_KMS("failed to read DPCD\n"); 3537 goto out_unlock; 3538 } 3539 3540 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], 3541 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); 3542 if (mgr->pbn_div == 0) { 3543 ret = -EINVAL; 3544 goto out_unlock; 3545 } 3546 3547 /* add initial branch device at LCT 1 */ 3548 mstb = drm_dp_add_mst_branch_device(1, NULL); 3549 if (mstb == NULL) { 3550 ret = -ENOMEM; 3551 goto out_unlock; 3552 } 3553 mstb->mgr = mgr; 3554 3555 /* give this the main reference */ 3556 mgr->mst_primary = mstb; 3557 drm_dp_mst_topology_get_mstb(mgr->mst_primary); 3558 3559 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3560 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 3561 if (ret < 0) { 3562 goto out_unlock; 3563 } 3564 3565 { 3566 struct drm_dp_payload reset_pay; 3567 reset_pay.start_slot = 0; 3568 reset_pay.num_slots = 0x3f; 3569 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); 3570 } 3571 3572 queue_work(system_long_wq, &mgr->work); 3573 3574 ret = 0; 3575 } else { 3576 /* disable MST on the device */ 3577 mstb = mgr->mst_primary; 3578 mgr->mst_primary = NULL; 3579 /* this can fail if the device is gone */ 3580 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); 3581 ret = 0; 3582 mutex_lock(&mgr->payload_lock); 3583 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); 3584 mgr->payload_mask = 0; 3585 set_bit(0, &mgr->payload_mask); 3586 for (i = 0; i < mgr->max_payloads; i++) { 3587 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; 3588 3589 if (vcpi) { 3590 vcpi->vcpi = 0; 3591 vcpi->num_slots = 0; 3592 } 3593 mgr->proposed_vcpis[i] = NULL; 3594 } 
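		/*
		 * With the payload table and proposed VCPIs cleared above and
		 * vcpi_mask reset below, a later
		 * drm_dp_mst_topology_mgr_set_mst(mgr, true) call starts from
		 * a clean slate.
		 */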
3595 mgr->vcpi_mask = 0; 3596 mutex_unlock(&mgr->payload_lock); 3597 3598 mgr->payload_id_table_cleared = false; 3599 } 3600 3601 out_unlock: 3602 mutex_unlock(&mgr->lock); 3603 if (mstb) 3604 drm_dp_mst_topology_put_mstb(mstb); 3605 return ret; 3606 3607 } 3608 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); 3609 3610 static void 3611 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) 3612 { 3613 struct drm_dp_mst_port *port; 3614 3615 /* The link address will need to be re-sent on resume */ 3616 mstb->link_address_sent = false; 3617 3618 list_for_each_entry(port, &mstb->ports, next) { 3619 /* The PBN for each port will also need to be re-probed */ 3620 port->available_pbn = 0; 3621 3622 if (port->mstb) 3623 drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); 3624 } 3625 } 3626 3627 /** 3628 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager 3629 * @mgr: manager to suspend 3630 * 3631 * This function tells the MST device that we can't handle UP messages 3632 * anymore. This should stop it from sending any since we are suspended. 3633 */ 3634 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) 3635 { 3636 mutex_lock(&mgr->lock); 3637 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3638 DP_MST_EN | DP_UPSTREAM_IS_SRC); 3639 mutex_unlock(&mgr->lock); 3640 flush_work(&mgr->up_req_work); 3641 flush_work(&mgr->work); 3642 flush_work(&mgr->delayed_destroy_work); 3643 3644 mutex_lock(&mgr->lock); 3645 if (mgr->mst_state && mgr->mst_primary) 3646 drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); 3647 mutex_unlock(&mgr->lock); 3648 } 3649 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); 3650 3651 /** 3652 * drm_dp_mst_topology_mgr_resume() - resume the MST manager 3653 * @mgr: manager to resume 3654 * @sync: whether or not to perform topology reprobing synchronously 3655 * 3656 * This will fetch DPCD and see if the device is still there; 3657 * if it is, it will rewrite the MSTM control bits and return. 3658 * 3659 * If the device fails these checks, this returns -1, and the driver should do 3660 * a full MST reprobe, in case we were undocked. 3661 * 3662 * During system resume (where it is assumed that the driver will be calling 3663 * drm_atomic_helper_resume()) this function should be called beforehand with 3664 * @sync set to true. In contexts like runtime resume where the driver is not 3665 * expected to be calling drm_atomic_helper_resume(), this function should be 3666 * called with @sync set to false in order to avoid deadlocking. 3667 * 3668 * Returns: -1 if the MST topology was removed while we were suspended, 0 3669 * otherwise.
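 *
 * Illustrative system-resume usage (an editorial sketch, not code from this
 * file); tearing MST back down with drm_dp_mst_topology_mgr_set_mst() is one
 * common way for a driver to handle the failure case:
 *
 *	ret = drm_dp_mst_topology_mgr_resume(mgr, true);
 *	if (ret < 0)
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);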
3670 */ 3671 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, 3672 bool sync) 3673 { 3674 int ret; 3675 u8 guid[16]; 3676 3677 mutex_lock(&mgr->lock); 3678 if (!mgr->mst_primary) 3679 goto out_fail; 3680 3681 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, 3682 DP_RECEIVER_CAP_SIZE); 3683 if (ret != DP_RECEIVER_CAP_SIZE) { 3684 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 3685 goto out_fail; 3686 } 3687 3688 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 3689 DP_MST_EN | 3690 DP_UP_REQ_EN | 3691 DP_UPSTREAM_IS_SRC); 3692 if (ret < 0) { 3693 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); 3694 goto out_fail; 3695 } 3696 3697 /* Some hubs forget their guids after they resume */ 3698 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); 3699 if (ret != 16) { 3700 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); 3701 goto out_fail; 3702 } 3703 drm_dp_check_mstb_guid(mgr->mst_primary, guid); 3704 3705 /* 3706 * For the final step of resuming the topology, we need to bring the 3707 * state of our in-memory topology back into sync with reality. So, 3708 * restart the probing process as if we're probing a new hub 3709 */ 3710 queue_work(system_long_wq, &mgr->work); 3711 mutex_unlock(&mgr->lock); 3712 3713 if (sync) { 3714 DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n"); 3715 flush_work(&mgr->work); 3716 } 3717 3718 return 0; 3719 3720 out_fail: 3721 mutex_unlock(&mgr->lock); 3722 return -1; 3723 } 3724 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 3725 3726 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) 3727 { 3728 int len; 3729 u8 replyblock[32]; 3730 int replylen, origlen __unused, curreply; 3731 int ret; 3732 struct drm_dp_sideband_msg_rx *msg; 3733 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE; 3734 msg = up ? 
&mgr->up_req_recv : &mgr->down_rep_recv; 3735 3736 len = min(mgr->max_dpcd_transaction_bytes, 16); 3737 ret = drm_dp_dpcd_read(mgr->aux, basereg, 3738 replyblock, len); 3739 if (ret != len) { 3740 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); 3741 return false; 3742 } 3743 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); 3744 if (!ret) { 3745 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); 3746 return false; 3747 } 3748 replylen = msg->curchunk_len + msg->curchunk_hdrlen; 3749 3750 origlen = replylen; 3751 replylen -= len; 3752 curreply = len; 3753 while (replylen > 0) { 3754 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); 3755 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 3756 replyblock, len); 3757 if (ret != len) { 3758 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", 3759 len, ret); 3760 return false; 3761 } 3762 3763 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); 3764 if (!ret) { 3765 DRM_DEBUG_KMS("failed to build sideband msg\n"); 3766 return false; 3767 } 3768 3769 curreply += len; 3770 replylen -= len; 3771 } 3772 return true; 3773 } 3774 3775 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 3776 { 3777 struct drm_dp_sideband_msg_tx *txmsg; 3778 struct drm_dp_mst_branch *mstb; 3779 struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr; 3780 int slot = -1; 3781 3782 if (!drm_dp_get_one_sb_msg(mgr, false)) 3783 goto clear_down_rep_recv; 3784 3785 if (!mgr->down_rep_recv.have_eomt) 3786 return 0; 3787 3788 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); 3789 if (!mstb) { 3790 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", 3791 hdr->lct); 3792 goto clear_down_rep_recv; 3793 } 3794 3795 /* find the message */ 3796 slot = hdr->seqno; 3797 mutex_lock(&mgr->qlock); 3798 txmsg = mstb->tx_slots[slot]; 3799 /* remove from slots */ 3800 mutex_unlock(&mgr->qlock); 3801 3802 if (!txmsg) { 3803 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", 3804 mstb, hdr->seqno, hdr->lct, hdr->rad[0], 3805 mgr->down_rep_recv.msg[0]); 3806 goto no_msg; 3807 } 3808 3809 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); 3810 3811 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) 3812 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", 3813 txmsg->reply.req_type, 3814 drm_dp_mst_req_type_str(txmsg->reply.req_type), 3815 txmsg->reply.u.nak.reason, 3816 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), 3817 txmsg->reply.u.nak.nak_data); 3818 3819 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 3820 drm_dp_mst_topology_put_mstb(mstb); 3821 3822 mutex_lock(&mgr->qlock); 3823 txmsg->state = DRM_DP_SIDEBAND_TX_RX; 3824 mstb->tx_slots[slot] = NULL; 3825 mgr->is_waiting_for_dwn_reply = false; 3826 mutex_unlock(&mgr->qlock); 3827 3828 #ifdef __NetBSD__ 3829 DRM_WAKEUP_ALL(&mgr->tx_waitq, &mgr->qlock); 3830 #else 3831 wake_up_all(&mgr->tx_waitq); 3832 #endif 3833 3834 return 0; 3835 3836 no_msg: 3837 drm_dp_mst_topology_put_mstb(mstb); 3838 clear_down_rep_recv: 3839 mutex_lock(&mgr->qlock); 3840 mgr->is_waiting_for_dwn_reply = false; 3841 mutex_unlock(&mgr->qlock); 3842 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 3843 3844 return 0; 3845 } 3846 3847 static inline bool 3848 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, 3849 struct drm_dp_pending_up_req *up_req) 3850 { 3851 struct drm_dp_mst_branch *mstb = NULL; 3852 struct 
drm_dp_sideband_msg_req_body *msg = &up_req->msg; 3853 struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; 3854 bool hotplug = false; 3855 3856 if (hdr->broadcast) { 3857 const u8 *guid = NULL; 3858 3859 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) 3860 guid = msg->u.conn_stat.guid; 3861 else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) 3862 guid = msg->u.resource_stat.guid; 3863 3864 if (guid) 3865 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); 3866 } else { 3867 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); 3868 } 3869 3870 if (!mstb) { 3871 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", 3872 hdr->lct); 3873 return false; 3874 } 3875 3876 /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ 3877 if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { 3878 drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); 3879 hotplug = true; 3880 } 3881 3882 drm_dp_mst_topology_put_mstb(mstb); 3883 return hotplug; 3884 } 3885 3886 static void drm_dp_mst_up_req_work(struct work_struct *work) 3887 { 3888 struct drm_dp_mst_topology_mgr *mgr = 3889 container_of(work, struct drm_dp_mst_topology_mgr, 3890 up_req_work); 3891 struct drm_dp_pending_up_req *up_req; 3892 bool send_hotplug = false; 3893 3894 mutex_lock(&mgr->probe_lock); 3895 while (true) { 3896 mutex_lock(&mgr->up_req_lock); 3897 up_req = list_first_entry_or_null(&mgr->up_req_list, 3898 struct drm_dp_pending_up_req, 3899 next); 3900 if (up_req) 3901 list_del(&up_req->next); 3902 mutex_unlock(&mgr->up_req_lock); 3903 3904 if (!up_req) 3905 break; 3906 3907 send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); 3908 kfree(up_req); 3909 } 3910 mutex_unlock(&mgr->probe_lock); 3911 3912 if (send_hotplug) 3913 drm_kms_helper_hotplug_event(mgr->dev); 3914 } 3915 3916 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 3917 { 3918 struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; 3919 struct drm_dp_pending_up_req *up_req; 3920 bool seqno; 3921 3922 if (!drm_dp_get_one_sb_msg(mgr, true)) 3923 goto out; 3924 3925 if (!mgr->up_req_recv.have_eomt) 3926 return 0; 3927 3928 up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); 3929 if (!up_req) { 3930 DRM_ERROR("Not enough memory to process MST up req\n"); 3931 return -ENOMEM; 3932 } 3933 INIT_LIST_HEAD(&up_req->next); 3934 3935 seqno = hdr->seqno; 3936 drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); 3937 3938 if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && 3939 up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { 3940 DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n", 3941 up_req->msg.req_type); 3942 kfree(up_req); 3943 goto out; 3944 } 3945 3946 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, 3947 seqno, false); 3948 3949 if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { 3950 const struct drm_dp_connection_status_notify *conn_stat = 3951 &up_req->msg.u.conn_stat; 3952 3953 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", 3954 conn_stat->port_number, 3955 conn_stat->legacy_device_plug_status, 3956 conn_stat->displayport_device_plug_status, 3957 conn_stat->message_capability_status, 3958 conn_stat->input_port, 3959 conn_stat->peer_device_type); 3960 } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 3961 const struct drm_dp_resource_status_notify *res_stat = 3962 &up_req->msg.u.resource_stat; 3963 3964 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", 3965 res_stat->port_number, 3966 res_stat->available_pbn); 3967 } 3968 
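	/*
	 * The parsed request is only queued here; drm_dp_mst_up_req_work()
	 * picks it up from mgr->up_req_list on the system_long_wq worker and
	 * performs the actual topology update outside of this handler.
	 */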
3969 up_req->hdr = *hdr; 3970 mutex_lock(&mgr->up_req_lock); 3971 list_add_tail(&up_req->next, &mgr->up_req_list); 3972 mutex_unlock(&mgr->up_req_lock); 3973 queue_work(system_long_wq, &mgr->up_req_work); 3974 3975 out: 3976 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 3977 return 0; 3978 } 3979 3980 /** 3981 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify 3982 * @mgr: manager to notify irq for. 3983 * @esi: 4 bytes from SINK_COUNT_ESI 3984 * @handled: whether the hpd interrupt was consumed or not 3985 * 3986 * This should be called from the driver when it detects a short IRQ, 3987 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The 3988 * topology manager will process the sideband messages received as a result 3989 * of this. 3990 */ 3991 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) 3992 { 3993 int ret = 0; 3994 int sc; 3995 *handled = false; 3996 sc = esi[0] & 0x3f; 3997 3998 if (sc != mgr->sink_count) { 3999 mgr->sink_count = sc; 4000 *handled = true; 4001 } 4002 4003 if (esi[1] & DP_DOWN_REP_MSG_RDY) { 4004 ret = drm_dp_mst_handle_down_rep(mgr); 4005 *handled = true; 4006 } 4007 4008 if (esi[1] & DP_UP_REQ_MSG_RDY) { 4009 ret |= drm_dp_mst_handle_up_req(mgr); 4010 *handled = true; 4011 } 4012 4013 drm_dp_mst_kick_tx(mgr); 4014 return ret; 4015 } 4016 EXPORT_SYMBOL(drm_dp_mst_hpd_irq); 4017 4018 /** 4019 * drm_dp_mst_detect_port() - get connection status for an MST port 4020 * @connector: DRM connector for this port 4021 * @ctx: The acquisition context to use for grabbing locks 4022 * @mgr: manager for this port 4023 * @port: pointer to a port 4024 * 4025 * This returns the current connection state for a port. 4026 */ 4027 int 4028 drm_dp_mst_detect_port(struct drm_connector *connector, 4029 struct drm_modeset_acquire_ctx *ctx, 4030 struct drm_dp_mst_topology_mgr *mgr, 4031 struct drm_dp_mst_port *port) 4032 { 4033 int ret; 4034 4035 /* we need to search for the port in the mgr in case it's gone */ 4036 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4037 if (!port) 4038 return connector_status_disconnected; 4039 4040 ret = drm_modeset_lock(&mgr->base.lock, ctx); 4041 if (ret) 4042 goto out; 4043 4044 ret = connector_status_disconnected; 4045 4046 if (!port->ddps) 4047 goto out; 4048 4049 switch (port->pdt) { 4050 case DP_PEER_DEVICE_NONE: 4051 case DP_PEER_DEVICE_MST_BRANCHING: 4052 if (!port->mcs) 4053 ret = connector_status_connected; 4054 break; 4055 4056 case DP_PEER_DEVICE_SST_SINK: 4057 ret = connector_status_connected; 4058 /* for logical ports - cache the EDID */ 4059 if (port->port_num >= 8 && !port->cached_edid) { 4060 port->cached_edid = drm_get_edid(connector, &port->aux.ddc); 4061 } 4062 break; 4063 case DP_PEER_DEVICE_DP_LEGACY_CONV: 4064 if (port->ldps) 4065 ret = connector_status_connected; 4066 break; 4067 } 4068 out: 4069 drm_dp_mst_topology_put_port(port); 4070 return ret; 4071 } 4072 EXPORT_SYMBOL(drm_dp_mst_detect_port); 4073 4074 /** 4075 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not 4076 * @mgr: manager for this port 4077 * @port: unverified pointer to a port. 4078 * 4079 * This returns whether the port supports audio or not. 
4080 */ 4081 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, 4082 struct drm_dp_mst_port *port) 4083 { 4084 bool ret = false; 4085 4086 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4087 if (!port) 4088 return ret; 4089 ret = port->has_audio; 4090 drm_dp_mst_topology_put_port(port); 4091 return ret; 4092 } 4093 EXPORT_SYMBOL(drm_dp_mst_port_has_audio); 4094 4095 /** 4096 * drm_dp_mst_get_edid() - get EDID for an MST port 4097 * @connector: toplevel connector to get EDID for 4098 * @mgr: manager for this port 4099 * @port: unverified pointer to a port. 4100 * 4101 * This returns an EDID for the port connected to a connector, 4102 * It validates the pointer still exists so the caller doesn't require a 4103 * reference. 4104 */ 4105 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 4106 { 4107 struct edid *edid = NULL; 4108 4109 /* we need to search for the port in the mgr in case it's gone */ 4110 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4111 if (!port) 4112 return NULL; 4113 4114 if (port->cached_edid) 4115 edid = drm_edid_duplicate(port->cached_edid); 4116 else { 4117 edid = drm_get_edid(connector, &port->aux.ddc); 4118 } 4119 port->has_audio = drm_detect_monitor_audio(edid); 4120 drm_dp_mst_topology_put_port(port); 4121 return edid; 4122 } 4123 EXPORT_SYMBOL(drm_dp_mst_get_edid); 4124 4125 /** 4126 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value 4127 * @mgr: manager to use 4128 * @pbn: payload bandwidth to convert into slots. 4129 * 4130 * Calculate the number of VCPI slots that will be required for the given PBN 4131 * value. This function is deprecated, and should not be used in atomic 4132 * drivers. 4133 * 4134 * RETURNS: 4135 * The total slots required for this port, or error. 4136 */ 4137 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, 4138 int pbn) 4139 { 4140 int num_slots; 4141 4142 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 4143 4144 /* max. time slots - one slot for MTP header */ 4145 if (num_slots > 63) 4146 return -ENOSPC; 4147 return num_slots; 4148 } 4149 EXPORT_SYMBOL(drm_dp_find_vcpi_slots); 4150 4151 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, 4152 struct drm_dp_vcpi *vcpi, int pbn, int slots) 4153 { 4154 int ret; 4155 4156 /* max. time slots - one slot for MTP header */ 4157 if (slots > 63) 4158 return -ENOSPC; 4159 4160 vcpi->pbn = pbn; 4161 vcpi->aligned_pbn = slots * mgr->pbn_div; 4162 vcpi->num_slots = slots; 4163 4164 ret = drm_dp_mst_assign_payload_id(mgr, vcpi); 4165 if (ret < 0) 4166 return ret; 4167 return 0; 4168 } 4169 4170 /** 4171 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state 4172 * @state: global atomic state 4173 * @mgr: MST topology manager for the port 4174 * @port: port to find vcpi slots for 4175 * @pbn: bandwidth required for the mode in PBN 4176 * @pbn_div: divider for DSC mode that takes FEC into account 4177 * 4178 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it 4179 * may have had. Any atomic drivers which support MST must call this function 4180 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the 4181 * current VCPI allocation for the new state, but only when 4182 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set 4183 * to ensure compatibility with userspace applications that still use the 4184 * legacy modesetting UAPI. 
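 *
 * An illustrative sketch (not taken from any real driver; @adjusted_mode, bpp
 * and the surrounding atomic_check plumbing are assumed) of the usual pairing
 * with drm_dp_calc_pbn_mode():
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp, false);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);
 *	if (slots < 0)
 *		return slots;
 *
 * Passing 0 as @pbn_div falls back to &drm_dp_mst_topology_mgr.pbn_div.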
4185 * 4186 * Allocations set by this function are not checked against the bandwidth 4187 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check(). 4188 * 4189 * Additionally, it is OK to call this function multiple times on the same 4190 * @port as needed. It is not OK however, to call this function and 4191 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase. 4192 * 4193 * See also: 4194 * drm_dp_atomic_release_vcpi_slots() 4195 * drm_dp_mst_atomic_check() 4196 * 4197 * Returns: 4198 * Total slots in the atomic state assigned for this port, or a negative error 4199 * code if the port no longer exists 4200 */ 4201 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, 4202 struct drm_dp_mst_topology_mgr *mgr, 4203 struct drm_dp_mst_port *port, int pbn, 4204 int pbn_div) 4205 { 4206 struct drm_dp_mst_topology_state *topology_state; 4207 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL; 4208 int prev_slots, prev_bw, req_slots; 4209 4210 topology_state = drm_atomic_get_mst_topology_state(state, mgr); 4211 if (IS_ERR(topology_state)) 4212 return PTR_ERR(topology_state); 4213 4214 /* Find the current allocation for this port, if any */ 4215 list_for_each_entry(pos, &topology_state->vcpis, next) { 4216 if (pos->port == port) { 4217 vcpi = pos; 4218 prev_slots = vcpi->vcpi; 4219 prev_bw = vcpi->pbn; 4220 4221 /* 4222 * This should never happen, unless the driver tries 4223 * releasing and allocating the same VCPI allocation, 4224 * which is an error 4225 */ 4226 if (WARN_ON(!prev_slots)) { 4227 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n", 4228 port); 4229 return -EINVAL; 4230 } 4231 4232 break; 4233 } 4234 } 4235 if (!vcpi) { 4236 prev_slots = 0; 4237 prev_bw = 0; 4238 } 4239 4240 if (pbn_div <= 0) 4241 pbn_div = mgr->pbn_div; 4242 4243 req_slots = DIV_ROUND_UP(pbn, pbn_div); 4244 4245 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", 4246 port->connector->base.id, port->connector->name, 4247 port, prev_slots, req_slots); 4248 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", 4249 port->connector->base.id, port->connector->name, 4250 port, prev_bw, pbn); 4251 4252 /* Add the new allocation to the state */ 4253 if (!vcpi) { 4254 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL); 4255 if (!vcpi) 4256 return -ENOMEM; 4257 4258 drm_dp_mst_get_port_malloc(port); 4259 vcpi->port = port; 4260 list_add(&vcpi->next, &topology_state->vcpis); 4261 } 4262 vcpi->vcpi = req_slots; 4263 vcpi->pbn = pbn; 4264 4265 return req_slots; 4266 } 4267 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); 4268 4269 /** 4270 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots 4271 * @state: global atomic state 4272 * @mgr: MST topology manager for the port 4273 * @port: The port to release the VCPI slots from 4274 * 4275 * Releases any VCPI slots that have been allocated to a port in the atomic 4276 * state. Any atomic drivers which support MST must call this function in 4277 * their &drm_connector_helper_funcs.atomic_check() callback when the 4278 * connector will no longer have VCPI allocated (e.g. because its CRTC was 4279 * removed) when it had VCPI allocated in the previous atomic state. 4280 * 4281 * It is OK to call this even if @port has been removed from the system. 4282 * Additionally, it is OK to call this function multiple times on the same 4283 * @port as needed. 
It is not OK however, to call this function and 4284 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check 4285 * phase. 4286 * 4287 * See also: 4288 * drm_dp_atomic_find_vcpi_slots() 4289 * drm_dp_mst_atomic_check() 4290 * 4291 * Returns: 4292 * 0 if all slots for this port were added back to 4293 * &drm_dp_mst_topology_state.avail_slots or negative error code 4294 */ 4295 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, 4296 struct drm_dp_mst_topology_mgr *mgr, 4297 struct drm_dp_mst_port *port) 4298 { 4299 struct drm_dp_mst_topology_state *topology_state; 4300 struct drm_dp_vcpi_allocation *pos; 4301 bool found = false; 4302 4303 topology_state = drm_atomic_get_mst_topology_state(state, mgr); 4304 if (IS_ERR(topology_state)) 4305 return PTR_ERR(topology_state); 4306 4307 list_for_each_entry(pos, &topology_state->vcpis, next) { 4308 if (pos->port == port) { 4309 found = true; 4310 break; 4311 } 4312 } 4313 if (WARN_ON(!found)) { 4314 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n", 4315 port, &topology_state->base); 4316 return -EINVAL; 4317 } 4318 4319 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi); 4320 if (pos->vcpi) { 4321 drm_dp_mst_put_port_malloc(port); 4322 pos->vcpi = 0; 4323 } 4324 4325 return 0; 4326 } 4327 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots); 4328 4329 /** 4330 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel 4331 * @mgr: manager for this port 4332 * @port: port to allocate a virtual channel for. 4333 * @pbn: payload bandwidth number to request 4334 * @slots: returned number of slots for this PBN. 4335 */ 4336 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, 4337 struct drm_dp_mst_port *port, int pbn, int slots) 4338 { 4339 int ret; 4340 4341 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4342 if (!port) 4343 return false; 4344 4345 if (slots < 0) 4346 return false; 4347 4348 if (port->vcpi.vcpi > 0) { 4349 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", 4350 port->vcpi.vcpi, port->vcpi.pbn, pbn); 4351 if (pbn == port->vcpi.pbn) { 4352 drm_dp_mst_topology_put_port(port); 4353 return true; 4354 } 4355 } 4356 4357 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); 4358 if (ret) { 4359 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", 4360 DIV_ROUND_UP(pbn, mgr->pbn_div), ret); 4361 goto out; 4362 } 4363 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", 4364 pbn, port->vcpi.num_slots); 4365 4366 /* Keep port allocated until its payload has been removed */ 4367 drm_dp_mst_get_port_malloc(port); 4368 drm_dp_mst_topology_put_port(port); 4369 return true; 4370 out: 4371 return false; 4372 } 4373 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); 4374 4375 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 4376 { 4377 int slots = 0; 4378 port = drm_dp_mst_topology_get_port_validated(mgr, port); 4379 if (!port) 4380 return slots; 4381 4382 slots = port->vcpi.num_slots; 4383 drm_dp_mst_topology_put_port(port); 4384 return slots; 4385 } 4386 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); 4387 4388 /** 4389 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI 4390 * @mgr: manager for this port 4391 * @port: unverified pointer to a port. 4392 * 4393 * This just resets the number of slots for the ports VCPI for later programming. 
4394 */ 4395 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 4396 { 4397 /* 4398 * A port with VCPI will remain allocated until its VCPI is 4399 * released, no verified ref needed 4400 */ 4401 4402 port->vcpi.num_slots = 0; 4403 } 4404 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); 4405 4406 /** 4407 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI 4408 * @mgr: manager for this port 4409 * @port: port to deallocate vcpi for 4410 * 4411 * This can be called unconditionally, regardless of whether 4412 * drm_dp_mst_allocate_vcpi() succeeded or not. 4413 */ 4414 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, 4415 struct drm_dp_mst_port *port) 4416 { 4417 if (!port->vcpi.vcpi) 4418 return; 4419 4420 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 4421 port->vcpi.num_slots = 0; 4422 port->vcpi.pbn = 0; 4423 port->vcpi.aligned_pbn = 0; 4424 port->vcpi.vcpi = 0; 4425 drm_dp_mst_put_port_malloc(port); 4426 } 4427 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); 4428 4429 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, 4430 int id, struct drm_dp_payload *payload) 4431 { 4432 u8 payload_alloc[3], status; 4433 int ret; 4434 int retries = 0; 4435 4436 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, 4437 DP_PAYLOAD_TABLE_UPDATED); 4438 4439 payload_alloc[0] = id; 4440 payload_alloc[1] = payload->start_slot; 4441 payload_alloc[2] = payload->num_slots; 4442 4443 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); 4444 if (ret != 3) { 4445 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret); 4446 goto fail; 4447 } 4448 4449 retry: 4450 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 4451 if (ret < 0) { 4452 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 4453 goto fail; 4454 } 4455 4456 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) { 4457 retries++; 4458 if (retries < 20) { 4459 usleep_range(10000, 20000); 4460 goto retry; 4461 } 4462 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status); 4463 ret = -EINVAL; 4464 goto fail; 4465 } 4466 ret = 0; 4467 fail: 4468 return ret; 4469 } 4470 4471 4472 /** 4473 * drm_dp_check_act_status() - Check ACT handled status. 4474 * @mgr: manager to use 4475 * 4476 * Check the payload status bits in the DPCD for ACT handled completion. 4477 */ 4478 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) 4479 { 4480 u8 status; 4481 int ret; 4482 int count = 0; 4483 4484 do { 4485 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 4486 4487 if (ret < 0) { 4488 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 4489 goto fail; 4490 } 4491 4492 if (status & DP_PAYLOAD_ACT_HANDLED) 4493 break; 4494 count++; 4495 udelay(100); 4496 4497 } while (count < 30); 4498 4499 if (!(status & DP_PAYLOAD_ACT_HANDLED)) { 4500 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); 4501 ret = -EINVAL; 4502 goto fail; 4503 } 4504 return 0; 4505 fail: 4506 return ret; 4507 } 4508 EXPORT_SYMBOL(drm_dp_check_act_status); 4509 4510 /** 4511 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. 4512 * @clock: dot clock for the mode 4513 * @bpp: bpp for the mode. 4514 * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel 4515 * 4516 * This uses the formula in the spec to calculate the PBN value for a mode. 
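 *
 * Worked example (illustrative numbers): a 148500 kHz dot clock at 24 bpp
 * with @dsc false evaluates to
 * DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000) = 532 PBN.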
4517 */ 4518 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc) 4519 { 4520 /* 4521 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 4522 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on 4523 * common multiplier to render an integer PBN for all link rate/lane 4524 * counts combinations 4525 * calculate 4526 * peak_kbps *= (1006/1000) 4527 * peak_kbps *= (64/54) 4528 * peak_kbps *= 8 convert to bytes 4529 * 4530 * If the bpp is in units of 1/16, further divide by 16. Put this 4531 * factor in the numerator rather than the denominator to avoid 4532 * integer overflow 4533 */ 4534 4535 if (dsc) 4536 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006), 4537 8 * 54 * 1000 * 1000); 4538 4539 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006), 4540 8 * 54 * 1000 * 1000); 4541 } 4542 EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 4543 4544 /* we want to kick the TX after we've ack the up/down IRQs. */ 4545 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) 4546 { 4547 queue_work(system_long_wq, &mgr->tx_work); 4548 } 4549 4550 #if IS_ENABLED(CONFIG_DEBUG_FS) 4551 static void drm_dp_mst_dump_mstb(struct seq_file *m, 4552 struct drm_dp_mst_branch *mstb) 4553 { 4554 struct drm_dp_mst_port *port; 4555 int tabs = mstb->lct; 4556 char prefix[10]; 4557 int i; 4558 4559 for (i = 0; i < tabs; i++) 4560 prefix[i] = '\t'; 4561 prefix[i] = '\0'; 4562 4563 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); 4564 list_for_each_entry(port, &mstb->ports, next) { 4565 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector); 4566 if (port->mstb) 4567 drm_dp_mst_dump_mstb(m, port->mstb); 4568 } 4569 } 4570 4571 #define DP_PAYLOAD_TABLE_SIZE 64 4572 4573 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 4574 char *buf) 4575 { 4576 int i; 4577 4578 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { 4579 if (drm_dp_dpcd_read(mgr->aux, 4580 DP_PAYLOAD_TABLE_UPDATE_STATUS + i, 4581 &buf[i], 16) != 16) 4582 return false; 4583 } 4584 return true; 4585 } 4586 4587 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr, 4588 struct drm_dp_mst_port *port, char *name, 4589 int namelen) 4590 { 4591 struct edid *mst_edid; 4592 4593 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port); 4594 drm_edid_get_monitor_name(mst_edid, name, namelen); 4595 } 4596 4597 /** 4598 * drm_dp_mst_dump_topology(): dump topology to seq file. 4599 * @m: seq_file to dump output to 4600 * @mgr: manager to dump current topology for. 4601 * 4602 * helper to dump MST topology to a seq file for debugfs. 
4603 */ 4604 void drm_dp_mst_dump_topology(struct seq_file *m, 4605 struct drm_dp_mst_topology_mgr *mgr) 4606 { 4607 int i; 4608 struct drm_dp_mst_port *port; 4609 4610 mutex_lock(&mgr->lock); 4611 if (mgr->mst_primary) 4612 drm_dp_mst_dump_mstb(m, mgr->mst_primary); 4613 4614 /* dump VCPIs */ 4615 mutex_unlock(&mgr->lock); 4616 4617 mutex_lock(&mgr->payload_lock); 4618 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask, 4619 mgr->max_payloads); 4620 4621 for (i = 0; i < mgr->max_payloads; i++) { 4622 if (mgr->proposed_vcpis[i]) { 4623 char name[14]; 4624 4625 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 4626 fetch_monitor_name(mgr, port, name, sizeof(name)); 4627 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i, 4628 port->port_num, port->vcpi.vcpi, 4629 port->vcpi.num_slots, 4630 (*name != 0) ? name : "Unknown"); 4631 } else 4632 seq_printf(m, "vcpi %d:unused\n", i); 4633 } 4634 for (i = 0; i < mgr->max_payloads; i++) { 4635 seq_printf(m, "payload %d: %d, %d, %d\n", 4636 i, 4637 mgr->payloads[i].payload_state, 4638 mgr->payloads[i].start_slot, 4639 mgr->payloads[i].num_slots); 4640 4641 4642 } 4643 mutex_unlock(&mgr->payload_lock); 4644 4645 mutex_lock(&mgr->lock); 4646 if (mgr->mst_primary) { 4647 u8 buf[DP_PAYLOAD_TABLE_SIZE]; 4648 int ret; 4649 4650 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); 4651 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); 4652 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); 4653 seq_printf(m, "faux/mst: %*ph\n", 2, buf); 4654 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); 4655 seq_printf(m, "mst ctrl: %*ph\n", 1, buf); 4656 4657 /* dump the standard OUI branch header */ 4658 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); 4659 seq_printf(m, "branch oui: %*phN devid: ", 3, buf); 4660 for (i = 0x3; i < 0x8 && buf[i]; i++) 4661 seq_printf(m, "%c", buf[i]); 4662 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", 4663 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); 4664 if (dump_dp_payload_table(mgr, buf)) 4665 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf); 4666 } 4667 4668 mutex_unlock(&mgr->lock); 4669 4670 } 4671 EXPORT_SYMBOL(drm_dp_mst_dump_topology); 4672 #endif /* IS_ENABLED(CONFIG_DEBUG_FS) */ 4673 4674 static void drm_dp_tx_work(struct work_struct *work) 4675 { 4676 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); 4677 4678 mutex_lock(&mgr->qlock); 4679 if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) 4680 process_single_down_tx_qlock(mgr); 4681 mutex_unlock(&mgr->qlock); 4682 } 4683 4684 static inline void 4685 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) 4686 { 4687 if (port->connector) 4688 port->mgr->cbs->destroy_connector(port->mgr, port->connector); 4689 4690 drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); 4691 drm_dp_mst_put_port_malloc(port); 4692 } 4693 4694 static inline void 4695 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) 4696 { 4697 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 4698 struct drm_dp_mst_port *port, *tmp; 4699 bool wake_tx = false; 4700 4701 mutex_lock(&mgr->lock); 4702 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 4703 list_del(&port->next); 4704 drm_dp_mst_topology_put_port(port); 4705 } 4706 mutex_unlock(&mgr->lock); 4707 4708 /* drop any tx slots msg */ 4709 mutex_lock(&mstb->mgr->qlock); 4710 if (mstb->tx_slots[0]) { 4711 
mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 4712 mstb->tx_slots[0] = NULL; 4713 wake_tx = true; 4714 } 4715 if (mstb->tx_slots[1]) { 4716 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 4717 mstb->tx_slots[1] = NULL; 4718 wake_tx = true; 4719 } 4720 mutex_unlock(&mstb->mgr->qlock); 4721 4722 if (wake_tx) 4723 { 4724 #ifdef __NetBSD__ 4725 DRM_WAKEUP_ALL(&mstb->mgr->tx_waitq, &mstb->mgr->qlock); 4726 #else 4727 wake_up_all(&mstb->mgr->tx_waitq); 4728 #endif 4729 } 4730 4731 drm_dp_mst_put_mstb_malloc(mstb); 4732 } 4733 4734 static void drm_dp_delayed_destroy_work(struct work_struct *work) 4735 { 4736 struct drm_dp_mst_topology_mgr *mgr = 4737 container_of(work, struct drm_dp_mst_topology_mgr, 4738 delayed_destroy_work); 4739 bool send_hotplug = false, go_again; 4740 4741 /* 4742 * Not a regular list traverse as we have to drop the destroy 4743 * connector lock before destroying the mstb/port, to avoid AB->BA 4744 * ordering between this lock and the config mutex. 4745 */ 4746 do { 4747 go_again = false; 4748 4749 for (;;) { 4750 struct drm_dp_mst_branch *mstb; 4751 4752 mutex_lock(&mgr->delayed_destroy_lock); 4753 mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, 4754 struct drm_dp_mst_branch, 4755 destroy_next); 4756 if (mstb) 4757 list_del(&mstb->destroy_next); 4758 mutex_unlock(&mgr->delayed_destroy_lock); 4759 4760 if (!mstb) 4761 break; 4762 4763 drm_dp_delayed_destroy_mstb(mstb); 4764 go_again = true; 4765 } 4766 4767 for (;;) { 4768 struct drm_dp_mst_port *port; 4769 4770 mutex_lock(&mgr->delayed_destroy_lock); 4771 port = list_first_entry_or_null(&mgr->destroy_port_list, 4772 struct drm_dp_mst_port, 4773 next); 4774 if (port) 4775 list_del(&port->next); 4776 mutex_unlock(&mgr->delayed_destroy_lock); 4777 4778 if (!port) 4779 break; 4780 4781 drm_dp_delayed_destroy_port(port); 4782 send_hotplug = true; 4783 go_again = true; 4784 } 4785 } while (go_again); 4786 4787 if (send_hotplug) 4788 drm_kms_helper_hotplug_event(mgr->dev); 4789 } 4790 4791 static struct drm_private_state * 4792 drm_dp_mst_duplicate_state(struct drm_private_obj *obj) 4793 { 4794 struct drm_dp_mst_topology_state *state, *old_state = 4795 to_dp_mst_topology_state(obj->state); 4796 struct drm_dp_vcpi_allocation *pos, *vcpi; 4797 4798 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL); 4799 if (!state) 4800 return NULL; 4801 4802 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 4803 4804 INIT_LIST_HEAD(&state->vcpis); 4805 4806 list_for_each_entry(pos, &old_state->vcpis, next) { 4807 /* Prune leftover freed VCPI allocations */ 4808 if (!pos->vcpi) 4809 continue; 4810 4811 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL); 4812 if (!vcpi) 4813 goto fail; 4814 4815 drm_dp_mst_get_port_malloc(vcpi->port); 4816 list_add(&vcpi->next, &state->vcpis); 4817 } 4818 4819 return &state->base; 4820 4821 fail: 4822 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) { 4823 drm_dp_mst_put_port_malloc(pos->port); 4824 kfree(pos); 4825 } 4826 kfree(state); 4827 4828 return NULL; 4829 } 4830 4831 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, 4832 struct drm_private_state *state) 4833 { 4834 struct drm_dp_mst_topology_state *mst_state = 4835 to_dp_mst_topology_state(state); 4836 struct drm_dp_vcpi_allocation *pos, *tmp; 4837 4838 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) { 4839 /* We only keep references to ports with non-zero VCPIs */ 4840 if (pos->vcpi) 4841 drm_dp_mst_put_port_malloc(pos->port); 4842 kfree(pos); 4843 } 4844 4845 
kfree(mst_state); 4846 } 4847 4848 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, 4849 struct drm_dp_mst_branch *branch) 4850 { 4851 while (port->parent) { 4852 if (port->parent == branch) 4853 return true; 4854 4855 if (port->parent->port_parent) 4856 port = port->parent->port_parent; 4857 else 4858 break; 4859 } 4860 return false; 4861 } 4862 4863 static inline 4864 int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch, 4865 struct drm_dp_mst_topology_state *mst_state) 4866 { 4867 struct drm_dp_mst_port *port; 4868 struct drm_dp_vcpi_allocation *vcpi; 4869 int pbn_limit = 0, pbn_used = 0; 4870 4871 list_for_each_entry(port, &branch->ports, next) { 4872 if (port->mstb) 4873 if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state)) 4874 return -ENOSPC; 4875 4876 if (port->available_pbn > 0) 4877 pbn_limit = port->available_pbn; 4878 } 4879 DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n", 4880 branch, pbn_limit); 4881 4882 list_for_each_entry(vcpi, &mst_state->vcpis, next) { 4883 if (!vcpi->pbn) 4884 continue; 4885 4886 if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch)) 4887 pbn_used += vcpi->pbn; 4888 } 4889 DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n", 4890 branch, pbn_used); 4891 4892 if (pbn_used > pbn_limit) { 4893 DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n", 4894 branch); 4895 return -ENOSPC; 4896 } 4897 return 0; 4898 } 4899 4900 static inline int 4901 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, 4902 struct drm_dp_mst_topology_state *mst_state) 4903 { 4904 struct drm_dp_vcpi_allocation *vcpi; 4905 int avail_slots = 63, payload_count = 0; 4906 4907 list_for_each_entry(vcpi, &mst_state->vcpis, next) { 4908 /* Releasing VCPI is always OK - even if the port is gone */ 4909 if (!vcpi->vcpi) { 4910 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n", 4911 vcpi->port); 4912 continue; 4913 } 4914 4915 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n", 4916 vcpi->port, vcpi->vcpi); 4917 4918 avail_slots -= vcpi->vcpi; 4919 if (avail_slots < 0) { 4920 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n", 4921 vcpi->port, mst_state, 4922 avail_slots + vcpi->vcpi); 4923 return -ENOSPC; 4924 } 4925 4926 if (++payload_count > mgr->max_payloads) { 4927 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n", 4928 mgr, mst_state, mgr->max_payloads); 4929 return -EINVAL; 4930 } 4931 } 4932 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", 4933 mgr, mst_state, avail_slots, 4934 63 - avail_slots); 4935 4936 return 0; 4937 } 4938 4939 /** 4940 * drm_dp_mst_add_affected_dsc_crtcs() - flag DSC-affected CRTCs for a modeset 4941 * @state: Pointer to the new struct drm_atomic_state 4942 * @mgr: MST topology manager 4943 * 4944 * Whenever there is a change in the MST topology, 4945 * the DSC configuration has to be recalculated, 4946 * so we need to trigger a modeset on all affected 4947 * CRTCs in that topology 4948 * 4949 * See also: 4950 * drm_dp_mst_atomic_enable_dsc() 4951 */ 4952 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr) 4953 { 4954 struct drm_dp_mst_topology_state *mst_state; 4955 struct drm_dp_vcpi_allocation *pos; 4956 struct drm_connector *connector; 4957 struct drm_connector_state *conn_state; 4958 struct drm_crtc *crtc; 4959 struct drm_crtc_state *crtc_state; 4960 4961 mst_state = drm_atomic_get_mst_topology_state(state, mgr); 4962 4963 if
(IS_ERR(mst_state)) 4964 return -EINVAL; 4965 4966 list_for_each_entry(pos, &mst_state->vcpis, next) { 4967 4968 connector = pos->port->connector; 4969 4970 if (!connector) 4971 return -EINVAL; 4972 4973 conn_state = drm_atomic_get_connector_state(state, connector); 4974 4975 if (IS_ERR(conn_state)) 4976 return PTR_ERR(conn_state); 4977 4978 crtc = conn_state->crtc; 4979 4980 if (WARN_ON(!crtc)) 4981 return -EINVAL; 4982 4983 if (!drm_dp_mst_dsc_aux_for_port(pos->port)) 4984 continue; 4985 4986 crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc); 4987 4988 if (IS_ERR(crtc_state)) 4989 return PTR_ERR(crtc_state); 4990 4991 DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n", 4992 mgr, crtc); 4993 4994 crtc_state->mode_changed = true; 4995 } 4996 return 0; 4997 } 4998 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs); 4999 5000 /** 5001 * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off 5002 * @state: Pointer to the new drm_atomic_state 5003 * @port: Pointer to the affected MST Port 5004 * @pbn: Newly recalculated bw required for link with DSC enabled 5005 * @pbn_div: Divider to calculate correct number of pbn per slot 5006 * @enable: Boolean flag to enable or disable DSC on the port 5007 * 5008 * This function enables DSC on the given Port 5009 * by recalculating its vcpi from pbn provided 5010 * and sets dsc_enable flag to keep track of which 5011 * ports have DSC enabled 5012 * 5013 */ 5014 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, 5015 struct drm_dp_mst_port *port, 5016 int pbn, int pbn_div, 5017 bool enable) 5018 { 5019 struct drm_dp_mst_topology_state *mst_state; 5020 struct drm_dp_vcpi_allocation *pos; 5021 bool found = false; 5022 int vcpi = 0; 5023 5024 mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); 5025 5026 if (IS_ERR(mst_state)) 5027 return PTR_ERR(mst_state); 5028 5029 list_for_each_entry(pos, &mst_state->vcpis, next) { 5030 if (pos->port == port) { 5031 found = true; 5032 break; 5033 } 5034 } 5035 5036 if (!found) { 5037 DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n", 5038 port, mst_state); 5039 return -EINVAL; 5040 } 5041 5042 if (pos->dsc_enabled == enable) { 5043 DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n", 5044 port, enable, pos->vcpi); 5045 vcpi = pos->vcpi; 5046 } 5047 5048 if (enable) { 5049 vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div); 5050 DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n", 5051 port, vcpi); 5052 if (vcpi < 0) 5053 return -EINVAL; 5054 } 5055 5056 pos->dsc_enabled = enable; 5057 5058 return vcpi; 5059 } 5060 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); 5061 /** 5062 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an 5063 * atomic update is valid 5064 * @state: Pointer to the new &struct drm_dp_mst_topology_state 5065 * 5066 * Checks the given topology state for an atomic update to ensure that it's 5067 * valid. This includes checking whether there's enough bandwidth to support 5068 * the new VCPI allocations in the atomic update. 5069 * 5070 * Any atomic drivers supporting DP MST must make sure to call this after 5071 * checking the rest of their state in their 5072 * &drm_mode_config_funcs.atomic_check() callback. 
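 *
 * A minimal sketch of such a callback (hypothetical driver code, assuming the
 * driver otherwise uses the standard atomic helpers):
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}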
5073 * 5074 * See also: 5075 * drm_dp_atomic_find_vcpi_slots() 5076 * drm_dp_atomic_release_vcpi_slots() 5077 * 5078 * Returns: 5079 * 5080 * 0 if the new state is valid, negative error code otherwise. 5081 */ 5082 int drm_dp_mst_atomic_check(struct drm_atomic_state *state) 5083 { 5084 struct drm_dp_mst_topology_mgr *mgr; 5085 struct drm_dp_mst_topology_state *mst_state; 5086 int i, ret = 0; 5087 5088 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { 5089 if (!mgr->mst_state) 5090 continue; 5091 5092 ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state); 5093 if (ret) 5094 break; 5095 ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state); 5096 if (ret) 5097 break; 5098 } 5099 5100 return ret; 5101 } 5102 EXPORT_SYMBOL(drm_dp_mst_atomic_check); 5103 5104 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = { 5105 .atomic_duplicate_state = drm_dp_mst_duplicate_state, 5106 .atomic_destroy_state = drm_dp_mst_destroy_state, 5107 }; 5108 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); 5109 5110 /** 5111 * drm_atomic_get_mst_topology_state: get MST topology state 5112 * 5113 * @state: global atomic state 5114 * @mgr: MST topology manager, also the private object in this case 5115 * 5116 * This function wraps drm_atomic_get_private_obj_state(), passing in the MST atomic 5117 * state vtable so that the private object state returned is that of an MST 5118 * topology object. Also, drm_atomic_get_private_obj_state() expects the caller 5119 * to take care of the locking, so warn if we don't hold the connection_mutex. 5120 * 5121 * RETURNS: 5122 * 5123 * The MST topology state or error pointer. 5124 */ 5125 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, 5126 struct drm_dp_mst_topology_mgr *mgr) 5127 { 5128 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); 5129 } 5130 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); 5131 5132 /** 5133 * drm_dp_mst_topology_mgr_init - initialise a topology manager 5134 * @mgr: manager struct to initialise 5135 * @dev: device providing this structure - for i2c addition. 5136 * @aux: DP helper aux channel to talk to this device 5137 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit 5138 * @max_payloads: maximum number of payloads this GPU can source 5139 * @conn_base_id: the connector object ID the MST device is connected to.
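 *
 * An illustrative call at connector setup time (every value here is
 * hypothetical; real drivers size the limits to their hardware):
 *
 *	ret = drm_dp_mst_topology_mgr_init(&priv->mst_mgr, dev, &priv->aux,
 *					   16, 4, connector->base.id);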
5140 * 5141 * Return 0 for success, or negative error code on failure 5142 */ 5143 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, 5144 struct drm_device *dev, struct drm_dp_aux *aux, 5145 int max_dpcd_transaction_bytes, 5146 int max_payloads, int conn_base_id) 5147 { 5148 struct drm_dp_mst_topology_state *mst_state; 5149 5150 mutex_init(&mgr->lock); 5151 mutex_init(&mgr->qlock); 5152 mutex_init(&mgr->payload_lock); 5153 mutex_init(&mgr->delayed_destroy_lock); 5154 mutex_init(&mgr->up_req_lock); 5155 mutex_init(&mgr->probe_lock); 5156 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) 5157 mutex_init(&mgr->topology_ref_history_lock); 5158 #endif 5159 INIT_LIST_HEAD(&mgr->tx_msg_downq); 5160 INIT_LIST_HEAD(&mgr->destroy_port_list); 5161 INIT_LIST_HEAD(&mgr->destroy_branch_device_list); 5162 INIT_LIST_HEAD(&mgr->up_req_list); 5163 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); 5164 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); 5165 INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); 5166 INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); 5167 #ifdef __NetBSD__ 5168 DRM_INIT_WAITQUEUE(&mgr->tx_waitq, "dpmstwait"); 5169 #else 5170 init_waitqueue_head(&mgr->tx_waitq); 5171 #endif 5172 mgr->dev = dev; 5173 mgr->aux = aux; 5174 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; 5175 mgr->max_payloads = max_payloads; 5176 mgr->conn_base_id = conn_base_id; 5177 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || 5178 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) 5179 return -EINVAL; 5180 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); 5181 if (!mgr->payloads) 5182 return -ENOMEM; 5183 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); 5184 if (!mgr->proposed_vcpis) 5185 return -ENOMEM; 5186 set_bit(0, &mgr->payload_mask); 5187 5188 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); 5189 if (mst_state == NULL) 5190 return -ENOMEM; 5191 5192 mst_state->mgr = mgr; 5193 INIT_LIST_HEAD(&mst_state->vcpis); 5194 5195 drm_atomic_private_obj_init(dev, &mgr->base, 5196 &mst_state->base, 5197 &drm_dp_mst_topology_state_funcs); 5198 5199 return 0; 5200 } 5201 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); 5202 5203 /** 5204 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. 
5205 * @mgr: manager to destroy 5206 */ 5207 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 5208 { 5209 drm_dp_mst_topology_mgr_set_mst(mgr, false); 5210 flush_work(&mgr->work); 5211 cancel_work_sync(&mgr->delayed_destroy_work); 5212 mutex_lock(&mgr->payload_lock); 5213 kfree(mgr->payloads); 5214 mgr->payloads = NULL; 5215 kfree(mgr->proposed_vcpis); 5216 mgr->proposed_vcpis = NULL; 5217 mutex_unlock(&mgr->payload_lock); 5218 mgr->dev = NULL; 5219 mgr->aux = NULL; 5220 drm_atomic_private_obj_fini(&mgr->base); 5221 mgr->funcs = NULL; 5222 5223 #ifdef __NetBSD__ 5224 DRM_DESTROY_WAITQUEUE(&mgr->tx_waitq); 5225 #endif 5226 mutex_destroy(&mgr->delayed_destroy_lock); 5227 mutex_destroy(&mgr->payload_lock); 5228 mutex_destroy(&mgr->qlock); 5229 mutex_destroy(&mgr->lock); 5230 mutex_destroy(&mgr->up_req_lock); 5231 mutex_destroy(&mgr->probe_lock); 5232 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) 5233 mutex_destroy(&mgr->topology_ref_history_lock); 5234 #endif 5235 } 5236 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); 5237 5238 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num) 5239 { 5240 int i; 5241 5242 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS) 5243 return false; 5244 5245 for (i = 0; i < num - 1; i++) { 5246 if (msgs[i].flags & I2C_M_RD || 5247 msgs[i].len > 0xff) 5248 return false; 5249 } 5250 5251 return msgs[num - 1].flags & I2C_M_RD && 5252 msgs[num - 1].len <= 0xff; 5253 } 5254 5255 /* I2C device */ 5256 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, 5257 int num) 5258 { 5259 struct drm_dp_aux *aux = adapter->algo_data; 5260 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); 5261 struct drm_dp_mst_branch *mstb; 5262 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 5263 unsigned int i; 5264 struct drm_dp_sideband_msg_req_body msg; 5265 struct drm_dp_sideband_msg_tx *txmsg = NULL; 5266 int ret; 5267 5268 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent); 5269 if (!mstb) 5270 return -EREMOTEIO; 5271 5272 if (!remote_i2c_read_ok(msgs, num)) { 5273 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); 5274 ret = -EIO; 5275 goto out; 5276 } 5277 5278 memset(&msg, 0, sizeof(msg)); 5279 msg.req_type = DP_REMOTE_I2C_READ; 5280 msg.u.i2c_read.num_transactions = num - 1; 5281 msg.u.i2c_read.port_number = port->port_num; 5282 for (i = 0; i < num - 1; i++) { 5283 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; 5284 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; 5285 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; 5286 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP); 5287 } 5288 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; 5289 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; 5290 5291 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 5292 if (!txmsg) { 5293 ret = -ENOMEM; 5294 goto out; 5295 } 5296 5297 txmsg->dst = mstb; 5298 drm_dp_encode_sideband_req(&msg, txmsg); 5299 5300 drm_dp_queue_down_tx(mgr, txmsg); 5301 5302 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 5303 if (ret > 0) { 5304 5305 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { 5306 ret = -EREMOTEIO; 5307 goto out; 5308 } 5309 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { 5310 ret = -EIO; 5311 goto out; 5312 } 5313 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); 5314 ret = num; 5315 } 5316 out: 5317 kfree(txmsg); 5318 drm_dp_mst_topology_put_mstb(mstb); 5319 
return ret; 5320 } 5321 5322 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter) 5323 { 5324 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 5325 I2C_FUNC_SMBUS_READ_BLOCK_DATA | 5326 I2C_FUNC_SMBUS_BLOCK_PROC_CALL | 5327 I2C_FUNC_10BIT_ADDR; 5328 } 5329 5330 static const struct i2c_algorithm drm_dp_mst_i2c_algo = { 5331 .functionality = drm_dp_mst_i2c_functionality, 5332 .master_xfer = drm_dp_mst_i2c_xfer, 5333 }; 5334 5335 /** 5336 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX 5337 * @aux: DisplayPort AUX channel 5338 * 5339 * Returns 0 on success or a negative error code on failure. 5340 */ 5341 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux) 5342 { 5343 aux->ddc.algo = &drm_dp_mst_i2c_algo; 5344 aux->ddc.algo_data = aux; 5345 aux->ddc.retries = 3; 5346 5347 aux->ddc.class = I2C_CLASS_DDC; 5348 aux->ddc.owner = THIS_MODULE; 5349 aux->ddc.dev.parent = aux->dev; 5350 #ifndef __NetBSD__ /* XXX of? */ 5351 aux->ddc.dev.of_node = aux->dev->of_node; 5352 #endif 5353 5354 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), 5355 sizeof(aux->ddc.name)); 5356 5357 return i2c_add_adapter(&aux->ddc); 5358 } 5359 5360 /** 5361 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter 5362 * @aux: DisplayPort AUX channel 5363 */ 5364 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) 5365 { 5366 i2c_del_adapter(&aux->ddc); 5367 } 5368 5369 /** 5370 * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device 5371 * @port: The port to check 5372 * 5373 * A single physical MST hub object can be represented in the topology 5374 * by multiple branches, with virtual ports between those branches. 5375 * 5376 * As of DP1.4, An MST hub with internal (virtual) ports must expose 5377 * certain DPCD registers over those ports. See sections 2.6.1.1.1 5378 * and 2.6.1.1.2 of Display Port specification v1.4 for details. 5379 * 5380 * May acquire mgr->lock 5381 * 5382 * Returns: 5383 * true if the port is a virtual DP peer device, false otherwise 5384 */ 5385 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port) 5386 { 5387 struct drm_dp_mst_port *downstream_port; 5388 5389 if (!port || port->dpcd_rev < DP_DPCD_REV_14) 5390 return false; 5391 5392 /* Virtual DP Sink (Internal Display Panel) */ 5393 if (port->port_num >= 8) 5394 return true; 5395 5396 /* DP-to-HDMI Protocol Converter */ 5397 if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV && 5398 !port->mcs && 5399 port->ldps) 5400 return true; 5401 5402 /* DP-to-DP */ 5403 mutex_lock(&port->mgr->lock); 5404 if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && 5405 port->mstb && 5406 port->mstb->num_ports == 2) { 5407 list_for_each_entry(downstream_port, &port->mstb->ports, next) { 5408 if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK && 5409 !downstream_port->input) { 5410 mutex_unlock(&port->mgr->lock); 5411 return true; 5412 } 5413 } 5414 } 5415 mutex_unlock(&port->mgr->lock); 5416 5417 return false; 5418 } 5419 5420 /** 5421 * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC 5422 * @port: The port to check. A leaf of the MST tree with an attached display. 5423 * 5424 * Depending on the situation, DSC may be enabled via the endpoint aux, 5425 * the immediately upstream aux, or the connector's physical aux. 5426 * 5427 * This is both the correct aux to read DSC_CAPABILITY and the 5428 * correct aux to write DSC_ENABLED. 
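 *
 * A hedged usage sketch (hypothetical driver snippet):
 *
 *	bool dsc_supported = false;
 *	u8 dsc_dpcd;
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *
 *	if (dsc_aux &&
 *	    drm_dp_dpcd_readb(dsc_aux, DP_DSC_SUPPORT, &dsc_dpcd) == 1)
 *		dsc_supported = dsc_dpcd & DP_DSC_DECOMPRESSION_IS_SUPPORTED;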
5429 * 5430 * This operation can be expensive (up to four aux reads), so 5431 * the caller should cache the return. 5432 * 5433 * Returns: 5434 * NULL if DSC cannot be enabled on this port, otherwise the aux device 5435 */ 5436 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) 5437 { 5438 struct drm_dp_mst_port *immediate_upstream_port; 5439 struct drm_dp_mst_port *fec_port; 5440 struct drm_dp_desc desc = { 0 }; 5441 u8 endpoint_fec; 5442 u8 endpoint_dsc; 5443 5444 if (!port) 5445 return NULL; 5446 5447 if (port->parent->port_parent) 5448 immediate_upstream_port = port->parent->port_parent; 5449 else 5450 immediate_upstream_port = NULL; 5451 5452 fec_port = immediate_upstream_port; 5453 while (fec_port) { 5454 /* 5455 * Each physical link (i.e. not a virtual port) between the 5456 * output and the primary device must support FEC 5457 */ 5458 if (!drm_dp_mst_is_virtual_dpcd(fec_port) && 5459 !fec_port->fec_capable) 5460 return NULL; 5461 5462 fec_port = fec_port->parent->port_parent; 5463 } 5464 5465 /* DP-to-DP peer device */ 5466 if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) { 5467 u8 upstream_dsc; 5468 5469 if (drm_dp_dpcd_read(&port->aux, 5470 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) 5471 return NULL; 5472 if (drm_dp_dpcd_read(&port->aux, 5473 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) 5474 return NULL; 5475 if (drm_dp_dpcd_read(&immediate_upstream_port->aux, 5476 DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) 5477 return NULL; 5478 5479 /* Enpoint decompression with DP-to-DP peer device */ 5480 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) && 5481 (endpoint_fec & DP_FEC_CAPABLE) && 5482 (upstream_dsc & 0x2) /* DSC passthrough */) 5483 return &port->aux; 5484 5485 /* Virtual DPCD decompression with DP-to-DP peer device */ 5486 return &immediate_upstream_port->aux; 5487 } 5488 5489 /* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */ 5490 if (drm_dp_mst_is_virtual_dpcd(port)) 5491 return &port->aux; 5492 5493 /* 5494 * Synaptics quirk 5495 * Applies to ports for which: 5496 * - Physical aux has Synaptics OUI 5497 * - DPv1.4 or higher 5498 * - Port is on primary branch device 5499 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG) 5500 */ 5501 if (drm_dp_read_desc(port->mgr->aux, &desc, true)) 5502 return NULL; 5503 5504 if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) && 5505 port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 && 5506 port->parent == port->mgr->mst_primary) { 5507 u8 downstreamport; 5508 5509 if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT, 5510 &downstreamport, 1) < 0) 5511 return NULL; 5512 5513 if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) && 5514 ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK) 5515 != DP_DWN_STRM_PORT_TYPE_ANALOG)) 5516 return port->mgr->aux; 5517 } 5518 5519 /* 5520 * The check below verifies if the MST sink 5521 * connected to the GPU is capable of DSC - 5522 * therefore the endpoint needs to be 5523 * both DSC and FEC capable. 5524 */ 5525 if (drm_dp_dpcd_read(&port->aux, 5526 DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) 5527 return NULL; 5528 if (drm_dp_dpcd_read(&port->aux, 5529 DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) 5530 return NULL; 5531 if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) && 5532 (endpoint_fec & DP_FEC_CAPABLE)) 5533 return &port->aux; 5534 5535 return NULL; 5536 } 5537 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port); 5538