1 /* 2 * Copyright © 2014 Red Hat 3 * 4 * Permission to use, copy, modify, distribute, and sell this software and its 5 * documentation for any purpose is hereby granted without fee, provided that 6 * the above copyright notice appear in all copies and that both that copyright 7 * notice and this permission notice appear in supporting documentation, and 8 * that the name of the copyright holders not be used in advertising or 9 * publicity pertaining to distribution of the software without specific, 10 * written prior permission. The copyright holders make no representations 11 * about the suitability of this software for any purpose. It is provided "as 12 * is" without express or implied warranty. 13 * 14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, 15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO 16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR 17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE 20 * OF THIS SOFTWARE. 21 */ 22 23 #include <linux/kernel.h> 24 #include <linux/delay.h> 25 #include <linux/init.h> 26 #include <linux/errno.h> 27 #include <linux/sched.h> 28 #include <linux/seq_file.h> 29 #include <linux/i2c.h> 30 #include <drm/drm_dp_mst_helper.h> 31 #include <drm/drmP.h> 32 33 #include <drm/drm_fixed.h> 34 35 /** 36 * DOC: dp mst helper 37 * 38 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport 39 * protocol. The helpers contain a topology manager and bandwidth manager. 40 * The helpers encapsulate the sending and received of sideband msgs. 
41 */ 42 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 43 char *buf); 44 static int test_calc_pbn_mode(void); 45 46 static void drm_dp_put_port(struct drm_dp_mst_port *port); 47 48 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, 49 int id, 50 struct drm_dp_payload *payload); 51 52 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 53 struct drm_dp_mst_port *port, 54 int offset, int size, u8 *bytes); 55 56 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 57 struct drm_dp_mst_branch *mstb); 58 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 59 struct drm_dp_mst_branch *mstb, 60 struct drm_dp_mst_port *port); 61 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 62 u8 *guid); 63 64 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux); 65 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux); 66 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr); 67 /* sideband msg handling */ 68 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles) 69 { 70 u8 bitmask = 0x80; 71 u8 bitshift = 7; 72 u8 array_index = 0; 73 int number_of_bits = num_nibbles * 4; 74 u8 remainder = 0; 75 76 while (number_of_bits != 0) { 77 number_of_bits--; 78 remainder <<= 1; 79 remainder |= (data[array_index] & bitmask) >> bitshift; 80 bitmask >>= 1; 81 bitshift--; 82 if (bitmask == 0) { 83 bitmask = 0x80; 84 bitshift = 7; 85 array_index++; 86 } 87 if ((remainder & 0x10) == 0x10) 88 remainder ^= 0x13; 89 } 90 91 number_of_bits = 4; 92 while (number_of_bits != 0) { 93 number_of_bits--; 94 remainder <<= 1; 95 if ((remainder & 0x10) != 0) 96 remainder ^= 0x13; 97 } 98 99 return remainder; 100 } 101 102 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes) 103 { 104 u8 bitmask = 0x80; 105 u8 bitshift = 7; 106 u8 array_index = 0; 107 int number_of_bits = number_of_bytes * 8; 108 u16 remainder = 0; 109 110 while (number_of_bits != 0) { 111 number_of_bits--; 112 remainder <<= 1; 113 remainder |= (data[array_index] & bitmask) >> bitshift; 114 bitmask >>= 1; 115 bitshift--; 116 if (bitmask == 0) { 117 bitmask = 0x80; 118 bitshift = 7; 119 array_index++; 120 } 121 if ((remainder & 0x100) == 0x100) 122 remainder ^= 0xd5; 123 } 124 125 number_of_bits = 8; 126 while (number_of_bits != 0) { 127 number_of_bits--; 128 remainder <<= 1; 129 if ((remainder & 0x100) != 0) 130 remainder ^= 0xd5; 131 } 132 133 return remainder & 0xff; 134 } 135 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr) 136 { 137 u8 size = 3; 138 size += (hdr->lct / 2); 139 return size; 140 } 141 142 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, 143 u8 *buf, int *len) 144 { 145 int idx = 0; 146 int i; 147 u8 crc4; 148 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf); 149 for (i = 0; i < (hdr->lct / 2); i++) 150 buf[idx++] = hdr->rad[i]; 151 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) | 152 (hdr->msg_len & 0x3f); 153 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4); 154 155 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1); 156 buf[idx - 1] |= (crc4 & 0xf); 157 158 *len = idx; 159 } 160 161 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, 162 u8 *buf, int buflen, u8 *hdrlen) 163 { 164 u8 crc4; 165 u8 len; 166 int i; 167 u8 idx; 168 if (buf[0] == 0) 169 return false; 170 len = 3; 171 len += ((buf[0] & 0xf0) >> 4) / 2; 172 if (len 
> buflen) 173 return false; 174 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1); 175 176 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) { 177 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]); 178 return false; 179 } 180 181 hdr->lct = (buf[0] & 0xf0) >> 4; 182 hdr->lcr = (buf[0] & 0xf); 183 idx = 1; 184 for (i = 0; i < (hdr->lct / 2); i++) 185 hdr->rad[i] = buf[idx++]; 186 hdr->broadcast = (buf[idx] >> 7) & 0x1; 187 hdr->path_msg = (buf[idx] >> 6) & 0x1; 188 hdr->msg_len = buf[idx] & 0x3f; 189 idx++; 190 hdr->somt = (buf[idx] >> 7) & 0x1; 191 hdr->eomt = (buf[idx] >> 6) & 0x1; 192 hdr->seqno = (buf[idx] >> 4) & 0x1; 193 idx++; 194 *hdrlen = idx; 195 return true; 196 } 197 198 static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, 199 struct drm_dp_sideband_msg_tx *raw) 200 { 201 int idx = 0; 202 int i; 203 u8 *buf = raw->msg; 204 buf[idx++] = req->req_type & 0x7f; 205 206 switch (req->req_type) { 207 case DP_ENUM_PATH_RESOURCES: 208 buf[idx] = (req->u.port_num.port_number & 0xf) << 4; 209 idx++; 210 break; 211 case DP_ALLOCATE_PAYLOAD: 212 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 | 213 (req->u.allocate_payload.number_sdp_streams & 0xf); 214 idx++; 215 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f); 216 idx++; 217 buf[idx] = (req->u.allocate_payload.pbn >> 8); 218 idx++; 219 buf[idx] = (req->u.allocate_payload.pbn & 0xff); 220 idx++; 221 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) { 222 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) | 223 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf); 224 idx++; 225 } 226 if (req->u.allocate_payload.number_sdp_streams & 1) { 227 i = req->u.allocate_payload.number_sdp_streams - 1; 228 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4; 229 idx++; 230 } 231 break; 232 case DP_QUERY_PAYLOAD: 233 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4; 234 idx++; 235 buf[idx] = (req->u.query_payload.vcpi & 0x7f); 236 idx++; 237 break; 238 case DP_REMOTE_DPCD_READ: 239 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4; 240 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf; 241 idx++; 242 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8; 243 idx++; 244 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff); 245 idx++; 246 buf[idx] = (req->u.dpcd_read.num_bytes); 247 idx++; 248 break; 249 250 case DP_REMOTE_DPCD_WRITE: 251 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4; 252 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf; 253 idx++; 254 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8; 255 idx++; 256 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff); 257 idx++; 258 buf[idx] = (req->u.dpcd_write.num_bytes); 259 idx++; 260 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes); 261 idx += req->u.dpcd_write.num_bytes; 262 break; 263 case DP_REMOTE_I2C_READ: 264 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4; 265 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3); 266 idx++; 267 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) { 268 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f; 269 idx++; 270 buf[idx] = req->u.i2c_read.transactions[i].num_bytes; 271 idx++; 272 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); 273 idx += req->u.i2c_read.transactions[i].num_bytes; 274 275 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; 276 buf[idx] |= 
(req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); 277 idx++; 278 } 279 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f; 280 idx++; 281 buf[idx] = (req->u.i2c_read.num_bytes_read); 282 idx++; 283 break; 284 285 case DP_REMOTE_I2C_WRITE: 286 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4; 287 idx++; 288 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f; 289 idx++; 290 buf[idx] = (req->u.i2c_write.num_bytes); 291 idx++; 292 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); 293 idx += req->u.i2c_write.num_bytes; 294 break; 295 } 296 raw->cur_len = idx; 297 } 298 299 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) 300 { 301 u8 crc4; 302 crc4 = drm_dp_msg_data_crc4(msg, len); 303 msg[len] = crc4; 304 } 305 306 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep, 307 struct drm_dp_sideband_msg_tx *raw) 308 { 309 int idx = 0; 310 u8 *buf = raw->msg; 311 312 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f); 313 314 raw->cur_len = idx; 315 } 316 317 /* this adds a chunk of msg to the builder to get the final msg */ 318 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, 319 u8 *replybuf, u8 replybuflen, bool hdr) 320 { 321 int ret; 322 u8 crc4; 323 324 if (hdr) { 325 u8 hdrlen; 326 struct drm_dp_sideband_msg_hdr recv_hdr; 327 ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen); 328 if (ret == false) { 329 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false); 330 return false; 331 } 332 333 /* get length contained in this portion */ 334 msg->curchunk_len = recv_hdr.msg_len; 335 msg->curchunk_hdrlen = hdrlen; 336 337 /* we have already gotten an somt - don't bother parsing */ 338 if (recv_hdr.somt && msg->have_somt) 339 return false; 340 341 if (recv_hdr.somt) { 342 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr)); 343 msg->have_somt = true; 344 } 345 if (recv_hdr.eomt) 346 msg->have_eomt = true; 347 348 /* copy the bytes for the remainder of this header chunk */ 349 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen)); 350 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx); 351 } else { 352 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen); 353 msg->curchunk_idx += replybuflen; 354 } 355 356 if (msg->curchunk_idx >= msg->curchunk_len) { 357 /* do CRC */ 358 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1); 359 /* copy chunk into bigger msg */ 360 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1); 361 msg->curlen += msg->curchunk_len - 1; 362 } 363 return true; 364 } 365 366 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw, 367 struct drm_dp_sideband_msg_reply_body *repmsg) 368 { 369 int idx = 1; 370 int i; 371 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16); 372 idx += 16; 373 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf; 374 idx++; 375 if (idx > raw->curlen) 376 goto fail_len; 377 for (i = 0; i < repmsg->u.link_addr.nports; i++) { 378 if (raw->msg[idx] & 0x80) 379 repmsg->u.link_addr.ports[i].input_port = 1; 380 381 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7; 382 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf); 383 384 idx++; 385 if (idx > raw->curlen) 386 goto fail_len; 387 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1; 388 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1; 389 if 
(repmsg->u.link_addr.ports[i].input_port == 0) 390 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1; 391 idx++; 392 if (idx > raw->curlen) 393 goto fail_len; 394 if (repmsg->u.link_addr.ports[i].input_port == 0) { 395 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]); 396 idx++; 397 if (idx > raw->curlen) 398 goto fail_len; 399 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16); 400 idx += 16; 401 if (idx > raw->curlen) 402 goto fail_len; 403 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf; 404 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf); 405 idx++; 406 407 } 408 if (idx > raw->curlen) 409 goto fail_len; 410 } 411 412 return true; 413 fail_len: 414 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); 415 return false; 416 } 417 418 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw, 419 struct drm_dp_sideband_msg_reply_body *repmsg) 420 { 421 int idx = 1; 422 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf; 423 idx++; 424 if (idx > raw->curlen) 425 goto fail_len; 426 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx]; 427 if (idx > raw->curlen) 428 goto fail_len; 429 430 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes); 431 return true; 432 fail_len: 433 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); 434 return false; 435 } 436 437 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw, 438 struct drm_dp_sideband_msg_reply_body *repmsg) 439 { 440 int idx = 1; 441 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf; 442 idx++; 443 if (idx > raw->curlen) 444 goto fail_len; 445 return true; 446 fail_len: 447 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen); 448 return false; 449 } 450 451 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw, 452 struct drm_dp_sideband_msg_reply_body *repmsg) 453 { 454 int idx = 1; 455 456 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf); 457 idx++; 458 if (idx > raw->curlen) 459 goto fail_len; 460 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx]; 461 idx++; 462 /* TODO check */ 463 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes); 464 return true; 465 fail_len: 466 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); 467 return false; 468 } 469 470 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw, 471 struct drm_dp_sideband_msg_reply_body *repmsg) 472 { 473 int idx = 1; 474 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; 475 idx++; 476 if (idx > raw->curlen) 477 goto fail_len; 478 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 479 idx += 2; 480 if (idx > raw->curlen) 481 goto fail_len; 482 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 483 idx += 2; 484 if (idx > raw->curlen) 485 goto fail_len; 486 return true; 487 fail_len: 488 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen); 489 return false; 490 } 491 492 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw, 493 struct drm_dp_sideband_msg_reply_body *repmsg) 494 { 495 int idx = 1; 496 
repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 497 idx++; 498 if (idx > raw->curlen) 499 goto fail_len; 500 repmsg->u.allocate_payload.vcpi = raw->msg[idx]; 501 idx++; 502 if (idx > raw->curlen) 503 goto fail_len; 504 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]); 505 idx += 2; 506 if (idx > raw->curlen) 507 goto fail_len; 508 return true; 509 fail_len: 510 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen); 511 return false; 512 } 513 514 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw, 515 struct drm_dp_sideband_msg_reply_body *repmsg) 516 { 517 int idx = 1; 518 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf; 519 idx++; 520 if (idx > raw->curlen) 521 goto fail_len; 522 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); 523 idx += 2; 524 if (idx > raw->curlen) 525 goto fail_len; 526 return true; 527 fail_len: 528 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen); 529 return false; 530 } 531 532 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, 533 struct drm_dp_sideband_msg_reply_body *msg) 534 { 535 memset(msg, 0, sizeof(*msg)); 536 msg->reply_type = (raw->msg[0] & 0x80) >> 7; 537 msg->req_type = (raw->msg[0] & 0x7f); 538 539 if (msg->reply_type) { 540 memcpy(msg->u.nak.guid, &raw->msg[1], 16); 541 msg->u.nak.reason = raw->msg[17]; 542 msg->u.nak.nak_data = raw->msg[18]; 543 return false; 544 } 545 546 switch (msg->req_type) { 547 case DP_LINK_ADDRESS: 548 return drm_dp_sideband_parse_link_address(raw, msg); 549 case DP_QUERY_PAYLOAD: 550 return drm_dp_sideband_parse_query_payload_ack(raw, msg); 551 case DP_REMOTE_DPCD_READ: 552 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg); 553 case DP_REMOTE_DPCD_WRITE: 554 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg); 555 case DP_REMOTE_I2C_READ: 556 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg); 557 case DP_ENUM_PATH_RESOURCES: 558 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg); 559 case DP_ALLOCATE_PAYLOAD: 560 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg); 561 default: 562 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type); 563 return false; 564 } 565 } 566 567 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw, 568 struct drm_dp_sideband_msg_req_body *msg) 569 { 570 int idx = 1; 571 572 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; 573 idx++; 574 if (idx > raw->curlen) 575 goto fail_len; 576 577 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16); 578 idx += 16; 579 if (idx > raw->curlen) 580 goto fail_len; 581 582 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1; 583 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1; 584 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1; 585 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1; 586 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7); 587 idx++; 588 return true; 589 fail_len: 590 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); 591 return false; 592 } 593 594 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw, 595 struct drm_dp_sideband_msg_req_body *msg) 596 { 597 int idx = 1; 598 599 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4; 600 idx++; 601 if (idx > raw->curlen) 602 goto 
fail_len; 603 604 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16); 605 idx += 16; 606 if (idx > raw->curlen) 607 goto fail_len; 608 609 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]); 610 idx++; 611 return true; 612 fail_len: 613 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); 614 return false; 615 } 616 617 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, 618 struct drm_dp_sideband_msg_req_body *msg) 619 { 620 memset(msg, 0, sizeof(*msg)); 621 msg->req_type = (raw->msg[0] & 0x7f); 622 623 switch (msg->req_type) { 624 case DP_CONNECTION_STATUS_NOTIFY: 625 return drm_dp_sideband_parse_connection_status_notify(raw, msg); 626 case DP_RESOURCE_STATUS_NOTIFY: 627 return drm_dp_sideband_parse_resource_status_notify(raw, msg); 628 default: 629 DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type); 630 return false; 631 } 632 } 633 634 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) 635 { 636 struct drm_dp_sideband_msg_req_body req; 637 638 req.req_type = DP_REMOTE_DPCD_WRITE; 639 req.u.dpcd_write.port_number = port_num; 640 req.u.dpcd_write.dpcd_address = offset; 641 req.u.dpcd_write.num_bytes = num_bytes; 642 req.u.dpcd_write.bytes = bytes; 643 drm_dp_encode_sideband_req(&req, msg); 644 645 return 0; 646 } 647 648 static int build_link_address(struct drm_dp_sideband_msg_tx *msg) 649 { 650 struct drm_dp_sideband_msg_req_body req; 651 652 req.req_type = DP_LINK_ADDRESS; 653 drm_dp_encode_sideband_req(&req, msg); 654 return 0; 655 } 656 657 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) 658 { 659 struct drm_dp_sideband_msg_req_body req; 660 661 req.req_type = DP_ENUM_PATH_RESOURCES; 662 req.u.port_num.port_number = port_num; 663 drm_dp_encode_sideband_req(&req, msg); 664 msg->path_msg = true; 665 return 0; 666 } 667 668 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, 669 u8 vcpi, uint16_t pbn) 670 { 671 struct drm_dp_sideband_msg_req_body req; 672 memset(&req, 0, sizeof(req)); 673 req.req_type = DP_ALLOCATE_PAYLOAD; 674 req.u.allocate_payload.port_number = port_num; 675 req.u.allocate_payload.vcpi = vcpi; 676 req.u.allocate_payload.pbn = pbn; 677 drm_dp_encode_sideband_req(&req, msg); 678 msg->path_msg = true; 679 return 0; 680 } 681 682 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, 683 struct drm_dp_vcpi *vcpi) 684 { 685 int ret; 686 687 mutex_lock(&mgr->payload_lock); 688 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); 689 if (ret > mgr->max_payloads) { 690 ret = -EINVAL; 691 DRM_DEBUG_KMS("out of payload ids %d\n", ret); 692 goto out_unlock; 693 } 694 695 set_bit(ret, &mgr->payload_mask); 696 vcpi->vcpi = ret; 697 mgr->proposed_vcpis[ret - 1] = vcpi; 698 out_unlock: 699 mutex_unlock(&mgr->payload_lock); 700 return ret; 701 } 702 703 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, 704 int id) 705 { 706 if (id == 0) 707 return; 708 709 mutex_lock(&mgr->payload_lock); 710 DRM_DEBUG_KMS("putting payload %d\n", id); 711 clear_bit(id, &mgr->payload_mask); 712 mgr->proposed_vcpis[id - 1] = NULL; 713 mutex_unlock(&mgr->payload_lock); 714 } 715 716 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, 717 struct drm_dp_sideband_msg_tx *txmsg) 718 { 719 bool ret; 720 mutex_lock(&mgr->qlock); 721 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || 722 txmsg->state == 
DRM_DP_SIDEBAND_TX_TIMEOUT); 723 mutex_unlock(&mgr->qlock); 724 return ret; 725 } 726 727 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, 728 struct drm_dp_sideband_msg_tx *txmsg) 729 { 730 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; 731 int ret; 732 733 ret = wait_event_timeout(mgr->tx_waitq, 734 check_txmsg_state(mgr, txmsg), 735 (4 * HZ)); 736 mutex_lock(&mstb->mgr->qlock); 737 if (ret > 0) { 738 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { 739 ret = -EIO; 740 goto out; 741 } 742 } else { 743 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); 744 745 /* dump some state */ 746 ret = -EIO; 747 748 /* remove from q */ 749 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || 750 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { 751 list_del(&txmsg->next); 752 } 753 754 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || 755 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { 756 mstb->tx_slots[txmsg->seqno] = NULL; 757 } 758 } 759 out: 760 mutex_unlock(&mgr->qlock); 761 762 return ret; 763 } 764 765 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) 766 { 767 struct drm_dp_mst_branch *mstb; 768 769 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL); 770 if (!mstb) 771 return NULL; 772 773 mstb->lct = lct; 774 if (lct > 1) 775 memcpy(mstb->rad, rad, lct / 2); 776 INIT_LIST_HEAD(&mstb->ports); 777 kref_init(&mstb->kref); 778 return mstb; 779 } 780 781 static void drm_dp_destroy_mst_branch_device(struct kref *kref) 782 { 783 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); 784 struct drm_dp_mst_port *port, *tmp; 785 bool wake_tx = false; 786 787 cancel_work_sync(&mstb->mgr->work); 788 789 /* 790 * destroy all ports - don't need lock 791 * as there are no more references to the mst branch 792 * device at this point. 
793 */ 794 list_for_each_entry_safe(port, tmp, &mstb->ports, next) { 795 list_del(&port->next); 796 drm_dp_put_port(port); 797 } 798 799 /* drop any tx slots msg */ 800 mutex_lock(&mstb->mgr->qlock); 801 if (mstb->tx_slots[0]) { 802 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 803 mstb->tx_slots[0] = NULL; 804 wake_tx = true; 805 } 806 if (mstb->tx_slots[1]) { 807 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 808 mstb->tx_slots[1] = NULL; 809 wake_tx = true; 810 } 811 mutex_unlock(&mstb->mgr->qlock); 812 813 if (wake_tx) 814 wake_up(&mstb->mgr->tx_waitq); 815 kfree(mstb); 816 } 817 818 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) 819 { 820 kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device); 821 } 822 823 824 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) 825 { 826 struct drm_dp_mst_branch *mstb; 827 828 switch (old_pdt) { 829 case DP_PEER_DEVICE_DP_LEGACY_CONV: 830 case DP_PEER_DEVICE_SST_SINK: 831 /* remove i2c over sideband */ 832 drm_dp_mst_unregister_i2c_bus(&port->aux); 833 break; 834 case DP_PEER_DEVICE_MST_BRANCHING: 835 mstb = port->mstb; 836 port->mstb = NULL; 837 drm_dp_put_mst_branch_device(mstb); 838 break; 839 } 840 } 841 842 static void drm_dp_destroy_port(struct kref *kref) 843 { 844 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); 845 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 846 if (!port->input) { 847 port->vcpi.num_slots = 0; 848 849 kfree(port->cached_edid); 850 851 /* we can't destroy the connector here, as 852 we might be holding the mode_config.mutex 853 from an EDID retrieval */ 854 if (port->connector) { 855 mutex_lock(&mgr->destroy_connector_lock); 856 list_add(&port->next, &mgr->destroy_connector_list); 857 mutex_unlock(&mgr->destroy_connector_lock); 858 schedule_work(&mgr->destroy_connector_work); 859 return; 860 } 861 drm_dp_port_teardown_pdt(port, port->pdt); 862 863 if (!port->input && port->vcpi.vcpi > 0) 864 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 865 } 866 kfree(port); 867 868 (*mgr->cbs->hotplug)(mgr); 869 } 870 871 static void drm_dp_put_port(struct drm_dp_mst_port *port) 872 { 873 kref_put(&port->kref, drm_dp_destroy_port); 874 } 875 876 static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find) 877 { 878 struct drm_dp_mst_port *port; 879 struct drm_dp_mst_branch *rmstb; 880 if (to_find == mstb) { 881 kref_get(&mstb->kref); 882 return mstb; 883 } 884 list_for_each_entry(port, &mstb->ports, next) { 885 if (port->mstb) { 886 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find); 887 if (rmstb) 888 return rmstb; 889 } 890 } 891 return NULL; 892 } 893 894 static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) 895 { 896 struct drm_dp_mst_branch *rmstb = NULL; 897 mutex_lock(&mgr->lock); 898 if (mgr->mst_primary) 899 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); 900 mutex_unlock(&mgr->lock); 901 return rmstb; 902 } 903 904 static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find) 905 { 906 struct drm_dp_mst_port *port, *mport; 907 908 list_for_each_entry(port, &mstb->ports, next) { 909 if (port == to_find) { 910 kref_get(&port->kref); 911 return port; 912 } 913 if (port->mstb) { 914 mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find); 915 if (mport) 916 
return mport; 917 } 918 } 919 return NULL; 920 } 921 922 static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 923 { 924 struct drm_dp_mst_port *rport = NULL; 925 mutex_lock(&mgr->lock); 926 if (mgr->mst_primary) 927 rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); 928 mutex_unlock(&mgr->lock); 929 return rport; 930 } 931 932 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num) 933 { 934 struct drm_dp_mst_port *port; 935 936 list_for_each_entry(port, &mstb->ports, next) { 937 if (port->port_num == port_num) { 938 kref_get(&port->kref); 939 return port; 940 } 941 } 942 943 return NULL; 944 } 945 946 /* 947 * calculate a new RAD for this MST branch device 948 * if parent has an LCT of 2 then it has 1 nibble of RAD, 949 * if parent has an LCT of 3 then it has 2 nibbles of RAD, 950 */ 951 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, 952 u8 *rad) 953 { 954 int lct = port->parent->lct; 955 int shift = 4; 956 int idx = lct / 2; 957 if (lct > 1) { 958 memcpy(rad, port->parent->rad, idx); 959 shift = (lct % 2) ? 4 : 0; 960 } else 961 rad[0] = 0; 962 963 rad[idx] |= port->port_num << shift; 964 return lct + 1; 965 } 966 967 /* 968 * return sends link address for new mstb 969 */ 970 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) 971 { 972 int ret; 973 u8 rad[6], lct; 974 bool send_link = false; 975 switch (port->pdt) { 976 case DP_PEER_DEVICE_DP_LEGACY_CONV: 977 case DP_PEER_DEVICE_SST_SINK: 978 /* add i2c over sideband */ 979 ret = drm_dp_mst_register_i2c_bus(&port->aux); 980 break; 981 case DP_PEER_DEVICE_MST_BRANCHING: 982 lct = drm_dp_calculate_rad(port, rad); 983 984 port->mstb = drm_dp_add_mst_branch_device(lct, rad); 985 port->mstb->mgr = port->mgr; 986 port->mstb->port_parent = port; 987 988 send_link = true; 989 break; 990 } 991 return send_link; 992 } 993 994 static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, 995 struct drm_dp_mst_port *port) 996 { 997 int ret; 998 if (port->dpcd_rev >= 0x12) { 999 port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); 1000 if (!port->guid_valid) { 1001 ret = drm_dp_send_dpcd_write(mstb->mgr, 1002 port, 1003 DP_GUID, 1004 16, port->guid); 1005 port->guid_valid = true; 1006 } 1007 } 1008 } 1009 1010 static void build_mst_prop_path(struct drm_dp_mst_port *port, 1011 struct drm_dp_mst_branch *mstb, 1012 char *proppath, 1013 size_t proppath_size) 1014 { 1015 int i; 1016 char temp[8]; 1017 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); 1018 for (i = 0; i < (mstb->lct - 1); i++) { 1019 int shift = (i % 2) ? 
0 : 4; 1020 int port_num = mstb->rad[i / 2] >> shift; 1021 snprintf(temp, sizeof(temp), "-%d", port_num); 1022 strlcat(proppath, temp, proppath_size); 1023 } 1024 snprintf(temp, sizeof(temp), "-%d", port->port_num); 1025 strlcat(proppath, temp, proppath_size); 1026 } 1027 1028 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, 1029 struct device *dev, 1030 struct drm_dp_link_addr_reply_port *port_msg) 1031 { 1032 struct drm_dp_mst_port *port; 1033 bool ret; 1034 bool created = false; 1035 int old_pdt = 0; 1036 int old_ddps = 0; 1037 port = drm_dp_get_port(mstb, port_msg->port_number); 1038 if (!port) { 1039 port = kzalloc(sizeof(*port), GFP_KERNEL); 1040 if (!port) 1041 return; 1042 kref_init(&port->kref); 1043 port->parent = mstb; 1044 port->port_num = port_msg->port_number; 1045 port->mgr = mstb->mgr; 1046 port->aux.name = "DPMST"; 1047 port->aux.dev = dev; 1048 created = true; 1049 } else { 1050 old_pdt = port->pdt; 1051 old_ddps = port->ddps; 1052 } 1053 1054 port->pdt = port_msg->peer_device_type; 1055 port->input = port_msg->input_port; 1056 port->mcs = port_msg->mcs; 1057 port->ddps = port_msg->ddps; 1058 port->ldps = port_msg->legacy_device_plug_status; 1059 port->dpcd_rev = port_msg->dpcd_revision; 1060 port->num_sdp_streams = port_msg->num_sdp_streams; 1061 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; 1062 memcpy(port->guid, port_msg->peer_guid, 16); 1063 1064 /* manage mstb port lists with mgr lock - take a reference 1065 for this list */ 1066 if (created) { 1067 mutex_lock(&mstb->mgr->lock); 1068 kref_get(&port->kref); 1069 list_add(&port->next, &mstb->ports); 1070 mutex_unlock(&mstb->mgr->lock); 1071 } 1072 1073 if (old_ddps != port->ddps) { 1074 if (port->ddps) { 1075 drm_dp_check_port_guid(mstb, port); 1076 if (!port->input) 1077 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); 1078 } else { 1079 port->guid_valid = false; 1080 port->available_pbn = 0; 1081 } 1082 } 1083 1084 if (old_pdt != port->pdt && !port->input) { 1085 drm_dp_port_teardown_pdt(port, old_pdt); 1086 1087 ret = drm_dp_port_setup_pdt(port); 1088 if (ret == true) { 1089 drm_dp_send_link_address(mstb->mgr, port->mstb); 1090 port->mstb->link_address_sent = true; 1091 } 1092 } 1093 1094 if (created && !port->input) { 1095 char proppath[255]; 1096 build_mst_prop_path(port, mstb, proppath, sizeof(proppath)); 1097 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); 1098 1099 if (port->port_num >= 8) { 1100 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); 1101 } 1102 } 1103 1104 /* put reference to this port */ 1105 drm_dp_put_port(port); 1106 } 1107 1108 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, 1109 struct drm_dp_connection_status_notify *conn_stat) 1110 { 1111 struct drm_dp_mst_port *port; 1112 int old_pdt; 1113 int old_ddps; 1114 bool dowork = false; 1115 port = drm_dp_get_port(mstb, conn_stat->port_number); 1116 if (!port) 1117 return; 1118 1119 old_ddps = port->ddps; 1120 old_pdt = port->pdt; 1121 port->pdt = conn_stat->peer_device_type; 1122 port->mcs = conn_stat->message_capability_status; 1123 port->ldps = conn_stat->legacy_device_plug_status; 1124 port->ddps = conn_stat->displayport_device_plug_status; 1125 1126 if (old_ddps != port->ddps) { 1127 if (port->ddps) { 1128 drm_dp_check_port_guid(mstb, port); 1129 dowork = true; 1130 } else { 1131 port->guid_valid = false; 1132 port->available_pbn = 0; 1133 } 1134 } 1135 if (old_pdt != port->pdt && !port->input) { 1136 drm_dp_port_teardown_pdt(port, old_pdt); 1137 
1138 if (drm_dp_port_setup_pdt(port)) 1139 dowork = true; 1140 } 1141 1142 drm_dp_put_port(port); 1143 if (dowork) 1144 queue_work(system_long_wq, &mstb->mgr->work); 1145 1146 } 1147 1148 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, 1149 u8 lct, u8 *rad) 1150 { 1151 struct drm_dp_mst_branch *mstb; 1152 struct drm_dp_mst_port *port; 1153 int i; 1154 /* find the port by iterating down */ 1155 1156 mutex_lock(&mgr->lock); 1157 mstb = mgr->mst_primary; 1158 1159 for (i = 0; i < lct - 1; i++) { 1160 int shift = (i % 2) ? 0 : 4; 1161 int port_num = rad[i / 2] >> shift; 1162 1163 list_for_each_entry(port, &mstb->ports, next) { 1164 if (port->port_num == port_num) { 1165 if (!port->mstb) { 1166 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]); 1167 return NULL; 1168 } 1169 1170 mstb = port->mstb; 1171 break; 1172 } 1173 } 1174 } 1175 kref_get(&mstb->kref); 1176 mutex_unlock(&mgr->lock); 1177 return mstb; 1178 } 1179 1180 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1181 struct drm_dp_mst_branch *mstb) 1182 { 1183 struct drm_dp_mst_port *port; 1184 struct drm_dp_mst_branch *mstb_child; 1185 if (!mstb->link_address_sent) { 1186 drm_dp_send_link_address(mgr, mstb); 1187 mstb->link_address_sent = true; 1188 } 1189 list_for_each_entry(port, &mstb->ports, next) { 1190 if (port->input) 1191 continue; 1192 1193 if (!port->ddps) 1194 continue; 1195 1196 if (!port->available_pbn) 1197 drm_dp_send_enum_path_resources(mgr, mstb, port); 1198 1199 if (port->mstb) { 1200 mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); 1201 if (mstb_child) { 1202 drm_dp_check_and_send_link_address(mgr, mstb_child); 1203 drm_dp_put_mst_branch_device(mstb_child); 1204 } 1205 } 1206 } 1207 } 1208 1209 static void drm_dp_mst_link_probe_work(struct work_struct *work) 1210 { 1211 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); 1212 struct drm_dp_mst_branch *mstb; 1213 1214 mutex_lock(&mgr->lock); 1215 mstb = mgr->mst_primary; 1216 if (mstb) { 1217 kref_get(&mstb->kref); 1218 } 1219 mutex_unlock(&mgr->lock); 1220 if (mstb) { 1221 drm_dp_check_and_send_link_address(mgr, mstb); 1222 drm_dp_put_mst_branch_device(mstb); 1223 } 1224 } 1225 1226 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1227 u8 *guid) 1228 { 1229 static u8 zero_guid[16]; 1230 1231 if (!memcmp(guid, zero_guid, 16)) { 1232 u64 salt = get_jiffies_64(); 1233 memcpy(&guid[0], &salt, sizeof(u64)); 1234 memcpy(&guid[8], &salt, sizeof(u64)); 1235 return false; 1236 } 1237 return true; 1238 } 1239 1240 #if 0 1241 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes) 1242 { 1243 struct drm_dp_sideband_msg_req_body req; 1244 1245 req.req_type = DP_REMOTE_DPCD_READ; 1246 req.u.dpcd_read.port_number = port_num; 1247 req.u.dpcd_read.dpcd_address = offset; 1248 req.u.dpcd_read.num_bytes = num_bytes; 1249 drm_dp_encode_sideband_req(&req, msg); 1250 1251 return 0; 1252 } 1253 #endif 1254 1255 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, 1256 bool up, u8 *msg, int len) 1257 { 1258 int ret; 1259 int regbase = up ? 
DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE; 1260 int tosend, total, offset; 1261 int retries = 0; 1262 1263 retry: 1264 total = len; 1265 offset = 0; 1266 do { 1267 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); 1268 1269 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, 1270 &msg[offset], 1271 tosend); 1272 if (ret != tosend) { 1273 if (ret == -EIO && retries < 5) { 1274 retries++; 1275 goto retry; 1276 } 1277 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 1278 1279 return -EIO; 1280 } 1281 offset += tosend; 1282 total -= tosend; 1283 } while (total > 0); 1284 return 0; 1285 } 1286 1287 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, 1288 struct drm_dp_sideband_msg_tx *txmsg) 1289 { 1290 struct drm_dp_mst_branch *mstb = txmsg->dst; 1291 1292 /* both msg slots are full */ 1293 if (txmsg->seqno == -1) { 1294 if (mstb->tx_slots[0] && mstb->tx_slots[1]) { 1295 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__); 1296 return -EAGAIN; 1297 } 1298 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) { 1299 txmsg->seqno = mstb->last_seqno; 1300 mstb->last_seqno ^= 1; 1301 } else if (mstb->tx_slots[0] == NULL) 1302 txmsg->seqno = 0; 1303 else 1304 txmsg->seqno = 1; 1305 mstb->tx_slots[txmsg->seqno] = txmsg; 1306 } 1307 hdr->broadcast = 0; 1308 hdr->path_msg = txmsg->path_msg; 1309 hdr->lct = mstb->lct; 1310 hdr->lcr = mstb->lct - 1; 1311 if (mstb->lct > 1) 1312 memcpy(hdr->rad, mstb->rad, mstb->lct / 2); 1313 hdr->seqno = txmsg->seqno; 1314 return 0; 1315 } 1316 /* 1317 * process a single block of the next message in the sideband queue 1318 */ 1319 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, 1320 struct drm_dp_sideband_msg_tx *txmsg, 1321 bool up) 1322 { 1323 u8 chunk[48]; 1324 struct drm_dp_sideband_msg_hdr hdr; 1325 int len, space, idx, tosend; 1326 int ret; 1327 1328 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr)); 1329 1330 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { 1331 txmsg->seqno = -1; 1332 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; 1333 } 1334 1335 /* make hdr from dst mst - for replies use seqno 1336 otherwise assign one */ 1337 ret = set_hdr_from_dst_qlock(&hdr, txmsg); 1338 if (ret < 0) 1339 return ret; 1340 1341 /* amount left to send in this message */ 1342 len = txmsg->cur_len - txmsg->cur_offset; 1343 1344 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */ 1345 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr); 1346 1347 tosend = min(len, space); 1348 if (len == txmsg->cur_len) 1349 hdr.somt = 1; 1350 if (space >= len) 1351 hdr.eomt = 1; 1352 1353 1354 hdr.msg_len = tosend + 1; 1355 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx); 1356 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); 1357 /* add crc at end */ 1358 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend); 1359 idx += tosend + 1; 1360 1361 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); 1362 if (ret) { 1363 DRM_DEBUG_KMS("sideband msg failed to send\n"); 1364 return ret; 1365 } 1366 1367 txmsg->cur_offset += tosend; 1368 if (txmsg->cur_offset == txmsg->cur_len) { 1369 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; 1370 return 1; 1371 } 1372 return 0; 1373 } 1374 1375 /* must be called holding qlock */ 1376 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1377 { 1378 struct drm_dp_sideband_msg_tx *txmsg; 1379 int ret; 1380 1381 /* construct a chunk from the first msg in the tx_msg queue */ 1382 if (list_empty(&mgr->tx_msg_downq)) { 1383 
mgr->tx_down_in_progress = false; 1384 return; 1385 } 1386 mgr->tx_down_in_progress = true; 1387 1388 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); 1389 ret = process_single_tx_qlock(mgr, txmsg, false); 1390 if (ret == 1) { 1391 /* txmsg is sent it should be in the slots now */ 1392 list_del(&txmsg->next); 1393 } else if (ret) { 1394 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 1395 list_del(&txmsg->next); 1396 if (txmsg->seqno != -1) 1397 txmsg->dst->tx_slots[txmsg->seqno] = NULL; 1398 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 1399 wake_up(&mgr->tx_waitq); 1400 } 1401 if (list_empty(&mgr->tx_msg_downq)) { 1402 mgr->tx_down_in_progress = false; 1403 return; 1404 } 1405 } 1406 1407 /* called holding qlock */ 1408 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) 1409 { 1410 struct drm_dp_sideband_msg_tx *txmsg; 1411 int ret; 1412 1413 /* construct a chunk from the first msg in the tx_msg queue */ 1414 if (list_empty(&mgr->tx_msg_upq)) { 1415 mgr->tx_up_in_progress = false; 1416 return; 1417 } 1418 1419 txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next); 1420 ret = process_single_tx_qlock(mgr, txmsg, true); 1421 if (ret == 1) { 1422 /* up txmsgs aren't put in slots - so free after we send it */ 1423 list_del(&txmsg->next); 1424 kfree(txmsg); 1425 } else if (ret) 1426 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); 1427 mgr->tx_up_in_progress = true; 1428 } 1429 1430 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, 1431 struct drm_dp_sideband_msg_tx *txmsg) 1432 { 1433 mutex_lock(&mgr->qlock); 1434 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); 1435 if (!mgr->tx_down_in_progress) 1436 process_single_down_tx_qlock(mgr); 1437 mutex_unlock(&mgr->qlock); 1438 } 1439 1440 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, 1441 struct drm_dp_mst_branch *mstb) 1442 { 1443 int len; 1444 struct drm_dp_sideband_msg_tx *txmsg; 1445 int ret; 1446 1447 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1448 if (!txmsg) 1449 return -ENOMEM; 1450 1451 txmsg->dst = mstb; 1452 len = build_link_address(txmsg); 1453 1454 drm_dp_queue_down_tx(mgr, txmsg); 1455 1456 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1457 if (ret > 0) { 1458 int i; 1459 1460 if (txmsg->reply.reply_type == 1) 1461 DRM_DEBUG_KMS("link address nak received\n"); 1462 else { 1463 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); 1464 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1465 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i, 1466 txmsg->reply.u.link_addr.ports[i].input_port, 1467 txmsg->reply.u.link_addr.ports[i].peer_device_type, 1468 txmsg->reply.u.link_addr.ports[i].port_number, 1469 txmsg->reply.u.link_addr.ports[i].dpcd_revision, 1470 txmsg->reply.u.link_addr.ports[i].mcs, 1471 txmsg->reply.u.link_addr.ports[i].ddps, 1472 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, 1473 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, 1474 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); 1475 } 1476 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1477 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1478 } 1479 (*mgr->cbs->hotplug)(mgr); 1480 } 1481 } else 1482 DRM_DEBUG_KMS("link address failed %d\n", ret); 1483 1484 kfree(txmsg); 1485 return 0; 1486 } 1487 1488 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, 1489 struct 
drm_dp_mst_branch *mstb, 1490 struct drm_dp_mst_port *port) 1491 { 1492 int len; 1493 struct drm_dp_sideband_msg_tx *txmsg; 1494 int ret; 1495 1496 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1497 if (!txmsg) 1498 return -ENOMEM; 1499 1500 txmsg->dst = mstb; 1501 len = build_enum_path_resources(txmsg, port->port_num); 1502 1503 drm_dp_queue_down_tx(mgr, txmsg); 1504 1505 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1506 if (ret > 0) { 1507 if (txmsg->reply.reply_type == 1) 1508 DRM_DEBUG_KMS("enum path resources nak received\n"); 1509 else { 1510 if (port->port_num != txmsg->reply.u.path_resources.port_number) 1511 DRM_ERROR("got incorrect port in response\n"); 1512 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, 1513 txmsg->reply.u.path_resources.avail_payload_bw_number); 1514 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; 1515 } 1516 } 1517 1518 kfree(txmsg); 1519 return 0; 1520 } 1521 1522 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, 1523 struct drm_dp_mst_port *port, 1524 int id, 1525 int pbn) 1526 { 1527 struct drm_dp_sideband_msg_tx *txmsg; 1528 struct drm_dp_mst_branch *mstb; 1529 int len, ret; 1530 1531 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1532 if (!mstb) 1533 return -EINVAL; 1534 1535 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1536 if (!txmsg) { 1537 ret = -ENOMEM; 1538 goto fail_put; 1539 } 1540 1541 txmsg->dst = mstb; 1542 len = build_allocate_payload(txmsg, port->port_num, 1543 id, 1544 pbn); 1545 1546 drm_dp_queue_down_tx(mgr, txmsg); 1547 1548 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1549 if (ret > 0) { 1550 if (txmsg->reply.reply_type == 1) { 1551 ret = -EINVAL; 1552 } else 1553 ret = 0; 1554 } 1555 kfree(txmsg); 1556 fail_put: 1557 drm_dp_put_mst_branch_device(mstb); 1558 return ret; 1559 } 1560 1561 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 1562 int id, 1563 struct drm_dp_payload *payload) 1564 { 1565 int ret; 1566 1567 ret = drm_dp_dpcd_write_payload(mgr, id, payload); 1568 if (ret < 0) { 1569 payload->payload_state = 0; 1570 return ret; 1571 } 1572 payload->payload_state = DP_PAYLOAD_LOCAL; 1573 return 0; 1574 } 1575 1576 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 1577 struct drm_dp_mst_port *port, 1578 int id, 1579 struct drm_dp_payload *payload) 1580 { 1581 int ret; 1582 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); 1583 if (ret < 0) 1584 return ret; 1585 payload->payload_state = DP_PAYLOAD_REMOTE; 1586 return ret; 1587 } 1588 1589 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, 1590 struct drm_dp_mst_port *port, 1591 int id, 1592 struct drm_dp_payload *payload) 1593 { 1594 DRM_DEBUG_KMS("\n"); 1595 /* its okay for these to fail */ 1596 if (port) { 1597 drm_dp_payload_send_msg(mgr, port, id, 0); 1598 } 1599 1600 drm_dp_dpcd_write_payload(mgr, id, payload); 1601 payload->payload_state = 0; 1602 return 0; 1603 } 1604 1605 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, 1606 int id, 1607 struct drm_dp_payload *payload) 1608 { 1609 payload->payload_state = 0; 1610 return 0; 1611 } 1612 1613 /** 1614 * drm_dp_update_payload_part1() - Execute payload update part 1 1615 * @mgr: manager to use. 1616 * 1617 * This iterates over all proposed virtual channels, and tries to 1618 * allocate space in the link for them. 
For 0->slots transitions, 1619 * this step just writes the VCPI to the MST device. For slots->0 1620 * transitions, this writes the updated VCPIs and removes the 1621 * remote VC payloads. 1622 * 1623 * after calling this the driver should generate ACT and payload 1624 * packets. 1625 */ 1626 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 1627 { 1628 int i; 1629 int cur_slots = 1; 1630 struct drm_dp_payload req_payload; 1631 struct drm_dp_mst_port *port; 1632 1633 mutex_lock(&mgr->payload_lock); 1634 for (i = 0; i < mgr->max_payloads; i++) { 1635 /* solve the current payloads - compare to the hw ones 1636 - update the hw view */ 1637 req_payload.start_slot = cur_slots; 1638 if (mgr->proposed_vcpis[i]) { 1639 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1640 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; 1641 } else { 1642 port = NULL; 1643 req_payload.num_slots = 0; 1644 } 1645 /* work out what is required to happen with this payload */ 1646 if (mgr->payloads[i].start_slot != req_payload.start_slot || 1647 mgr->payloads[i].num_slots != req_payload.num_slots) { 1648 1649 /* need to push an update for this payload */ 1650 if (req_payload.num_slots) { 1651 drm_dp_create_payload_step1(mgr, i + 1, &req_payload); 1652 mgr->payloads[i].num_slots = req_payload.num_slots; 1653 } else if (mgr->payloads[i].num_slots) { 1654 mgr->payloads[i].num_slots = 0; 1655 drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]); 1656 req_payload.payload_state = mgr->payloads[i].payload_state; 1657 } else 1658 req_payload.payload_state = 0; 1659 1660 mgr->payloads[i].start_slot = req_payload.start_slot; 1661 mgr->payloads[i].payload_state = req_payload.payload_state; 1662 } 1663 cur_slots += req_payload.num_slots; 1664 } 1665 mutex_unlock(&mgr->payload_lock); 1666 1667 return 0; 1668 } 1669 EXPORT_SYMBOL(drm_dp_update_payload_part1); 1670 1671 /** 1672 * drm_dp_update_payload_part2() - Execute payload update part 2 1673 * @mgr: manager to use. 1674 * 1675 * This iterates over all proposed virtual channels, and tries to 1676 * allocate space in the link for them. For 0->slots transitions, 1677 * this step writes the remote VC payload commands. For slots->0 1678 * this just resets some internal state. 
1679 */ 1680 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) 1681 { 1682 struct drm_dp_mst_port *port; 1683 int i; 1684 int ret; 1685 mutex_lock(&mgr->payload_lock); 1686 for (i = 0; i < mgr->max_payloads; i++) { 1687 1688 if (!mgr->proposed_vcpis[i]) 1689 continue; 1690 1691 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 1692 1693 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); 1694 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { 1695 ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]); 1696 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 1697 ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]); 1698 } 1699 if (ret) { 1700 mutex_unlock(&mgr->payload_lock); 1701 return ret; 1702 } 1703 } 1704 mutex_unlock(&mgr->payload_lock); 1705 return 0; 1706 } 1707 EXPORT_SYMBOL(drm_dp_update_payload_part2); 1708 1709 #if 0 /* unused as of yet */ 1710 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, 1711 struct drm_dp_mst_port *port, 1712 int offset, int size) 1713 { 1714 int len; 1715 struct drm_dp_sideband_msg_tx *txmsg; 1716 1717 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1718 if (!txmsg) 1719 return -ENOMEM; 1720 1721 len = build_dpcd_read(txmsg, port->port_num, 0, 8); 1722 txmsg->dst = port->parent; 1723 1724 drm_dp_queue_down_tx(mgr, txmsg); 1725 1726 return 0; 1727 } 1728 #endif 1729 1730 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, 1731 struct drm_dp_mst_port *port, 1732 int offset, int size, u8 *bytes) 1733 { 1734 int len; 1735 int ret; 1736 struct drm_dp_sideband_msg_tx *txmsg; 1737 struct drm_dp_mst_branch *mstb; 1738 1739 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 1740 if (!mstb) 1741 return -EINVAL; 1742 1743 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1744 if (!txmsg) { 1745 ret = -ENOMEM; 1746 goto fail_put; 1747 } 1748 1749 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); 1750 txmsg->dst = mstb; 1751 1752 drm_dp_queue_down_tx(mgr, txmsg); 1753 1754 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 1755 if (ret > 0) { 1756 if (txmsg->reply.reply_type == 1) { 1757 ret = -EINVAL; 1758 } else 1759 ret = 0; 1760 } 1761 kfree(txmsg); 1762 fail_put: 1763 drm_dp_put_mst_branch_device(mstb); 1764 return ret; 1765 } 1766 1767 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type) 1768 { 1769 struct drm_dp_sideband_msg_reply_body reply; 1770 1771 reply.reply_type = 1; 1772 reply.req_type = req_type; 1773 drm_dp_encode_sideband_reply(&reply, msg); 1774 return 0; 1775 } 1776 1777 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, 1778 struct drm_dp_mst_branch *mstb, 1779 int req_type, int seqno, bool broadcast) 1780 { 1781 struct drm_dp_sideband_msg_tx *txmsg; 1782 1783 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 1784 if (!txmsg) 1785 return -ENOMEM; 1786 1787 txmsg->dst = mstb; 1788 txmsg->seqno = seqno; 1789 drm_dp_encode_up_ack_reply(txmsg, req_type); 1790 1791 mutex_lock(&mgr->qlock); 1792 list_add_tail(&txmsg->next, &mgr->tx_msg_upq); 1793 if (!mgr->tx_up_in_progress) { 1794 process_single_up_tx_qlock(mgr); 1795 } 1796 mutex_unlock(&mgr->qlock); 1797 return 0; 1798 } 1799 1800 static bool drm_dp_get_vc_payload_bw(int dp_link_bw, 1801 int dp_link_count, 1802 int *out) 1803 { 1804 switch (dp_link_bw) { 1805 default: 1806 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", 1807 dp_link_bw, dp_link_count); 1808 
return false; 1809 1810 case DP_LINK_BW_1_62: 1811 *out = 3 * dp_link_count; 1812 break; 1813 case DP_LINK_BW_2_7: 1814 *out = 5 * dp_link_count; 1815 break; 1816 case DP_LINK_BW_5_4: 1817 *out = 10 * dp_link_count; 1818 break; 1819 } 1820 return true; 1821 } 1822 1823 /** 1824 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager 1825 * @mgr: manager to set state for 1826 * @mst_state: true to enable MST on this connector - false to disable. 1827 * 1828 * This is called by the driver when it detects an MST capable device plugged 1829 * into a DP MST capable port, or when a DP MST capable device is unplugged. 1830 */ 1831 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) 1832 { 1833 int ret = 0; 1834 struct drm_dp_mst_branch *mstb = NULL; 1835 1836 mutex_lock(&mgr->lock); 1837 if (mst_state == mgr->mst_state) 1838 goto out_unlock; 1839 1840 mgr->mst_state = mst_state; 1841 /* set the device into MST mode */ 1842 if (mst_state) { 1843 WARN_ON(mgr->mst_primary); 1844 1845 /* get dpcd info */ 1846 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); 1847 if (ret != DP_RECEIVER_CAP_SIZE) { 1848 DRM_DEBUG_KMS("failed to read DPCD\n"); 1849 goto out_unlock; 1850 } 1851 1852 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1], 1853 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, 1854 &mgr->pbn_div)) { 1855 ret = -EINVAL; 1856 goto out_unlock; 1857 } 1858 1859 mgr->total_pbn = 2560; 1860 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div); 1861 mgr->avail_slots = mgr->total_slots; 1862 1863 /* add initial branch device at LCT 1 */ 1864 mstb = drm_dp_add_mst_branch_device(1, NULL); 1865 if (mstb == NULL) { 1866 ret = -ENOMEM; 1867 goto out_unlock; 1868 } 1869 mstb->mgr = mgr; 1870 1871 /* give this the main reference */ 1872 mgr->mst_primary = mstb; 1873 kref_get(&mgr->mst_primary->kref); 1874 1875 { 1876 struct drm_dp_payload reset_pay; 1877 reset_pay.start_slot = 0; 1878 reset_pay.num_slots = 0x3f; 1879 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); 1880 } 1881 1882 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 1883 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); 1884 if (ret < 0) { 1885 goto out_unlock; 1886 } 1887 1888 1889 /* sort out guid */ 1890 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); 1891 if (ret != 16) { 1892 DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); 1893 goto out_unlock; 1894 } 1895 1896 mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); 1897 if (!mgr->guid_valid) { 1898 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); 1899 mgr->guid_valid = true; 1900 } 1901 1902 queue_work(system_long_wq, &mgr->work); 1903 1904 ret = 0; 1905 } else { 1906 /* disable MST on the device */ 1907 mstb = mgr->mst_primary; 1908 mgr->mst_primary = NULL; 1909 /* this can fail if the device is gone */ 1910 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); 1911 ret = 0; 1912 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); 1913 mgr->payload_mask = 0; 1914 set_bit(0, &mgr->payload_mask); 1915 } 1916 1917 out_unlock: 1918 mutex_unlock(&mgr->lock); 1919 if (mstb) 1920 drm_dp_put_mst_branch_device(mstb); 1921 return ret; 1922 1923 } 1924 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); 1925 1926 /** 1927 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager 1928 * @mgr: manager to suspend 1929 * 1930 * This function tells the MST device that we can't handle UP messages 1931 * anymore. This should stop it from sending any since we are suspended. 
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This will fetch the DPCD and check whether the device is still there;
 * if it is, it rewrites the MSTM control bits and returns.
 *
 * If the device has gone away, this returns -1 and the driver should do
 * a full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;
		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);

static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
	msg = up ?
&mgr->up_req_recv : &mgr->down_rep_recv; 1993 1994 len = min(mgr->max_dpcd_transaction_bytes, 16); 1995 ret = drm_dp_dpcd_read(mgr->aux, basereg, 1996 replyblock, len); 1997 if (ret != len) { 1998 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); 1999 return; 2000 } 2001 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); 2002 if (!ret) { 2003 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); 2004 return; 2005 } 2006 replylen = msg->curchunk_len + msg->curchunk_hdrlen; 2007 2008 origlen = replylen; 2009 replylen -= len; 2010 curreply = len; 2011 while (replylen > 0) { 2012 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); 2013 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 2014 replyblock, len); 2015 if (ret != len) { 2016 DRM_DEBUG_KMS("failed to read a chunk\n"); 2017 } 2018 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); 2019 if (ret == false) 2020 DRM_DEBUG_KMS("failed to build sideband msg\n"); 2021 curreply += len; 2022 replylen -= len; 2023 } 2024 } 2025 2026 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 2027 { 2028 int ret = 0; 2029 2030 drm_dp_get_one_sb_msg(mgr, false); 2031 2032 if (mgr->down_rep_recv.have_eomt) { 2033 struct drm_dp_sideband_msg_tx *txmsg; 2034 struct drm_dp_mst_branch *mstb; 2035 int slot = -1; 2036 mstb = drm_dp_get_mst_branch_device(mgr, 2037 mgr->down_rep_recv.initial_hdr.lct, 2038 mgr->down_rep_recv.initial_hdr.rad); 2039 2040 if (!mstb) { 2041 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); 2042 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2043 return 0; 2044 } 2045 2046 /* find the message */ 2047 slot = mgr->down_rep_recv.initial_hdr.seqno; 2048 mutex_lock(&mgr->qlock); 2049 txmsg = mstb->tx_slots[slot]; 2050 /* remove from slots */ 2051 mutex_unlock(&mgr->qlock); 2052 2053 if (!txmsg) { 2054 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", 2055 mstb, 2056 mgr->down_rep_recv.initial_hdr.seqno, 2057 mgr->down_rep_recv.initial_hdr.lct, 2058 mgr->down_rep_recv.initial_hdr.rad[0], 2059 mgr->down_rep_recv.msg[0]); 2060 drm_dp_put_mst_branch_device(mstb); 2061 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2062 return 0; 2063 } 2064 2065 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); 2066 if (txmsg->reply.reply_type == 1) { 2067 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); 2068 } 2069 2070 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2071 drm_dp_put_mst_branch_device(mstb); 2072 2073 mutex_lock(&mgr->qlock); 2074 txmsg->state = DRM_DP_SIDEBAND_TX_RX; 2075 mstb->tx_slots[slot] = NULL; 2076 mutex_unlock(&mgr->qlock); 2077 2078 wake_up(&mgr->tx_waitq); 2079 } 2080 return ret; 2081 } 2082 2083 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 2084 { 2085 int ret = 0; 2086 drm_dp_get_one_sb_msg(mgr, true); 2087 2088 if (mgr->up_req_recv.have_eomt) { 2089 struct drm_dp_sideband_msg_req_body msg; 2090 struct drm_dp_mst_branch *mstb; 2091 bool seqno; 2092 mstb = drm_dp_get_mst_branch_device(mgr, 2093 mgr->up_req_recv.initial_hdr.lct, 2094 mgr->up_req_recv.initial_hdr.rad); 2095 if (!mstb) { 2096 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); 2097 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2098 return 0; 2099 } 2100 2101 
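		/*
		 * The sequence number from the request header is echoed back
		 * in the ACK below so the branch device can match our reply
		 * to its request.
		 */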
seqno = mgr->up_req_recv.initial_hdr.seqno; 2102 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); 2103 2104 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { 2105 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); 2106 drm_dp_update_port(mstb, &msg.u.conn_stat); 2107 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2108 (*mgr->cbs->hotplug)(mgr); 2109 2110 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2111 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false); 2112 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); 2113 } 2114 2115 drm_dp_put_mst_branch_device(mstb); 2116 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2117 } 2118 return ret; 2119 } 2120 2121 /** 2122 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify 2123 * @mgr: manager to notify irq for. 2124 * @esi: 4 bytes from SINK_COUNT_ESI 2125 * 2126 * This should be called from the driver when it detects a short IRQ, 2127 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The 2128 * topology manager will process the sideband messages received as a result 2129 * of this. 2130 */ 2131 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) 2132 { 2133 int ret = 0; 2134 int sc; 2135 *handled = false; 2136 sc = esi[0] & 0x3f; 2137 2138 if (sc != mgr->sink_count) { 2139 mgr->sink_count = sc; 2140 *handled = true; 2141 } 2142 2143 if (esi[1] & DP_DOWN_REP_MSG_RDY) { 2144 ret = drm_dp_mst_handle_down_rep(mgr); 2145 *handled = true; 2146 } 2147 2148 if (esi[1] & DP_UP_REQ_MSG_RDY) { 2149 ret |= drm_dp_mst_handle_up_req(mgr); 2150 *handled = true; 2151 } 2152 2153 drm_dp_mst_kick_tx(mgr); 2154 return ret; 2155 } 2156 EXPORT_SYMBOL(drm_dp_mst_hpd_irq); 2157 2158 /** 2159 * drm_dp_mst_detect_port() - get connection status for an MST port 2160 * @mgr: manager for this port 2161 * @port: unverified pointer to a port 2162 * 2163 * This returns the current connection state for a port. 
It validates the 2164 * port pointer still exists so the caller doesn't require a reference 2165 */ 2166 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, 2167 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2168 { 2169 enum drm_connector_status status = connector_status_disconnected; 2170 2171 /* we need to search for the port in the mgr in case its gone */ 2172 port = drm_dp_get_validated_port_ref(mgr, port); 2173 if (!port) 2174 return connector_status_disconnected; 2175 2176 if (!port->ddps) 2177 goto out; 2178 2179 switch (port->pdt) { 2180 case DP_PEER_DEVICE_NONE: 2181 case DP_PEER_DEVICE_MST_BRANCHING: 2182 break; 2183 2184 case DP_PEER_DEVICE_SST_SINK: 2185 status = connector_status_connected; 2186 /* for logical ports - cache the EDID */ 2187 if (port->port_num >= 8 && !port->cached_edid) { 2188 port->cached_edid = drm_get_edid(connector, &port->aux.ddc); 2189 } 2190 break; 2191 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2192 if (port->ldps) 2193 status = connector_status_connected; 2194 break; 2195 } 2196 out: 2197 drm_dp_put_port(port); 2198 return status; 2199 } 2200 EXPORT_SYMBOL(drm_dp_mst_detect_port); 2201 2202 /** 2203 * drm_dp_mst_get_edid() - get EDID for an MST port 2204 * @connector: toplevel connector to get EDID for 2205 * @mgr: manager for this port 2206 * @port: unverified pointer to a port. 2207 * 2208 * This returns an EDID for the port connected to a connector, 2209 * It validates the pointer still exists so the caller doesn't require a 2210 * reference. 2211 */ 2212 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2213 { 2214 struct edid *edid = NULL; 2215 2216 /* we need to search for the port in the mgr in case its gone */ 2217 port = drm_dp_get_validated_port_ref(mgr, port); 2218 if (!port) 2219 return NULL; 2220 2221 if (port->cached_edid) 2222 edid = drm_edid_duplicate(port->cached_edid); 2223 else 2224 edid = drm_get_edid(connector, &port->aux.ddc); 2225 2226 drm_mode_connector_set_tile_property(connector); 2227 drm_dp_put_port(port); 2228 return edid; 2229 } 2230 EXPORT_SYMBOL(drm_dp_mst_get_edid); 2231 2232 /** 2233 * drm_dp_find_vcpi_slots() - find slots for this PBN value 2234 * @mgr: manager to use 2235 * @pbn: payload bandwidth to convert into slots. 2236 */ 2237 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, 2238 int pbn) 2239 { 2240 int num_slots; 2241 2242 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 2243 2244 if (num_slots > mgr->avail_slots) 2245 return -ENOSPC; 2246 return num_slots; 2247 } 2248 EXPORT_SYMBOL(drm_dp_find_vcpi_slots); 2249 2250 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, 2251 struct drm_dp_vcpi *vcpi, int pbn) 2252 { 2253 int num_slots; 2254 int ret; 2255 2256 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); 2257 2258 if (num_slots > mgr->avail_slots) 2259 return -ENOSPC; 2260 2261 vcpi->pbn = pbn; 2262 vcpi->aligned_pbn = num_slots * mgr->pbn_div; 2263 vcpi->num_slots = num_slots; 2264 2265 ret = drm_dp_mst_assign_payload_id(mgr, vcpi); 2266 if (ret < 0) 2267 return ret; 2268 return 0; 2269 } 2270 2271 /** 2272 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel 2273 * @mgr: manager for this port 2274 * @port: port to allocate a virtual channel for. 2275 * @pbn: payload bandwidth number to request 2276 * @slots: returned number of slots for this PBN. 
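 *
 * Return: true if the virtual channel was allocated (or an identical
 * allocation already existed for this port and PBN), false on failure.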
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	drm_dp_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	int slots = 0;
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return slots;

	slots = port->vcpi.num_slots;
	drm_dp_put_port(port);
	return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	/* clear any stale table-updated status before programming the allocation */
	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set
after read payload table status %d\n", status); 2393 ret = -EINVAL; 2394 goto fail; 2395 } 2396 ret = 0; 2397 fail: 2398 return ret; 2399 } 2400 2401 2402 /** 2403 * drm_dp_check_act_status() - Check ACT handled status. 2404 * @mgr: manager to use 2405 * 2406 * Check the payload status bits in the DPCD for ACT handled completion. 2407 */ 2408 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) 2409 { 2410 u8 status; 2411 int ret; 2412 int count = 0; 2413 2414 do { 2415 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); 2416 2417 if (ret < 0) { 2418 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret); 2419 goto fail; 2420 } 2421 2422 if (status & DP_PAYLOAD_ACT_HANDLED) 2423 break; 2424 count++; 2425 udelay(100); 2426 2427 } while (count < 30); 2428 2429 if (!(status & DP_PAYLOAD_ACT_HANDLED)) { 2430 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count); 2431 ret = -EINVAL; 2432 goto fail; 2433 } 2434 return 0; 2435 fail: 2436 return ret; 2437 } 2438 EXPORT_SYMBOL(drm_dp_check_act_status); 2439 2440 /** 2441 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. 2442 * @clock: dot clock for the mode 2443 * @bpp: bpp for the mode. 2444 * 2445 * This uses the formula in the spec to calculate the PBN value for a mode. 2446 */ 2447 int drm_dp_calc_pbn_mode(int clock, int bpp) 2448 { 2449 fixed20_12 pix_bw; 2450 fixed20_12 fbpp; 2451 fixed20_12 result; 2452 fixed20_12 margin, tmp; 2453 u32 res; 2454 2455 pix_bw.full = dfixed_const(clock); 2456 fbpp.full = dfixed_const(bpp); 2457 tmp.full = dfixed_const(8); 2458 fbpp.full = dfixed_div(fbpp, tmp); 2459 2460 result.full = dfixed_mul(pix_bw, fbpp); 2461 margin.full = dfixed_const(54); 2462 tmp.full = dfixed_const(64); 2463 margin.full = dfixed_div(margin, tmp); 2464 result.full = dfixed_div(result, margin); 2465 2466 margin.full = dfixed_const(1006); 2467 tmp.full = dfixed_const(1000); 2468 margin.full = dfixed_div(margin, tmp); 2469 result.full = dfixed_mul(result, margin); 2470 2471 result.full = dfixed_div(result, tmp); 2472 result.full = dfixed_ceil(result); 2473 res = dfixed_trunc(result); 2474 return res; 2475 } 2476 EXPORT_SYMBOL(drm_dp_calc_pbn_mode); 2477 2478 static int test_calc_pbn_mode(void) 2479 { 2480 int ret; 2481 ret = drm_dp_calc_pbn_mode(154000, 30); 2482 if (ret != 689) 2483 return -EINVAL; 2484 ret = drm_dp_calc_pbn_mode(234000, 30); 2485 if (ret != 1047) 2486 return -EINVAL; 2487 return 0; 2488 } 2489 2490 /* we want to kick the TX after we've ack the up/down IRQs. 
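   Deferring the actual transmit to a workqueue keeps the sideband I/O out of
   the HPD IRQ handler itself.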
*/ 2491 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) 2492 { 2493 queue_work(system_long_wq, &mgr->tx_work); 2494 } 2495 2496 static void drm_dp_mst_dump_mstb(struct seq_file *m, 2497 struct drm_dp_mst_branch *mstb) 2498 { 2499 struct drm_dp_mst_port *port; 2500 int tabs = mstb->lct; 2501 char prefix[10]; 2502 int i; 2503 2504 for (i = 0; i < tabs; i++) 2505 prefix[i] = '\t'; 2506 prefix[i] = '\0'; 2507 2508 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports); 2509 list_for_each_entry(port, &mstb->ports, next) { 2510 seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector); 2511 if (port->mstb) 2512 drm_dp_mst_dump_mstb(m, port->mstb); 2513 } 2514 } 2515 2516 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, 2517 char *buf) 2518 { 2519 int ret; 2520 int i; 2521 for (i = 0; i < 4; i++) { 2522 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16); 2523 if (ret != 16) 2524 break; 2525 } 2526 if (i == 4) 2527 return true; 2528 return false; 2529 } 2530 2531 /** 2532 * drm_dp_mst_dump_topology(): dump topology to seq file. 2533 * @m: seq_file to dump output to 2534 * @mgr: manager to dump current topology for. 2535 * 2536 * helper to dump MST topology to a seq file for debugfs. 2537 */ 2538 void drm_dp_mst_dump_topology(struct seq_file *m, 2539 struct drm_dp_mst_topology_mgr *mgr) 2540 { 2541 int i; 2542 struct drm_dp_mst_port *port; 2543 mutex_lock(&mgr->lock); 2544 if (mgr->mst_primary) 2545 drm_dp_mst_dump_mstb(m, mgr->mst_primary); 2546 2547 /* dump VCPIs */ 2548 mutex_unlock(&mgr->lock); 2549 2550 mutex_lock(&mgr->payload_lock); 2551 seq_printf(m, "vcpi: %lx\n", mgr->payload_mask); 2552 2553 for (i = 0; i < mgr->max_payloads; i++) { 2554 if (mgr->proposed_vcpis[i]) { 2555 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); 2556 seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots); 2557 } else 2558 seq_printf(m, "vcpi %d:unsed\n", i); 2559 } 2560 for (i = 0; i < mgr->max_payloads; i++) { 2561 seq_printf(m, "payload %d: %d, %d, %d\n", 2562 i, 2563 mgr->payloads[i].payload_state, 2564 mgr->payloads[i].start_slot, 2565 mgr->payloads[i].num_slots); 2566 2567 2568 } 2569 mutex_unlock(&mgr->payload_lock); 2570 2571 mutex_lock(&mgr->lock); 2572 if (mgr->mst_primary) { 2573 u8 buf[64]; 2574 bool bret; 2575 int ret; 2576 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); 2577 seq_printf(m, "dpcd: "); 2578 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++) 2579 seq_printf(m, "%02x ", buf[i]); 2580 seq_printf(m, "\n"); 2581 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); 2582 seq_printf(m, "faux/mst: "); 2583 for (i = 0; i < 2; i++) 2584 seq_printf(m, "%02x ", buf[i]); 2585 seq_printf(m, "\n"); 2586 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); 2587 seq_printf(m, "mst ctrl: "); 2588 for (i = 0; i < 1; i++) 2589 seq_printf(m, "%02x ", buf[i]); 2590 seq_printf(m, "\n"); 2591 2592 bret = dump_dp_payload_table(mgr, buf); 2593 if (bret == true) { 2594 seq_printf(m, "payload table: "); 2595 for (i = 0; i < 63; i++) 2596 seq_printf(m, "%02x ", buf[i]); 2597 seq_printf(m, "\n"); 2598 } 2599 2600 } 2601 2602 mutex_unlock(&mgr->lock); 2603 2604 } 2605 EXPORT_SYMBOL(drm_dp_mst_dump_topology); 2606 2607 static void drm_dp_tx_work(struct work_struct *work) 2608 { 2609 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct 
drm_dp_mst_topology_mgr, tx_work); 2610 2611 mutex_lock(&mgr->qlock); 2612 if (mgr->tx_down_in_progress) 2613 process_single_down_tx_qlock(mgr); 2614 mutex_unlock(&mgr->qlock); 2615 } 2616 2617 static void drm_dp_destroy_connector_work(struct work_struct *work) 2618 { 2619 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 2620 struct drm_dp_mst_port *port; 2621 2622 /* 2623 * Not a regular list traverse as we have to drop the destroy 2624 * connector lock before destroying the connector, to avoid AB->BA 2625 * ordering between this lock and the config mutex. 2626 */ 2627 for (;;) { 2628 mutex_lock(&mgr->destroy_connector_lock); 2629 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); 2630 if (!port) { 2631 mutex_unlock(&mgr->destroy_connector_lock); 2632 break; 2633 } 2634 list_del(&port->next); 2635 mutex_unlock(&mgr->destroy_connector_lock); 2636 2637 mgr->cbs->destroy_connector(mgr, port->connector); 2638 2639 drm_dp_port_teardown_pdt(port, port->pdt); 2640 2641 if (!port->input && port->vcpi.vcpi > 0) 2642 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 2643 kfree(port); 2644 } 2645 } 2646 2647 /** 2648 * drm_dp_mst_topology_mgr_init - initialise a topology manager 2649 * @mgr: manager struct to initialise 2650 * @dev: device providing this structure - for i2c addition. 2651 * @aux: DP helper aux channel to talk to this device 2652 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit 2653 * @max_payloads: maximum number of payloads this GPU can source 2654 * @conn_base_id: the connector object ID the MST device is connected to. 2655 * 2656 * Return 0 for success, or negative error code on failure 2657 */ 2658 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, 2659 struct device *dev, struct drm_dp_aux *aux, 2660 int max_dpcd_transaction_bytes, 2661 int max_payloads, int conn_base_id) 2662 { 2663 mutex_init(&mgr->lock); 2664 mutex_init(&mgr->qlock); 2665 mutex_init(&mgr->payload_lock); 2666 mutex_init(&mgr->destroy_connector_lock); 2667 INIT_LIST_HEAD(&mgr->tx_msg_upq); 2668 INIT_LIST_HEAD(&mgr->tx_msg_downq); 2669 INIT_LIST_HEAD(&mgr->destroy_connector_list); 2670 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); 2671 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); 2672 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); 2673 init_waitqueue_head(&mgr->tx_waitq); 2674 mgr->dev = dev; 2675 mgr->aux = aux; 2676 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; 2677 mgr->max_payloads = max_payloads; 2678 mgr->conn_base_id = conn_base_id; 2679 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); 2680 if (!mgr->payloads) 2681 return -ENOMEM; 2682 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); 2683 if (!mgr->proposed_vcpis) 2684 return -ENOMEM; 2685 set_bit(0, &mgr->payload_mask); 2686 test_calc_pbn_mode(); 2687 return 0; 2688 } 2689 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); 2690 2691 /** 2692 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager. 
2693 * @mgr: manager to destroy 2694 */ 2695 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 2696 { 2697 flush_work(&mgr->destroy_connector_work); 2698 mutex_lock(&mgr->payload_lock); 2699 kfree(mgr->payloads); 2700 mgr->payloads = NULL; 2701 kfree(mgr->proposed_vcpis); 2702 mgr->proposed_vcpis = NULL; 2703 mutex_unlock(&mgr->payload_lock); 2704 mgr->dev = NULL; 2705 mgr->aux = NULL; 2706 } 2707 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); 2708 2709 /* I2C device */ 2710 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, 2711 int num) 2712 { 2713 struct drm_dp_aux *aux = adapter->algo_data; 2714 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux); 2715 struct drm_dp_mst_branch *mstb; 2716 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 2717 unsigned int i; 2718 bool reading = false; 2719 struct drm_dp_sideband_msg_req_body msg; 2720 struct drm_dp_sideband_msg_tx *txmsg = NULL; 2721 int ret; 2722 2723 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 2724 if (!mstb) 2725 return -EREMOTEIO; 2726 2727 /* construct i2c msg */ 2728 /* see if last msg is a read */ 2729 if (msgs[num - 1].flags & I2C_M_RD) 2730 reading = true; 2731 2732 if (!reading) { 2733 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n"); 2734 ret = -EIO; 2735 goto out; 2736 } 2737 2738 msg.req_type = DP_REMOTE_I2C_READ; 2739 msg.u.i2c_read.num_transactions = num - 1; 2740 msg.u.i2c_read.port_number = port->port_num; 2741 for (i = 0; i < num - 1; i++) { 2742 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr; 2743 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len; 2744 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf; 2745 } 2746 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr; 2747 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len; 2748 2749 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); 2750 if (!txmsg) { 2751 ret = -ENOMEM; 2752 goto out; 2753 } 2754 2755 txmsg->dst = mstb; 2756 drm_dp_encode_sideband_req(&msg, txmsg); 2757 2758 drm_dp_queue_down_tx(mgr, txmsg); 2759 2760 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); 2761 if (ret > 0) { 2762 2763 if (txmsg->reply.reply_type == 1) { /* got a NAK back */ 2764 ret = -EREMOTEIO; 2765 goto out; 2766 } 2767 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { 2768 ret = -EIO; 2769 goto out; 2770 } 2771 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); 2772 ret = num; 2773 } 2774 out: 2775 kfree(txmsg); 2776 drm_dp_put_mst_branch_device(mstb); 2777 return ret; 2778 } 2779 2780 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter) 2781 { 2782 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | 2783 I2C_FUNC_SMBUS_READ_BLOCK_DATA | 2784 I2C_FUNC_SMBUS_BLOCK_PROC_CALL | 2785 I2C_FUNC_10BIT_ADDR; 2786 } 2787 2788 static const struct i2c_algorithm drm_dp_mst_i2c_algo = { 2789 .functionality = drm_dp_mst_i2c_functionality, 2790 .master_xfer = drm_dp_mst_i2c_xfer, 2791 }; 2792 2793 /** 2794 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX 2795 * @aux: DisplayPort AUX channel 2796 * 2797 * Returns 0 on success or a negative error code on failure. 
2798 */ 2799 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux) 2800 { 2801 aux->ddc.algo = &drm_dp_mst_i2c_algo; 2802 aux->ddc.algo_data = aux; 2803 aux->ddc.retries = 3; 2804 2805 aux->ddc.class = I2C_CLASS_DDC; 2806 aux->ddc.owner = THIS_MODULE; 2807 aux->ddc.dev.parent = aux->dev; 2808 aux->ddc.dev.of_node = aux->dev->of_node; 2809 2810 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), 2811 sizeof(aux->ddc.name)); 2812 2813 return i2c_add_adapter(&aux->ddc); 2814 } 2815 2816 /** 2817 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter 2818 * @aux: DisplayPort AUX channel 2819 */ 2820 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux) 2821 { 2822 i2c_del_adapter(&aux->ddc); 2823 } 2824
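#if 0
/*
 * Illustrative sketch only (kept compiled out, like the unused helper above):
 * one possible way a driver could sequence the exported helpers around
 * enabling a stream on an MST port. The mgr/port arguments, the
 * 154000 kHz / 30 bpp mode and the error handling policy are placeholder
 * assumptions, not part of this file's API contract.
 */
static int example_enable_mst_stream(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_port *port)
{
	int pbn, slots;

	/* 154 MHz at 30 bpp works out to 689 PBN (see test_calc_pbn_mode()) */
	pbn = drm_dp_calc_pbn_mode(154000, 30);

	/* optional pre-check that enough timeslots are free */
	slots = drm_dp_find_vcpi_slots(mgr, pbn);
	if (slots < 0)
		return slots;

	/* reserve a virtual channel for this port */
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return -ENOSPC;

	/*
	 * Step 1 programs the payload table (drm_dp_update_payload_part1(),
	 * defined earlier in this file); once the ACT has been handled the
	 * sink side is set up with step 2.
	 */
	drm_dp_update_payload_part1(mgr);
	drm_dp_check_act_status(mgr);
	drm_dp_update_payload_part2(mgr);

	return 0;
}

/*
 * Tearing the stream down is the reverse: clear the slot count, rewrite the
 * payload table, then release the VCPI.
 */
static void example_disable_mst_stream(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	drm_dp_mst_reset_vcpi_slots(mgr, port);
	drm_dp_update_payload_part1(mgr);
	drm_dp_check_act_status(mgr);
	drm_dp_update_payload_part2(mgr);
	drm_dp_mst_deallocate_vcpi(mgr, port);
}
#endif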