Lines matching defs:txmsg (txmsg is a struct drm_dp_sideband_msg_tx, the sideband transmit message used by the DRM DP MST topology helpers)
691 const struct drm_dp_sideband_msg_tx *txmsg)
698 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
700 drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
701 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
702 drm_dp_mst_sideband_tx_state_str(txmsg->state),
703 txmsg->path_msg, buf);
705 ret = drm_dp_decode_sideband_req(txmsg, &req);
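
The dump helper above renders the destination branch RAD as a string and prints the message state via drm_dp_mst_sideband_tx_state_str() before decoding the request. A minimal sketch of that state-to-name mapping, assuming only the five DRM_DP_SIDEBAND_TX_* states visible throughout this listing (the helper's exact output strings are not shown here, so the names below are illustrative):

/* Hedged sketch: readable names for the txmsg states used in this file. */
static const char *txmsg_state_name(int state)
{
	switch (state) {
	case DRM_DP_SIDEBAND_TX_QUEUED:     return "queued";
	case DRM_DP_SIDEBAND_TX_START_SEND: return "start_send";
	case DRM_DP_SIDEBAND_TX_SENT:       return "sent";
	case DRM_DP_SIDEBAND_TX_RX:         return "rx";
	case DRM_DP_SIDEBAND_TX_TIMEOUT:    return "timeout";
	default:                            return "unknown";
	}
}
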
1241 struct drm_dp_sideband_msg_tx *txmsg)
1246 * All updates to txmsg->state are protected by mgr->qlock, and the two
1250 state = READ_ONCE(txmsg->state);
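
check_txmsg_state() is the wait condition used by drm_dp_mst_wait_tx_reply() below: it reads the state locklessly (hence the READ_ONCE() at line 1250) and reports completion on either a received reply or a timeout. A hedged reconstruction consistent with those lines:

/* Hedged reconstruction: the two terminal states are a received reply
 * (TX_RX) or a timeout; anything else means the message is still in
 * flight. txmsg->state is read without mgr->qlock, so READ_ONCE() is
 * required to pair with the locked writers. */
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state = READ_ONCE(txmsg->state);

	return state == DRM_DP_SIDEBAND_TX_RX ||
	       state == DRM_DP_SIDEBAND_TX_TIMEOUT;
}
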
1256 struct drm_dp_sideband_msg_tx *txmsg)
1278 check_txmsg_state(mgr, txmsg),
1292 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
1298 txmsg, txmsg->state, txmsg->seqno);
1304 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
1305 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
1306 txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
1307 list_del(&txmsg->next);
1313 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
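
drm_dp_mst_wait_tx_reply() sleeps on the condition above and, when the wait times out, unlinks the message from whichever queue it is still on under mgr->qlock (lines 1304-1307) before dumping it. Every sender in this file follows the same caller contract; a hedged usage sketch:

/* Hedged usage sketch: ret > 0 means a reply arrived and txmsg->reply is
 * valid; ret <= 0 means the transaction failed or timed out. */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
	ret = -EIO;	/* the branch device rejected the request */
kfree(txmsg);
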
2766 struct drm_dp_sideband_msg_tx *txmsg)
2768 struct drm_dp_mst_branch *mstb = txmsg->dst;
2771 req_type = txmsg->msg[0] & 0x7f;
2778 hdr->path_msg = txmsg->path_msg;
2795 struct drm_dp_sideband_msg_tx *txmsg,
2803 if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
2808 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
2809 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2812 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2817 len = txmsg->cur_len - txmsg->cur_offset;
2823 if (len == txmsg->cur_len)
2831 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2842 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2847 txmsg->cur_offset += tosend;
2848 if (txmsg->cur_offset == txmsg->cur_len) {
2849 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
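
process_single_tx_qlock() slices txmsg->msg into sideband chunks: each DPCD message window is 48 bytes, one of which holds the trailing CRC, and the rest is split between the encoded header and a payload slice; cur_offset advances per chunk until it reaches cur_len, at which point the state becomes SENT (line 2849). A hedged sketch of that arithmetic; the 48-byte window and drm_dp_calc_sb_hdr_size() come from the sideband helpers, so treat the exact constants as assumptions:

/* Hedged sketch of the chunking math implied by lines 2817-2849. */
int len = txmsg->cur_len - txmsg->cur_offset;	/* bytes still unsent */
int space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
int tosend = min(len, space);

hdr.somt = (txmsg->cur_offset == 0);	/* first chunk: start of transaction */
hdr.eomt = (space >= len);		/* last chunk: end of transaction */
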
2857 struct drm_dp_sideband_msg_tx *txmsg;
2866 txmsg = list_first_entry(&mgr->tx_msg_downq,
2868 ret = process_single_tx_qlock(mgr, txmsg, false);
2871 list_del(&txmsg->next);
2872 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2878 struct drm_dp_sideband_msg_tx *txmsg)
2881 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2886 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
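
drm_dp_queue_down_tx() appends the message to mgr->tx_msg_downq under mgr->qlock, optionally dumping it when DP debugging is enabled, and makes sure the tx work gets to run. A hedged sketch of the queueing step; the list_is_singular() kick via drm_dp_mst_kick_tx() is an assumption about when the worker actually needs a nudge:

mutex_lock(&mgr->qlock);
list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
/* Only the first message on an empty queue needs to kick the worker;
 * later messages are picked up by the already-scheduled tx work. */
if (list_is_singular(&mgr->tx_msg_downq))
	drm_dp_mst_kick_tx(mgr);
mutex_unlock(&mgr->qlock);
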
2921 struct drm_dp_sideband_msg_tx *txmsg;
2927 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2928 if (!txmsg)
2931 txmsg->dst = mstb;
2932 build_link_address(txmsg);
2935 drm_dp_queue_down_tx(mgr, txmsg);
2938 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2943 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2949 reply = &txmsg->reply.u.link_addr;
2993 kfree(txmsg);
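
drm_dp_send_link_address() is the first of many senders that share the same shape: allocate a txmsg, point it at the destination branch, build the request body, queue it, wait for the reply, check for a NAK, and free it. A hedged skeleton of that pattern, with my_build_request() standing in for any of the build_*() helpers in this listing:

/* Hedged skeleton of the send-and-wait pattern used from line 2921 onward.
 * my_build_request() is a placeholder for build_link_address(),
 * build_enum_path_resources(), build_dpcd_read(), and friends. */
struct drm_dp_sideband_msg_tx *txmsg;
int ret;

txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
	return -ENOMEM;

txmsg->dst = mstb;		/* destination branch device */
my_build_request(txmsg);	/* encode the sideband request into txmsg->msg */

drm_dp_queue_down_tx(mgr, txmsg);

ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
		ret = -EIO;
	/* otherwise the ACK payload lives in txmsg->reply.u.<request> */
}

kfree(txmsg);
return ret;
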
3001 struct drm_dp_sideband_msg_tx *txmsg;
3004 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3005 if (!txmsg)
3008 txmsg->dst = mstb;
3009 build_clear_payload_id_table(txmsg);
3011 drm_dp_queue_down_tx(mgr, txmsg);
3013 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3014 if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3017 kfree(txmsg);
3026 struct drm_dp_sideband_msg_tx *txmsg;
3029 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3030 if (!txmsg)
3033 txmsg->dst = mstb;
3034 build_enum_path_resources(txmsg, port->port_num);
3036 drm_dp_queue_down_tx(mgr, txmsg);
3038 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3041 path_res = &txmsg->reply.u.path_resources;
3043 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3067 kfree(txmsg);
3125 struct drm_dp_sideband_msg_tx *txmsg;
3142 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3143 if (!txmsg) {
3151 txmsg->dst = mstb;
3152 build_allocate_payload(txmsg, port_num,
3156 drm_dp_queue_down_tx(mgr, txmsg);
3166 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3168 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3173 kfree(txmsg);
3182 struct drm_dp_sideband_msg_tx *txmsg;
3189 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3190 if (!txmsg) {
3195 txmsg->dst = port->parent;
3196 build_power_updown_phy(txmsg, port->port_num, power_up);
3197 drm_dp_queue_down_tx(mgr, txmsg);
3199 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3201 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3206 kfree(txmsg);
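
drm_dp_send_power_updown_phy() is one of the exported entry points built on this pattern; drivers use it to power the downstream DPRX behind an MST port up or down around modesets. A hedged call-site sketch (the surrounding driver context and error handling are illustrative only):

/* Hedged usage sketch: power up the sink behind an MST port before
 * enabling the stream. */
int ret = drm_dp_send_power_updown_phy(mgr, port, true /* power_up */);

if (ret < 0)
	drm_dbg_kms(mgr->dev, "POWER_UP_PHY failed on port %d: %d\n",
		    port->port_num, ret);
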
3219 struct drm_dp_sideband_msg_tx *txmsg;
3223 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3224 if (!txmsg)
3244 txmsg->dst = mgr->mst_primary;
3246 build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
3248 drm_dp_queue_down_tx(mgr, txmsg);
3250 ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
3253 } else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3260 memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
3266 kfree(txmsg);
3444 struct drm_dp_sideband_msg_tx *txmsg;
3451 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3452 if (!txmsg) {
3457 build_dpcd_read(txmsg, port->port_num, offset, size);
3458 txmsg->dst = port->parent;
3460 drm_dp_queue_down_tx(mgr, txmsg);
3462 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3466 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3473 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3478 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3480 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3483 kfree(txmsg);
3495 struct drm_dp_sideband_msg_tx *txmsg;
3502 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3503 if (!txmsg) {
3508 build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3509 txmsg->dst = mstb;
3511 drm_dp_queue_down_tx(mgr, txmsg);
3513 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3515 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3521 kfree(txmsg);
3541 struct drm_dp_sideband_msg_tx *txmsg;
3543 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3544 if (!txmsg)
3547 txmsg->dst = mstb;
3548 drm_dp_encode_up_ack_reply(txmsg, req_type);
3552 process_single_tx_qlock(mgr, txmsg, true);
3555 kfree(txmsg);
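
drm_dp_send_up_ack_reply() is the one sender that bypasses tx_msg_downq: the ACK for an up request (e.g. CONNECTION_STATUS_NOTIFY) is encoded and pushed in a single chunk while holding mgr->qlock, then freed immediately since no reply is expected. A hedged sketch of that direct path:

/* Hedged sketch: up replies never wait, so there is no queue/wait/wake
 * round trip; the "true" argument routes the chunk to the UP_REP sideband
 * buffer rather than DOWN_REQ. */
mutex_lock(&mgr->qlock);
process_single_tx_qlock(mgr, txmsg, true);
mutex_unlock(&mgr->qlock);

kfree(txmsg);	/* no reply to consume */
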
3885 const struct drm_dp_sideband_msg_tx *txmsg,
3889 const struct drm_dp_mst_branch *mstb = txmsg->dst;
3890 int tx_req_type = get_msg_request_type(txmsg->msg[0]);
3909 struct drm_dp_sideband_msg_tx *txmsg;
3922 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
3927 if (!txmsg || txmsg->dst != mstb) {
3936 if (!verify_rx_request_type(mgr, txmsg, msg))
3939 drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
3941 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3944 txmsg->reply.req_type,
3945 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3946 txmsg->reply.u.nak.reason,
3947 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3948 txmsg->reply.u.nak.nak_data);
3955 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3956 list_del(&txmsg->next);
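
drm_dp_mst_handle_down_rep() matches the incoming reply to the head of tx_msg_downq, verifies it against the original request type, parses it into txmsg->reply, and then completes the transaction. A hedged sketch of that completion step, which is what releases the waiter in drm_dp_mst_wait_tx_reply():

/* Hedged sketch: mark the message as answered under mgr->qlock, drop it
 * from the down queue, and wake anyone sleeping on mgr->tx_waitq. */
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
list_del(&txmsg->next);
mutex_unlock(&mgr->qlock);

wake_up_all(&mgr->tx_waitq);
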
4188 struct drm_dp_sideband_msg_tx *txmsg;
4192 txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
4195 if (!txmsg ||
4196 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
4197 txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
5000 struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
5012 list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
5013 if (txmsg->dst != mstb)
5016 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
5017 list_del(&txmsg->next);
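
When a branch device is torn down, any messages still queued for it (line 5012 onward) are force-completed as timeouts so their senders do not block forever. A hedged reconstruction of that cleanup loop; the wake_tx bookkeeping, which defers the wakeup until the lock is dropped, is an assumption:

bool wake_tx = false;

mutex_lock(&mgr->qlock);
list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
	if (txmsg->dst != mstb)
		continue;

	/* Pretend the message timed out so the waiter sees an error. */
	txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
	list_del(&txmsg->next);
	wake_tx = true;
}
mutex_unlock(&mgr->qlock);

if (wake_tx)
	wake_up_all(&mgr->tx_waitq);
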
5685 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5701 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5702 if (!txmsg) {
5707 txmsg->dst = mstb;
5708 drm_dp_encode_sideband_req(&msg, txmsg);
5710 drm_dp_queue_down_tx(mgr, txmsg);
5712 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5715 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5719 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5723 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5727 kfree(txmsg);
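
The remote I2C path tunnels i2c_msg transfers as REMOTE_I2C_READ sideband requests: the leading write messages become the request's transactions and the final read supplies the reply buffer. A hedged usage sketch of the kind of transfer this services, e.g. an EDID block read on the MST port's AUX-backed I2C adapter (addresses and sizes are illustrative):

/* Hedged usage sketch: a write-then-read pair, as serviced by the
 * remote-i2c-read path above via port->aux.ddc. */
u8 offset = 0;
u8 edid[128];
struct i2c_msg msgs[] = {
	{ .addr = 0x50, .flags = 0,        .len = 1,            .buf = &offset },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(edid), .buf = edid },
};

/* port->aux.ddc is the i2c_adapter whose transfers land in the MST
 * remote-i2c code shown above; it returns the number of messages
 * transferred on success or a negative error code (e.g. on a NAKed
 * sideband reply). */
int ret = i2c_transfer(&port->aux.ddc, msgs, ARRAY_SIZE(msgs));
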
5738 struct drm_dp_sideband_msg_tx *txmsg = NULL;
5741 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5742 if (!txmsg) {
5754 memset(txmsg, 0, sizeof(*txmsg));
5755 txmsg->dst = mstb;
5757 drm_dp_encode_sideband_req(&msg, txmsg);
5758 drm_dp_queue_down_tx(mgr, txmsg);
5760 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5762 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5772 kfree(txmsg);