Two MST patches for issues we discovered during MST bring-up with two daisy-chained MST displays.
Mykola Lysenko (2):
  drm/dp/mst: process broadcast messages correctly
  drm/dp/mst: always send reply for UP request
 drivers/gpu/drm/drm_dp_mst_topology.c | 125 ++++++++++++++++++++++++++--------
 include/drm/drm_dp_mst_helper.h       |   2 -
 2 files changed, 95 insertions(+), 32 deletions(-)
From: Mykola Lysenko <Mykola.Lysenko@amd.com>
In case a broadcast message is received in an UP request, the RAD cannot be used to identify the message originator. The message should be parsed, and the originator found by the GUID from the parsed message.

Also reply with broadcast in case a broadcast message was received (for now it is always broadcast).
Signed-off-by: Mykola Lysenko <Mykola.Lysenko@amd.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 95 +++++++++++++++++++++++++++++++----
 1 file changed, 84 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 809959d56d78..3baa95c1b14b 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1210,6 +1210,50 @@ out:
 	return mstb;
 }
 
+static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+	struct drm_dp_mst_branch *mstb,
+	uint8_t *guid)
+{
+	struct drm_dp_mst_branch *found_mstb;
+	struct drm_dp_mst_port *port;
+
+	list_for_each_entry(port, &mstb->ports, next) {
+		if (!port->mstb)
+			continue;
+
+		if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
+			return port->mstb;
+
+		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+
+		if (found_mstb)
+			return found_mstb;
+	}
+
+	return NULL;
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
+	struct drm_dp_mst_topology_mgr *mgr,
+	uint8_t *guid)
+{
+	struct drm_dp_mst_branch *mstb;
+
+	/* find the port by iterating down */
+	mutex_lock(&mgr->lock);
+
+	if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
+		mstb = mgr->mst_primary;
+	else
+		mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+
+	if (mstb)
+		kref_get(&mstb->kref);
+
+	mutex_unlock(&mgr->lock);
+	return mstb;
+}
+
 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 					       struct drm_dp_mst_branch *mstb)
 {
@@ -1320,6 +1364,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
 				  struct drm_dp_sideband_msg_tx *txmsg)
 {
 	struct drm_dp_mst_branch *mstb = txmsg->dst;
+	u8 req_type;
 
 	/* both msg slots are full */
 	if (txmsg->seqno == -1) {
@@ -1336,7 +1381,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
 			txmsg->seqno = 1;
 		mstb->tx_slots[txmsg->seqno] = txmsg;
 	}
-	hdr->broadcast = 0;
+
+	req_type = txmsg->msg[0] & 0x7f;
+	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
+	    req_type == DP_RESOURCE_STATUS_NOTIFY)
+		hdr->broadcast = 1;
+	else
+		hdr->broadcast = 0;
 	hdr->path_msg = txmsg->path_msg;
 	hdr->lct = mstb->lct;
 	hdr->lcr = mstb->lct - 1;
@@ -2145,28 +2196,50 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 
 	if (mgr->up_req_recv.have_eomt) {
 		struct drm_dp_sideband_msg_req_body msg;
-		struct drm_dp_mst_branch *mstb;
+		struct drm_dp_mst_branch *mstb = NULL;
 		bool seqno;
-		mstb = drm_dp_get_mst_branch_device(mgr,
-						    mgr->up_req_recv.initial_hdr.lct,
-						    mgr->up_req_recv.initial_hdr.rad);
-		if (!mstb) {
-			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
-			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-			return 0;
+
+		if (!mgr->up_req_recv.initial_hdr.broadcast) {
+			mstb = drm_dp_get_mst_branch_device(mgr,
+							    mgr->up_req_recv.initial_hdr.lct,
+							    mgr->up_req_recv.initial_hdr.rad);
+			if (!mstb) {
+				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+				return 0;
+			}
 		}
 
 		seqno = mgr->up_req_recv.initial_hdr.seqno;
 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
 
 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+
+			if (!mstb)
+				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
+
+			if (!mstb) {
+				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+				return 0;
+			}
+
 			drm_dp_update_port(mstb, &msg.u.conn_stat);
 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
 			(*mgr->cbs->hotplug)(mgr);
 
 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
+			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
+
+			if (!mstb)
+				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
+
+			if (!mstb) {
+				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+				return 0;
+			}
+
 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
 		}
From: Mykola Lysenko <Mykola.Lysenko@amd.com>
We should always send a reply for an UP request in order to make the downstream device clean up its resources appropriately.

The issue was that the reply for an UP request was sent only once.
Signed-off-by: Mykola Lysenko <Mykola.Lysenko@amd.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 30 +++++++++++-------------------
 include/drm/drm_dp_mst_helper.h       |  2 --
 2 files changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 3baa95c1b14b..bda9be9a3087 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1489,26 +1489,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 }
 
 /* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_sideband_msg_tx *txmsg)
 {
-	struct drm_dp_sideband_msg_tx *txmsg;
 	int ret;
 
 	/* construct a chunk from the first msg in the tx_msg queue */
-	if (list_empty(&mgr->tx_msg_upq)) {
-		mgr->tx_up_in_progress = false;
-		return;
-	}
-
-	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
 	ret = process_single_tx_qlock(mgr, txmsg, true);
-	if (ret == 1) {
-		/* up txmsgs aren't put in slots - so free after we send it */
-		list_del(&txmsg->next);
-		kfree(txmsg);
-	} else if (ret)
+
+	if (ret != 1)
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-	mgr->tx_up_in_progress = true;
+
+	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1895,11 +1887,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
 	drm_dp_encode_up_ack_reply(txmsg, req_type);
 
 	mutex_lock(&mgr->qlock);
-	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
-	if (!mgr->tx_up_in_progress) {
-		process_single_up_tx_qlock(mgr);
-	}
+
+	process_single_up_tx_qlock(mgr, txmsg);
+
 	mutex_unlock(&mgr->qlock);
+
+	kfree(txmsg);
 	return 0;
 }
 
@@ -2809,7 +2802,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	mutex_init(&mgr->qlock);
 	mutex_init(&mgr->payload_lock);
 	mutex_init(&mgr->destroy_connector_lock);
-	INIT_LIST_HEAD(&mgr->tx_msg_upq);
 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 5340099741ae..006062a27639 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -450,9 +450,7 @@ struct drm_dp_mst_topology_mgr {
 	   the mstb tx_slots and txmsg->state once they are queued */
 	struct mutex qlock;
 	struct list_head tx_msg_downq;
-	struct list_head tx_msg_upq;
 	bool tx_down_in_progress;
-	bool tx_up_in_progress;
 
 	/* payload info + lock for it */
 	struct mutex payload_lock;
On 19 December 2015 at 08:14, Harry Wentland <harry.wentland@amd.com> wrote:
> From: Mykola Lysenko <Mykola.Lysenko@amd.com>
>
> We should always send a reply for an UP request in order to make the
> downstream device clean up its resources appropriately.
>
> The issue was that the reply for an UP request was sent only once.
What happens, though, if the up reply is too big and needs to be cut into pieces?
The return value of process_single_tx_qlock is:
 -value - errno
 0 - part of msg sent
 1 - all of msg sent
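As a minimal sketch of that convention (the helper name here is hypothetical, not code from either patch), a caller could loop until the whole message has gone out:

static int send_whole_msg_qlock(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_sideband_msg_tx *txmsg,
				bool up)
{
	int ret;

	/* 0 means a chunk was sent and more remain, so keep going;
	 * 1 means the whole message was sent; <0 is an errno. */
	do {
		ret = process_single_tx_qlock(mgr, txmsg, up);
	} while (ret == 0);

	return ret == 1 ? 0 : ret;
}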
Does the attached patch make any difference?
It handles the queue like the down queue.
Dave.
Thanks, Dave.
Do you have an example of when the up reply would be too big to fit into one sideband message? We don't expect that to happen.
Mykola, can you see if Dave's patch is a good alternative to your patch?
Harry
Hi Dave,
there are a few comments from my side:
1. As UP replies also go to slots (even though, as mentioned in the code, this should not happen), we should clean them up somewhere; see the sketch after this list. For DOWN requests the slots are cleaned when the full DOWN reply is received; however, we will not receive an interrupt when an UP reply has been sent;

2. I am not sure DRM currently processes DOWN requests or UP replies longer than 1 SB, though it does process such DOWN replies. The first SB is sent right away when the message is submitted. The remaining part is intended to be sent in drm_dp_tx_work, which is scheduled from drm_dp_mst_hpd_irq, which should be called from the DP short-pulse interrupt. However, a short pulse will not come for the remaining part of a DOWN request, nor for part of an UP reply. As I understand it, we have no issue right now only because no DOWN request or UP reply is longer than 1 SB;

3. I am not sure a queue is needed for UP replies at all. As soon as a full UP request is received, we should send the full reply right away, handling AUX_DEFER just in case;

4. If the points above are right, sending MST messages bigger than 1 SB should be addressed in a separate patch, not in this one.
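To make points 1 and 3 concrete, here is a minimal sketch (a hypothetical function that mirrors what patch 2/2 does, not new mechanism): the UP reply is sent synchronously under qlock and its tx slot is released immediately, since no completion interrupt will fire for an UP reply:

static void send_up_reply_qlock(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* send the encoded UP reply in one go */
	ret = process_single_tx_qlock(mgr, txmsg, true);
	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	/* no interrupt will signal UP-reply completion, so the tx
	 * slot must be released here rather than from an irq path */
	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}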
Please let me know if the points above make sense, or whether I misunderstood the code somewhere.
Mykola
Okay, this makes sense to me.

Ack for all four patches to go via Alex for this merge window.

Alex, can you add Cc: stable to these and send at least these ones and the two latest ones?
Thanks, Dave.