This just drops the return value and the assignment of it to a len variable that never gets used.
Signed-off-by: Dave Airlie airlied@redhat.com --- drivers/gpu/drm/drm_dp_mst_topology.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5f..294d904 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -631,7 +631,7 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, } }
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) +static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) { struct drm_dp_sideband_msg_req_body req;
@@ -641,20 +641,17 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 req.u.dpcd_write.num_bytes = num_bytes; req.u.dpcd_write.bytes = bytes; drm_dp_encode_sideband_req(&req, msg); - - return 0; }
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg) +static void build_link_address(struct drm_dp_sideband_msg_tx *msg) { struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_LINK_ADDRESS; drm_dp_encode_sideband_req(&req, msg); - return 0; }
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) +static void build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) { struct drm_dp_sideband_msg_req_body req;
@@ -662,10 +659,9 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por req.u.port_num.port_number = port_num; drm_dp_encode_sideband_req(&req, msg); msg->path_msg = true; - return 0; }
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, +static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, u8 vcpi, uint16_t pbn) { struct drm_dp_sideband_msg_req_body req; @@ -676,7 +672,6 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n req.u.allocate_payload.pbn = pbn; drm_dp_encode_sideband_req(&req, msg); msg->path_msg = true; - return 0; }
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, @@ -1461,7 +1456,6 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { - int len; struct drm_dp_sideband_msg_tx *txmsg; int ret;
@@ -1470,7 +1464,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, return -ENOMEM;
txmsg->dst = mstb; - len = build_link_address(txmsg); + build_link_address(txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -1510,7 +1504,6 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port) { - int len; struct drm_dp_sideband_msg_tx *txmsg; int ret;
@@ -1519,7 +1512,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, return -ENOMEM;
txmsg->dst = mstb; - len = build_enum_path_resources(txmsg, port->port_num); + build_enum_path_resources(txmsg, port->port_num);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -1547,7 +1540,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb; - int len, ret; + int ret;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); if (!mstb) @@ -1560,9 +1553,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, }
txmsg->dst = mstb; - len = build_allocate_payload(txmsg, port->port_num, - id, - pbn); + build_allocate_payload(txmsg, port->port_num, + id, pbn);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -1772,7 +1764,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes) { - int len; int ret; struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb; @@ -1787,7 +1778,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, goto fail_put; }
- len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); + build_dpcd_write(txmsg, port->port_num, offset, size, bytes); txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
There is a race where the reply could get processed by another work queue before we've updated the state.
Update the state before sending the msg to close it.
v2: reset the value if the return indicates we haven't sent the msg.
Pointed out by Adam J Richter on
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91481 Signed-off-by: Dave Airlie airlied@redhat.com --- drivers/gpu/drm/drm_dp_mst_topology.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 294d904..831096b9 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1101,8 +1101,10 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
ret = drm_dp_port_setup_pdt(port); if (ret == true) { - drm_dp_send_link_address(mstb->mgr, port->mstb); port->mstb->link_address_sent = true; + ret = drm_dp_send_link_address(mstb->mgr, port->mstb); + if (ret < 0) + port->mstb->link_address_sent = false; } }
@@ -1198,8 +1200,11 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; if (!mstb->link_address_sent) { - drm_dp_send_link_address(mgr, mstb); + int ret; mstb->link_address_sent = true; + ret = drm_dp_send_link_address(mgr, mstb); + if (ret < 0) + mstb->link_address_sent = false; } list_for_each_entry(port, &mstb->ports, next) { if (port->input) @@ -1493,11 +1498,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); } + ret = 0; } else DRM_DEBUG_KMS("link address failed %d\n", ret);
kfree(txmsg); - return 0; + return ret; }
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
On Sun, Sep 06, 2015 at 06:53:00PM +1000, Dave Airlie wrote:
There is a race where the reply could get processed by another work queue before we've updated the state.
Update the state before sending the msg to close it.
v2: reset the value if the return indicates we haven't sent the msg.
Pointed out by Adam J Richter on
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91481 Signed-off-by: Dave Airlie airlied@redhat.com
Ok we seem to have two callers of drm_dp_send_link_address: - drm_dp_add_port, but the only caller of that in turn is again drm_dp_send_link_address. This is just self-recursion. - drm_dp_check_and_send_link_address, which in turn is called only by itself and by drm_dp_mst_link_probe_work.
drm_dp_mst_link_probe_work is only called from the mgr->work function, and a single work item is never run concurrently.
I don't see any race here at all. A good cleanup though would be what Adam's original patch has done and move the link_address_sent = true into drm_dp_send_link_address.
Cheers, Daniel
drivers/gpu/drm/drm_dp_mst_topology.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 294d904..831096b9 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1101,8 +1101,10 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
ret = drm_dp_port_setup_pdt(port); if (ret == true) {
drm_dp_send_link_address(mstb->mgr, port->mstb); port->mstb->link_address_sent = true;
ret = drm_dp_send_link_address(mstb->mgr, port->mstb);
if (ret < 0)
port->mstb->link_address_sent = false; } }
@@ -1198,8 +1200,11 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m struct drm_dp_mst_port *port; struct drm_dp_mst_branch *mstb_child; if (!mstb->link_address_sent) {
drm_dp_send_link_address(mgr, mstb);
int ret; mstb->link_address_sent = true;
ret = drm_dp_send_link_address(mgr, mstb);
if (ret < 0)
mstb->link_address_sent = false; } list_for_each_entry(port, &mstb->ports, next) { if (port->input)
@@ -1493,11 +1498,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, } (*mgr->cbs->hotplug)(mgr); }
ret = 0;
} else DRM_DEBUG_KMS("link address failed %d\n", ret);
kfree(txmsg);
- return 0;
+ return ret;
}
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2.4.3
dri-devel mailing list dri-devel@lists.freedesktop.org http://lists.freedesktop.org/mailman/listinfo/dri-devel
On 30 September 2015 at 17:44, Daniel Vetter daniel@ffwll.ch wrote:
On Sun, Sep 06, 2015 at 06:53:00PM +1000, Dave Airlie wrote:
There is a race where the reply could get processed by another work queue before we've updated the state.
Update the state before sending the msg to close it.
v2: reset the value if the return indicates we haven't sent the msg.
Pointed out by Adam J Richter on
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91481 Signed-off-by: Dave Airlie airlied@redhat.com
Ok we seem to have two callers of drm_dp_send_link_address:
- drm_dp_add_port, but the only caller of that in turn is again drm_dp_send_link_address. This is just self-recursion.
- drm_dp_check_and_send_link_address, which in turn is called only by itself and by drm_dp_mst_link_probe_work.
drm_dp_mst_link_probe_work is only called from the mgr->work function, and a single work item is never run concurrently.
I don't see any race here at all. A good cleanup though would be what Adam's original patch has done and move the link_address_sent = true into drm_dp_send_link_address.
I think we were being paranoid about the work queue vs the send_link_address; if you can't have multiple then it matters not.
But I'll repost the cleaned up version as it makes things easier to grok.
Dave.
On Sun, Sep 06, 2015 at 06:52:59PM +1000, Dave Airlie wrote:
This just drops the return value and the assignment of it to a len variable that never gets used.
Signed-off-by: Dave Airlie airlied@redhat.com
Reviewed-by: Daniel Vetter daniel.vetter@ffwll.ch
drivers/gpu/drm/drm_dp_mst_topology.c | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index e23df5f..294d904 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -631,7 +631,7 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw, } }
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) +static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes) { struct drm_dp_sideband_msg_req_body req;
@@ -641,20 +641,17 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 req.u.dpcd_write.num_bytes = num_bytes; req.u.dpcd_write.bytes = bytes; drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg) +static void build_link_address(struct drm_dp_sideband_msg_tx *msg) { struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_LINK_ADDRESS; drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) +static void build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) { struct drm_dp_sideband_msg_req_body req;
@@ -662,10 +659,9 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por req.u.port_num.port_number = port_num; drm_dp_encode_sideband_req(&req, msg); msg->path_msg = true;
- return 0;
}
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, +static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num, u8 vcpi, uint16_t pbn) { struct drm_dp_sideband_msg_req_body req; @@ -676,7 +672,6 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n req.u.allocate_payload.pbn = pbn; drm_dp_encode_sideband_req(&req, msg); msg->path_msg = true;
- return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, @@ -1461,7 +1456,6 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) {
- int len; struct drm_dp_sideband_msg_tx *txmsg; int ret;
@@ -1470,7 +1464,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, return -ENOMEM;
txmsg->dst = mstb;
- len = build_link_address(txmsg);
build_link_address(txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -1510,7 +1504,6 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port) {
- int len; struct drm_dp_sideband_msg_tx *txmsg; int ret;
@@ -1519,7 +1512,7 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, return -ENOMEM;
txmsg->dst = mstb;
- len = build_enum_path_resources(txmsg, port->port_num);
build_enum_path_resources(txmsg, port->port_num);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -1547,7 +1540,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb;
- int len, ret;
int ret;
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); if (!mstb)
@@ -1560,9 +1553,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, }
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port->port_num,
id,
pbn);
build_allocate_payload(txmsg, port->port_num,
id, pbn);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -1772,7 +1764,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes) {
- int len; int ret; struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb;
@@ -1787,7 +1778,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, goto fail_put; }
- len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
build_dpcd_write(txmsg, port->port_num, offset, size, bytes); txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
-- 2.4.3
dri-devel mailing list dri-devel@lists.freedesktop.org http://lists.freedesktop.org/mailman/listinfo/dri-devel
dri-devel@lists.freedesktop.org