Scope the HDCP and DM IRQ work handlers with drm_dev_enter()/drm_dev_exit() to allow work already in flight to complete while blocking any further HW accesses after the device's PCI remove.
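Every handler converted below follows the same guard shape. A minimal sketch of that pattern is shown here (hypothetical my_work/my_work_func, not code from this patch), assuming the usual drm_dev_enter()/drm_dev_exit() semantics: enter fails once drm_dev_unplug() has been called, and an already-entered section is drained before the unplug completes.

    #include <drm/drm_drv.h>
    #include <linux/workqueue.h>

    /* Hypothetical example for illustration only. */
    struct my_work {
    	struct work_struct work;
    	struct drm_device *ddev;
    };

    static void my_work_func(struct work_struct *work)
    {
    	struct my_work *w = container_of(work, struct my_work, work);
    	int idx;

    	/* Fails once drm_dev_unplug() has run - skip all HW access. */
    	if (!drm_dev_enter(w->ddev, &idx))
    		return;

    	/* ... register reads/writes are safe only inside this section ... */

    	drm_dev_exit(idx);
    }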
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
 .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.c    | 124 +++++++++++-------
 .../drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c |  24 +++-
 2 files changed, 98 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 0cdbfcd475ec..81ea5a1ea46b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -28,6 +28,7 @@
 #include "amdgpu_dm.h"
 #include "dm_helpers.h"
 #include <drm/drm_hdcp.h>
+#include <drm/drm_drv.h>
 #include "hdcp_psp.h"
 
 /*
@@ -260,20 +261,27 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index
 static void event_callback(struct work_struct *work)
 {
 	struct hdcp_workqueue *hdcp_work;
+	int idx;
 
 	hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
				      callback_dwork);
 
-	mutex_lock(&hdcp_work->mutex);
+	if (drm_dev_enter(hdcp_work->aconnector->base.dev, &idx)) {
 
-	cancel_delayed_work(&hdcp_work->callback_dwork);
+		mutex_lock(&hdcp_work->mutex);
 
-	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
-			       &hdcp_work->output);
+		cancel_delayed_work(&hdcp_work->callback_dwork);
+
+		mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
+				       &hdcp_work->output);
 
-	process_output(hdcp_work);
+		process_output(hdcp_work);
 
-	mutex_unlock(&hdcp_work->mutex);
+		mutex_unlock(&hdcp_work->mutex);
+
+		drm_dev_exit(idx);
+
+	}
 }
 
@@ -284,34 +292,41 @@ static void event_property_update(struct work_struct *work)
 	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
 	struct drm_device *dev = hdcp_work->aconnector->base.dev;
 	long ret;
+	int idx;
+
+	if (drm_dev_enter(dev, &idx)) {
+
+		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+		mutex_lock(&hdcp_work->mutex);
 
-	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-	mutex_lock(&hdcp_work->mutex);
+		if (aconnector->base.state->commit) {
+			ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
 
-	if (aconnector->base.state->commit) {
-		ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+			if (ret == 0) {
+				DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
+				hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+			}
+		}
 
-		if (ret == 0) {
-			DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
-			hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+		if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
+			if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 &&
+			    hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
+				drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+			else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 &&
+				 hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
+				drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
+		} else {
+			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
 		}
-	}
 
-	if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
-		if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 &&
-		    hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
-			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
-		else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 &&
-			 hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
-			drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
-	} else {
-		drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
-	}
 
+		mutex_unlock(&hdcp_work->mutex);
+		drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+		drm_dev_exit(idx);
 
-	mutex_unlock(&hdcp_work->mutex);
-	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	}
 }
 
 static void event_property_validate(struct work_struct *work)
@@ -320,58 +335,77 @@
 		container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
 	struct mod_hdcp_display_query query;
 	struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
+	int idx;
 
 	if (!aconnector)
 		return;
 
-	mutex_lock(&hdcp_work->mutex);
+	if (drm_dev_enter(hdcp_work->aconnector->base.dev, &idx)) {
 
-	query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-	mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+		mutex_lock(&hdcp_work->mutex);
 
-	if (query.encryption_status != hdcp_work->encryption_status) {
-		hdcp_work->encryption_status = query.encryption_status;
-		schedule_work(&hdcp_work->property_update_work);
-	}
+		query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+		mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+
+		if (query.encryption_status != hdcp_work->encryption_status) {
+			hdcp_work->encryption_status = query.encryption_status;
+			schedule_work(&hdcp_work->property_update_work);
+		}
+
+		mutex_unlock(&hdcp_work->mutex);
 
-	mutex_unlock(&hdcp_work->mutex);
+		drm_dev_exit(idx);
+
+	}
 }
 
 static void event_watchdog_timer(struct work_struct *work)
 {
 	struct hdcp_workqueue *hdcp_work;
+	int idx;
 
 	hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, watchdog_timer_dwork);
 
-	mutex_lock(&hdcp_work->mutex);
+	if (drm_dev_enter(hdcp_work->aconnector->base.dev, &idx)) {
 
-	cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+		mutex_lock(&hdcp_work->mutex);
 
-	mod_hdcp_process_event(&hdcp_work->hdcp,
-			       MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
-			       &hdcp_work->output);
+		cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
 
-	process_output(hdcp_work);
+		mod_hdcp_process_event(&hdcp_work->hdcp,
+				       MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
+				       &hdcp_work->output);
 
-	mutex_unlock(&hdcp_work->mutex);
+		process_output(hdcp_work);
+
+		mutex_unlock(&hdcp_work->mutex);
+
+		drm_dev_exit(idx);
+	}
 }
 
 static void event_cpirq(struct work_struct *work)
 {
 	struct hdcp_workqueue *hdcp_work;
+	int idx;
 
 	hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work);
 
-	mutex_lock(&hdcp_work->mutex);
+	if (drm_dev_enter(hdcp_work->aconnector->base.dev, &idx)) {
 
-	mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
+		mutex_lock(&hdcp_work->mutex);
 
-	process_output(hdcp_work);
+		mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output);
 
-	mutex_unlock(&hdcp_work->mutex);
+		process_output(hdcp_work);
+
+		mutex_unlock(&hdcp_work->mutex);
+
+		drm_dev_exit(idx);
+	}
 }
 
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index e0000c180ed1..d8ee552d373e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -30,6 +30,8 @@
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_irq.h"
 
+#include <drm/drm_drv.h>
+
 /**
  * DOC: overview
  *
@@ -115,15 +117,27 @@ static void dm_irq_work_func(struct work_struct *work)
 		container_of(work, struct irq_list_head, work);
 	struct list_head *handler_list = &irq_list_head->head;
 	struct amdgpu_dm_irq_handler_data *handler_data;
+	int idx;
+
+
+	handler_data = list_first_entry_or_null(handler_list, struct amdgpu_dm_irq_handler_data, list);
+
+	if (!handler_data)
+		return;
 
-	list_for_each_entry(handler_data, handler_list, list) {
-		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+	if (drm_dev_enter(handler_data->dm->ddev, &idx)) {
+
+		list_for_each_entry(handler_data, handler_list, list) {
+			DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+					handler_data->irq_source);
+
+			DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
 				handler_data->irq_source);
 
-		DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
-				handler_data->irq_source);
+			handler_data->handler(handler_data->handler_arg);
+		}
 
-		handler_data->handler(handler_data->handler_arg);
+		drm_dev_exit(idx);
 	}
 
 	/* Call a DAL subcomponent which registered for interrupt notification