On Tue, Jun 13, 2017 at 09:23:45AM -0400, Rob Clark wrote:
Most, but not all, paths were calling hw_init() with struct_mutex held. The fast path in msm_gem_get_iova() (plus some sub-code-paths that only run the first time) was masking this issue.
So let's just always hold struct_mutex for hw_init(), and sprinkle some WARN_ON()'s and might_lock() annotations to avoid this sort of problem in the future.
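
For illustration only (not part of the patch): the locking pattern these annotations enforce looks roughly like the sketch below. foo_obj, foo_get() and foo_get_locked() are hypothetical stand-ins for the msm GEM code.

#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct foo_obj {
	struct mutex lock;	/* stand-in for dev->struct_mutex */
	u64 cached;		/* set once under the lock, never cleared */
};

/* _locked variant: caller must already hold the lock. */
static int foo_get_locked(struct foo_obj *obj, u64 *out)
{
	WARN_ON(!mutex_is_locked(&obj->lock));

	if (!obj->cached) {
		/* ... first-time setup, protected by the lock ... */
	}

	*out = obj->cached;
	return 0;
}

/* Unlocked variant with a lock-free fast path. */
static int foo_get(struct foo_obj *obj, u64 *out)
{
	int ret;

	if (obj->cached) {
		/*
		 * Fast path: safe because 'cached' is never torn down
		 * while the object lives.  might_lock() tells lockdep
		 * we *could* take the lock here, so callers that only
		 * ever hit this path still get checked:
		 */
		might_lock(&obj->lock);
		*out = obj->cached;
		return 0;
	}

	mutex_lock(&obj->lock);
	ret = foo_get_locked(obj, out);
	mutex_unlock(&obj->lock);

	return ret;
}

That is the shape of msm_gem_get_iova()/msm_gem_get_iova_locked() below: the WARN_ON() catches callers of the _locked variant that forgot the mutex, and the might_lock() keeps the fast path from hiding them.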
Signed-off-by: Rob Clark <robdclark@gmail.com>
Acked-by: Jordan Crouse <jcrouse@codeaurora.org>
---
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c      | 13 +++++--------
 drivers/gpu/drm/msm/adreno/a5xx_power.c    | 11 ++++-------
 drivers/gpu/drm/msm/adreno/adreno_device.c |  2 ++
 drivers/gpu/drm/msm/adreno/adreno_gpu.c    |  2 +-
 drivers/gpu/drm/msm/msm_gem.c              |  3 +++
 drivers/gpu/drm/msm/msm_gpu.c              |  2 ++
 6 files changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index fc9a81a..d5d6198 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -386,31 +386,28 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
 	struct drm_gem_object *bo;
 	void *ptr;
 
-	mutex_lock(&drm->struct_mutex);
 	bo = msm_gem_new(drm, fw->size - 4, MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
-
 	if (IS_ERR(bo))
 		return bo;
 
-	ptr = msm_gem_get_vaddr(bo);
+	ptr = msm_gem_get_vaddr_locked(bo);
 	if (!ptr) {
-		drm_gem_object_unreference_unlocked(bo);
+		drm_gem_object_unreference(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	if (iova) {
-		int ret = msm_gem_get_iova(bo, gpu->id, iova);
+		int ret = msm_gem_get_iova_locked(bo, gpu->id, iova);
 
 		if (ret) {
-			drm_gem_object_unreference_unlocked(bo);
+			drm_gem_object_unreference(bo);
 			return ERR_PTR(ret);
 		}
 	}
 
 	memcpy(ptr, &fw->data[4], fw->size - 4);
 
-	msm_gem_put_vaddr(bo);
+	msm_gem_put_vaddr_locked(bo);
 	return bo;
 }
 
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 72d52c7..7838f5f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -294,17 +294,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	 */
 	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
 
-	mutex_lock(&drm->struct_mutex);
 	a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize, MSM_BO_UNCACHED);
-	mutex_unlock(&drm->struct_mutex);
-
 	if (IS_ERR(a5xx_gpu->gpmu_bo))
 		goto err;
 
-	if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
+	if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova))
 		goto err;
 
-	ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo);
+	ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
 	if (!ptr)
 		goto err;
 
@@ -323,7 +320,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 		cmds_size -= _size;
 	}
 
-	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
+	msm_gem_put_vaddr_locked(a5xx_gpu->gpmu_bo);
 	a5xx_gpu->gpmu_dwords = dwords;
 
 	goto out;
@@ -332,7 +329,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
 	if (a5xx_gpu->gpmu_iova)
 		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
 	if (a5xx_gpu->gpmu_bo)
-		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+		drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
 
 	a5xx_gpu->gpmu_bo = NULL;
 	a5xx_gpu->gpmu_iova = 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7345a01..56fb59f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -158,7 +158,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
 		int ret;
 
 		pm_runtime_get_sync(&pdev->dev);
+		mutex_lock(&dev->struct_mutex);
 		ret = msm_gpu_hw_init(gpu);
+		mutex_unlock(&dev->struct_mutex);
 		pm_runtime_put_sync(&pdev->dev);
 		if (ret) {
 			dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 5b63fc6..9e08b3e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
 	if (ret) {
 		gpu->rb_iova = 0;
 		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 68e509b..139be54 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -314,6 +314,8 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
 
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
 	if (!msm_obj->domain[id].iova) {
 		struct msm_drm_private *priv = obj->dev->dev_private;
 		struct page **pages = get_pages(obj);
@@ -345,6 +347,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
 	 * bo is deleted:
 	 */
 	if (msm_obj->domain[id].iova) {
+		might_lock(&obj->dev->struct_mutex);
 		*iova = msm_obj->domain[id].iova;
 		return 0;
 	}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 97b9c38..9fceda8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -203,6 +203,8 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
 {
 	int ret;
 
+	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
+
 	if (!gpu->needs_hw_init)
 		return 0;
 
-- 
2.9.4