Still doesn't like this very much, but anyway
Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
On 8/2/19 7:22 AM, Gerd Hoffmann wrote:
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 8 ++++---- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index fc6673cde289..917eeb793585 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	/* Buffer objects need to be either pinned or reserved: */
 	if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		lockdep_assert_held(&dst->resv->lock.base);
+		lockdep_assert_held(&dst->base.resv->lock.base);
 	if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
-		lockdep_assert_held(&src->resv->lock.base);
+		lockdep_assert_held(&src->base.resv->lock.base);

 	if (dst->ttm->state == tt_unpopulated) {
 		ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 0d9478d2e700..4a38ab0733c4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -342,7 +342,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin) uint32_t old_mem_type = bo->mem.mem_type; int ret;
-	lockdep_assert_held(&bo->resv->lock.base);
+	lockdep_assert_held(&bo->base.resv->lock.base);

 	if (pin) {
 		if (vbo->pin_count++ > 0)
@@ -690,7 +690,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, long lret;
 		lret = reservation_object_wait_timeout_rcu
-			(bo->resv, true, true,
+			(bo->base.resv, true, true,
 			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);

 		if (!lret)
 			return -EBUSY;
@@ -1007,10 +1007,10 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
 	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		reservation_object_add_excl_fence(bo->base.resv, &fence->base);
 		dma_fence_put(&fence->base);
 	} else
-		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		reservation_object_add_excl_fence(bo->base.resv, &fence->base);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index b4f6e1217c9d..e142714f132c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -169,7 +169,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res) } *cmd;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	lockdep_assert_held(&bo->resv->lock.base);
+	lockdep_assert_held(&bo->base.resv->lock.base);

 	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 	if (!cmd)
@@ -311,7 +311,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res, return 0;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-	lockdep_assert_held(&bo->resv->lock.base);
+	lockdep_assert_held(&bo->base.resv->lock.base);

 	mutex_lock(&dev_priv->binding_mutex);
 	if (!vcotbl->scrubbed)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 1d38a8b2f2ec..ccd7f758bf8c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -402,14 +402,14 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (switch_backup && new_backup != res->backup) {
 		if (res->backup) {
-			lockdep_assert_held(&res->backup->base.resv->lock.base);
+			lockdep_assert_held(&res->backup->base.base.resv->lock.base);
 			list_del_init(&res->mob_head);
 			vmw_bo_unreference(&res->backup);
 		}

 		if (new_backup) {
 			res->backup = vmw_bo_reference(new_backup);
-			lockdep_assert_held(&new_backup->base.resv->lock.base);
+			lockdep_assert_held(&new_backup->base.base.resv->lock.base);
 			list_add_tail(&res->mob_head, &new_backup->res_list);
 		} else {
 			res->backup = NULL;
@@ -691,7 +691,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) .num_shared = 0 };
-	lockdep_assert_held(&vbo->base.resv->lock.base);
+	lockdep_assert_held(&vbo->base.base.resv->lock.base);

 	list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
 		if (!res->func->unbind)
 			continue;