From: Christian König <christian.koenig@amd.com>
This reverts commit c02196834456f2d5fad334088b70e98ce4967c34.
In the meantime we moved get_user_pages() outside of the reservation lock, so that shouldn't be an issue any more.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index d7ec9bd..c47f2222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -48,7 +48,8 @@ struct amdgpu_mn {
 	/* protected by adev->mn_lock */
 	struct hlist_node	node;
 
-	/* objects protected by mm->mmap_sem */
+	/* objects protected by lock */
+	struct mutex		lock;
 	struct rb_root		objects;
 };
 
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
@@ -126,6 +127,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
+	mutex_lock(&rmn->lock);
+
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
@@ -160,6 +163,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 			amdgpu_bo_unreserve(bo);
 		}
 	}
+
+	mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
@@ -196,6 +201,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
+	mutex_init(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +248,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -256,7 +262,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			up_write(&rmn->mm->mmap_sem);
+			mutex_unlock(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -271,7 +277,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 
 	return 0;
 }
@@ -297,7 +303,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -312,6 +318,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
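The net effect is that the interval tree of userptr objects is guarded by its own mutex instead of mm->mmap_sem: both the register path and the notifier path serialize on the same private lock, so neither has to care who holds mmap_sem. Below is a minimal, compilable userspace sketch of just that pattern (builds with cc -pthread). It is not the driver code: a pthread mutex and a linked list stand in for the driver's struct mutex and interval tree, and all names are made up.

/*
 * Not the driver code: a userspace sketch of the locking pattern
 * above. pthread_mutex_t and a linked list stand in for the driver's
 * struct mutex and interval tree; all names are made up.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mn_node {
	unsigned long start, last;	/* inclusive interval, like it.start/it.last */
	struct mn_node *next;
};

struct mn {
	pthread_mutex_t lock;		/* plays the role of rmn->lock */
	struct mn_node *objects;	/* plays the role of rmn->objects */
};

/* insert an interval under the private lock (cf. amdgpu_mn_register) */
static int mn_register(struct mn *mn, unsigned long start, unsigned long last)
{
	struct mn_node *node = malloc(sizeof(*node));

	if (!node)
		return -1;
	node->start = start;
	node->last = last;

	pthread_mutex_lock(&mn->lock);
	node->next = mn->objects;
	mn->objects = node;
	pthread_mutex_unlock(&mn->lock);
	return 0;
}

/* walk the intervals under the same lock (cf. invalidate_range_start) */
static void mn_invalidate_range(struct mn *mn, unsigned long start,
				unsigned long end)
{
	struct mn_node *node;

	end -= 1;	/* notification is exclusive, interval is inclusive */

	pthread_mutex_lock(&mn->lock);
	for (node = mn->objects; node; node = node->next) {
		if (node->start <= end && node->last >= start)
			printf("invalidate [%#lx, %#lx]\n",
			       node->start, node->last);
	}
	pthread_mutex_unlock(&mn->lock);
}

int main(void)
{
	struct mn mn = { PTHREAD_MUTEX_INITIALIZER, NULL };

	mn_register(&mn, 0x1000, 0x1fff);
	mn_invalidate_range(&mn, 0x1800, 0x2000);
	return 0;
}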
From: Christian König <christian.koenig@amd.com>
Otherwise we can run into problems with the writeback code.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 98 +++++++++++++++++++++++++---------
 1 file changed, 72 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index c47f2222..9f4a45c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -106,6 +106,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 }
 
 /**
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
+ * @start: start of the affected range
+ * @end: end of the affected range
+ *
+ * We block for all BOs and unmap them by moving them into the system
+ * domain again.
+ */
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+				      unsigned long start,
+				      unsigned long end)
+{
+	struct amdgpu_bo *bo;
+	long r;
+
+	list_for_each_entry(bo, &node->bos, mn_list) {
+
+		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
+			continue;
+
+		r = amdgpu_bo_reserve(bo, true);
+		if (r) {
+			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+			continue;
+		}
+
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+			true, false, MAX_SCHEDULE_TIMEOUT);
+		if (r <= 0)
+			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		if (r)
+			DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+		amdgpu_bo_unreserve(bo);
+	}
+}
+
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @address: address of the invalidated page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it and
+ * unmaps them by moving them into the system domain again.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+				      struct mm_struct *mm,
+				      unsigned long address)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct interval_tree_node *it;
+
+	mutex_lock(&rmn->lock);
+
+	it = interval_tree_iter_first(&rmn->objects, address, address);
+	if (it) {
+		struct amdgpu_mn_node *node;
+
+		node = container_of(it, struct amdgpu_mn_node, it);
+		amdgpu_mn_invalidate_node(node, address, address);
+	}
+
+	mutex_unlock(&rmn->lock);
+}
+
+/**
  * amdgpu_mn_invalidate_range_start - callback to notify about mm change
  *
  * @mn: our notifier
@@ -132,36 +202,11 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
-		struct amdgpu_bo *bo;
-		long r;
 
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
-		list_for_each_entry(bo, &node->bos, mn_list) {
-
-			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
-							  end))
-				continue;
-
-			r = amdgpu_bo_reserve(bo, true);
-			if (r) {
-				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-				continue;
-			}
-
-			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-				true, false, MAX_SCHEDULE_TIMEOUT);
-			if (r <= 0)
-				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
-			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-			if (r)
-				DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
-			amdgpu_bo_unreserve(bo);
-		}
+		amdgpu_mn_invalidate_node(node, start, end);
 	}
 
 	mutex_unlock(&rmn->lock);
@@ -169,6 +214,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
+	.invalidate_page = amdgpu_mn_invalidate_page,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
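The refactoring is easier to see stripped of the TTM details. Below is a minimal, compilable sketch with made-up stand-in types, not the driver code: one helper carries the invalidation work, the single-page callback calls it with a one-page interval, and the range callback converts its exclusive end to an inclusive one. Keeping that conversion in exactly one place per callback avoids the classic off-by-one when the same helper serves both entry points.

/*
 * Not the driver code: a sketch of the refactoring above, with
 * made-up stand-in types. One helper does the work; the single-page
 * callback is the range callback with a one-page interval.
 */
#include <stdio.h>

struct node {
	unsigned long start, last;	/* inclusive on both ends */
};

/* cf. amdgpu_mn_invalidate_node: all the heavy lifting lives here */
static void invalidate_node(struct node *node, unsigned long start,
			    unsigned long end)
{
	if (node->last < start || node->start > end)
		return;		/* no overlap, nothing to unmap */
	printf("unmap [%#lx, %#lx] hit by [%#lx, %#lx]\n",
	       node->start, node->last, start, end);
}

/* cf. amdgpu_mn_invalidate_page: a single address is a one-entry interval */
static void invalidate_page(struct node *node, unsigned long address)
{
	invalidate_node(node, address, address);
}

/* cf. amdgpu_mn_invalidate_range_start: exclusive end becomes inclusive */
static void invalidate_range_start(struct node *node, unsigned long start,
				   unsigned long end)
{
	invalidate_node(node, start, end - 1);
}

int main(void)
{
	struct node n = { 0x1000, 0x1fff };

	invalidate_page(&n, 0x1234);
	invalidate_range_start(&n, 0x1000, 0x2000);
	return 0;
}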
On Fri, Mar 18, 2016 at 2:29 PM, Christian König <deathsimple@vodafone.de> wrote:
> From: Christian König <christian.koenig@amd.com>
>
> Otherwise we can run into problems with the writeback code.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
Applied the series.
Thanks!
Alex