Hi
On 15.04.21 at 13:38, Christian König wrote:
On 15.04.21 at 12:17, Thomas Zimmermann wrote:
Moving the driver-specific mmap code into a GEM object function allows for using DRM helpers for various mmap callbacks.
This change resolves several inconsistencies between regular mmap and prime-based mmap. The vm_ops field in vma is now set for all mmap'ed areas. Previously it was only set for regular mmap calls; prime-based mmap used TTM's default vm_ops. The function amdgpu_verify_access() is no longer called and is therefore removed by this patch.
As a side effect, amdgpu_ttm_vm_ops and amdgpu_ttm_fault() are now implemented in amdgpu's GEM code.
v2:
* rename amdgpu_ttm_vm_ops and amdgpu_ttm_fault() to amdgpu_gem_vm_ops and amdgpu_gem_fault() (Christian)
* the check for kfd_bo has meanwhile been removed
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
[SNIP]
+static int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
+                                 struct vm_area_struct *vma)
+{
Mhm, just double-checking: this function is now a core GEM function and not prime-specific?
If yes, maybe drop the _prime part.
No problem, but other functions in the GEM callbacks are also named _prime_. Probably needs a clean-up.
+        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+        unsigned long asize = amdgpu_bo_size(bo);
+
+        if (!vma->vm_file)
+                return -ENODEV;
+
+        if (!adev)
+                return -ENODEV;
+
+        /* Check for valid size. */
+        if (asize < vma->vm_end - vma->vm_start)
+                return -EINVAL;
+
Shouldn't we have that check in the common code?
It's at [1]. I didn't really bother about tidying up the checks themselves. We can drop all these except for the usermm branch below. Same for the radeon patch.
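For reference, the common check looks roughly like this (paraphrased from drm_gem_mmap_obj() in drm_gem.c, see [1]; not copied verbatim):

        /* in drm_gem_mmap_obj(), before the driver's mmap callback runs */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

so the per-driver copies of the size check should be safe to drop.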
Best regards
Thomas
[1] https://elixir.bootlin.com/linux/v5.12-rc7/source/drivers/gpu/drm/drm_gem.c#...
Apart from that looks good to me.
Christian.
+        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+            (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+                return -EPERM;
+        }
+
+        return drm_gem_ttm_mmap(obj, vma);
+}
+
 static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
         .free = amdgpu_gem_object_free,
         .open = amdgpu_gem_object_open,
@@ -212,6 +266,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
         .export = amdgpu_gem_prime_export,
         .vmap = drm_gem_ttm_vmap,
         .vunmap = drm_gem_ttm_vunmap,
+        .mmap = amdgpu_gem_prime_mmap,
+        .vm_ops = &amdgpu_gem_vm_ops,
 };
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1485f33c3cc7..d4083c19402b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -152,25 +152,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
         *placement = abo->placement;
 }
 
-/**
- * amdgpu_verify_access - Verify access for a mmap call
- *
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
- *
- * This is called by ttm_bo_mmap() to verify whether a process
- * has the right to mmap a BO to their process space.
- */
-static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
-        struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-
-        if (amdgpu_ttm_tt_get_usermm(bo->ttm))
-                return -EPERM;
-
-        return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
-                                          filp->private_data);
-}
-
 /**
  * amdgpu_ttm_map_buffer - Map memory into the GART windows
  * @bo: buffer object to map
@@ -1522,7 +1503,6 @@ static struct ttm_device_funcs amdgpu_bo_driver = {
         .eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
         .evict_flags = &amdgpu_evict_flags,
         .move = &amdgpu_bo_move,
-        .verify_access = &amdgpu_verify_access,
         .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
         .release_notify = &amdgpu_bo_release_notify,
         .io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
@@ -1897,50 +1877,6 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
         adev->mman.buffer_funcs_enabled = enable;
 }
 
-static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
-{
-        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
-        vm_fault_t ret;
-
-        ret = ttm_bo_vm_reserve(bo, vmf);
-        if (ret)
-                return ret;
-
-        ret = amdgpu_bo_fault_reserve_notify(bo);
-        if (ret)
-                goto unlock;
-
-        ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-                                       TTM_BO_VM_NUM_PREFAULT, 1);
-        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-                return ret;
-
-unlock:
-        dma_resv_unlock(bo->base.resv);
-        return ret;
-}
-
-static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
-        .fault = amdgpu_ttm_fault,
-        .open = ttm_bo_vm_open,
-        .close = ttm_bo_vm_close,
-        .access = ttm_bo_vm_access
-};
-
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-        struct drm_file *file_priv = filp->private_data;
-        struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
-        int r;
-
-        r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
-        if (unlikely(r != 0))
-                return r;
-
-        vma->vm_ops = &amdgpu_ttm_vm_ops;
-        return 0;
-}
-
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                        uint64_t dst_offset, uint32_t byte_count,
                        struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index dec0db8b0b13..6e51faad7371 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -146,7 +146,6 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        struct dma_resv *resv,
                        struct dma_fence **fence);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
 int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);