From: Christian König <christian.koenig@amd.com>
This patch adds an IOCTL for turning a pointer supplied by userspace into a buffer object.
It imposes several restrictions upon the memory being mapped:
1. It must be page aligned (both the start/end addresses, i.e., ptr and size).
2. It must be normal system memory, not a pointer into another map of IO space (e.g., it must not be a GTT mapping of another object).
3. The BO is mapped into GTT, so the maximum amount of memory mapped at all times is still the GTT limit.
4. The BO is only mapped read-only for now, so no write support.
5. The list of backing pages is acquired only once, so it represents a snapshot of the first use.
Exporting and sharing, as well as mapping of buffer objects created by this function, is forbidden and results in -EPERM.
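For illustration only (not part of this patch): a minimal userspace sketch of how the new interface could be driven, assuming libdrm's drmIoctl() and the DRM_IOCTL_RADEON_GEM_USERPTR / struct drm_radeon_gem_userptr definitions added to radeon_drm.h below; the helper name, header path and error handling are made up.

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/radeon_drm.h>

    /* Wrap an existing, page aligned user buffer into a GEM handle.
     * 'fd' is an open radeon DRM fd; 'ptr' and 'size' must both be
     * page aligned, as required by the restrictions above. */
    static int radeon_userptr_create(int fd, void *ptr, uint64_t size,
                                     uint32_t *handle)
    {
        struct drm_radeon_gem_userptr args;

        memset(&args, 0, sizeof(args));
        args.addr  = (uintptr_t)ptr;
        args.size  = size;
        args.flags = RADEON_GEM_USERPTR_READONLY; /* only mode accepted for now */

        if (drmIoctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args))
            return -errno;

        *handle = args.handle;
        return 0;
    }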
v2: squash all previous changes into first public version
v3: fix tabs, map readonly, don't use MM callback any more
v4: set TTM_PAGE_FLAG_SG so that TTM never messes with the pages, pin/unpin pages on bind/unbind instead of populate/unpopulate
v5: rebased on 3.17-wip, IOCTL renamed to userptr, reject any unknown flags, better handle READONLY flag, improve permission check
v6: fix ptr cast warning, use set_page_dirty/mark_page_accessed on unpin
v7: add warning about its availability in the API definition
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v4)
Reviewed-by: Jérôme Glisse <jglisse@redhat.com> (v4)
---
 drivers/gpu/drm/radeon/radeon.h        |   5 ++
 drivers/gpu/drm/radeon/radeon_cs.c     |  25 +++++-
 drivers/gpu/drm/radeon/radeon_drv.c    |   5 +-
 drivers/gpu/drm/radeon/radeon_gem.c    |  68 ++++++++++++++++
 drivers/gpu/drm/radeon/radeon_kms.c    |   1 +
 drivers/gpu/drm/radeon/radeon_object.c |   3 +
 drivers/gpu/drm/radeon/radeon_prime.c  |  10 +++
 drivers/gpu/drm/radeon/radeon_ttm.c    | 139 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/radeon/radeon_vm.c     |   3 +
 include/uapi/drm/radeon_drm.h          |  16 ++++
 10 files changed, 272 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9e1732e..3c6999e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -2138,6 +2138,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int radeon_gem_create_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); int radeon_gem_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data, @@ -2871,6 +2873,9 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); +extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, + uint32_t flags); +extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm); extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ee712c1..1321491 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -78,7 +78,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) struct radeon_cs_chunk *chunk; struct radeon_cs_buckets buckets; unsigned i, j; - bool duplicate; + bool duplicate, need_mmap_lock = false; + int r;
if (p->chunk_relocs_idx == -1) { return 0; @@ -164,6 +165,19 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) p->relocs[i].allowed_domains = domain; }
+ if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { + uint32_t domain = p->relocs[i].prefered_domains; + if (!(domain & RADEON_GEM_DOMAIN_GTT)) { + DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " + "allowed for userptr BOs\n"); + return -EINVAL; + } + need_mmap_lock = true; + domain = RADEON_GEM_DOMAIN_GTT; + p->relocs[i].prefered_domains = domain; + p->relocs[i].allowed_domains = domain; + } + p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; p->relocs[i].handle = r->handle;
@@ -176,8 +190,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) if (p->cs_flags & RADEON_CS_USE_VM) p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, &p->validated); + if (need_mmap_lock) + down_read(¤t->mm->mmap_sem); + + r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
- return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); + if (need_mmap_lock) + up_read(¤t->mm->mmap_sem); + + return r; }
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a773830..5b18af9 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -114,6 +114,9 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); void radeon_gem_object_close(struct drm_gem_object *obj, struct drm_file *file_priv); +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, unsigned int flags, int *vpos, int *hpos, ktime_t *stime, @@ -568,7 +571,7 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = drm_gem_prime_export, + .gem_prime_export = radeon_gem_prime_export, .gem_prime_import = drm_gem_prime_import, .gem_prime_pin = radeon_gem_prime_pin, .gem_prime_unpin = radeon_gem_prime_unpin, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index bfd7e1b..993ab22 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -272,6 +272,65 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, return 0; }
+int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + struct radeon_device *rdev = dev->dev_private; + struct drm_radeon_gem_userptr *args = data; + struct drm_gem_object *gobj; + struct radeon_bo *bo; + uint32_t handle; + int r; + + if (offset_in_page(args->addr | args->size)) + return -EINVAL; + + /* we only support read only mappings for now */ + if (!(args->flags & RADEON_GEM_USERPTR_READONLY)) + return -EACCES; + + /* reject unknown flag values */ + if (args->flags & ~RADEON_GEM_USERPTR_READONLY) + return -EINVAL; + + /* readonly pages not tested on older hardware */ + if (rdev->family < CHIP_R600) + return -EINVAL; + + down_read(&rdev->exclusive_lock); + + /* create a gem object to contain this object in */ + r = radeon_gem_object_create(rdev, args->size, 0, + RADEON_GEM_DOMAIN_CPU, 0, + false, &gobj); + if (r) + goto handle_lockup; + + bo = gem_to_radeon_bo(gobj); + r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); + if (r) + goto release_object; + + r = drm_gem_handle_create(filp, gobj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(gobj); + if (r) + goto handle_lockup; + + args->handle = handle; + up_read(&rdev->exclusive_lock); + return 0; + +release_object: + drm_gem_object_unreference_unlocked(gobj); + +handle_lockup: + up_read(&rdev->exclusive_lock); + r = radeon_gem_handle_lockup(rdev, r); + + return r; +} + int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -315,6 +374,10 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, return -ENOENT; } robj = gem_to_radeon_bo(gobj); + if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { + drm_gem_object_unreference_unlocked(gobj); + return -EPERM; + } *offset_p = radeon_bo_mmap_offset(robj); drm_gem_object_unreference_unlocked(gobj); return 0; @@ -532,6 +595,11 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, return -ENOENT; } robj = gem_to_radeon_bo(gobj); + + r = -EPERM; + if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) + goto out; + r = radeon_bo_reserve(robj, false); if (unlikely(r)) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index eb7164d..8309b11 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -885,5 +885,6 @@ const struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(RADEON_GEM_OP, radeon_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(RADEON_GEM_USERPTR, radeon_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), }; int radeon_max_kms_ioctl = ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 480c87d..c73c1e3 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -264,6 +264,9 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, { int r, i;
+ if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + return -EPERM; + if (bo->pin_count) { bo->pin_count++; if (gpu_addr) diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index f7e48d3..bb18bc7 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -103,3 +103,13 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj) radeon_bo_unpin(bo); radeon_bo_unreserve(bo); } + +struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, + struct drm_gem_object *gobj, + int flags) +{ + struct radeon_bo *bo = gem_to_radeon_bo(gobj); + if (radeon_ttm_tt_has_userptr(bo->tbo.ttm)) + return ERR_PTR(-EPERM); + return drm_gem_prime_export(dev, gobj, flags); +} diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 72afe82..0109090 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -39,6 +39,8 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/swiotlb.h> +#include <linux/swap.h> +#include <linux/pagemap.h> #include <linux/debugfs.h> #include "radeon_reg.h" #include "radeon.h" @@ -515,8 +517,96 @@ struct radeon_ttm_tt { struct ttm_dma_tt ttm; struct radeon_device *rdev; u64 offset; + + uint64_t userptr; + struct mm_struct *usermm; + uint32_t userflags; };
+/* prepare the sg table with the user pages */ +static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm) +{ + struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_ttm_tt *gtt = (void *)ttm; + unsigned pinned = 0, nents; + int r; + + int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); + enum dma_data_direction direction = write ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + if (current->mm != gtt->usermm) + return -EPERM; + + if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ, (long)gtt->userptr, + ttm->num_pages * PAGE_SIZE)) + return -EFAULT; + + do { + unsigned num_pages = ttm->num_pages - pinned; + uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; + struct page **pages = ttm->pages + pinned; + + r = get_user_pages(current, current->mm, userptr, num_pages, + write, 0, pages, NULL); + if (r < 0) + goto release_pages; + + pinned += r; + + } while (pinned < ttm->num_pages); + + r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, + ttm->num_pages << PAGE_SHIFT, + GFP_KERNEL); + if (r) + goto release_sg; + + r = -ENOMEM; + nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + if (nents != ttm->sg->nents) + goto release_sg; + + drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, + gtt->ttm.dma_address, ttm->num_pages); + + return 0; + +release_sg: + kfree(ttm->sg); + +release_pages: + release_pages(ttm->pages, pinned, 0); + return r; +} + +static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) +{ + struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); + struct radeon_ttm_tt *gtt = (void *)ttm; + struct scatterlist *sg; + int i; + + int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); + enum dma_data_direction direction = write ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + + /* free the sg table and pages again */ + dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + + for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { + struct page *page = sg_page(sg); + + if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) + set_page_dirty(page); + + mark_page_accessed(page); + page_cache_release(page); + } + + sg_free_table(ttm->sg); +} + static int radeon_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { @@ -525,6 +615,11 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm, RADEON_GART_PAGE_WRITE; int r;
+ if (gtt->userptr) { + radeon_ttm_tt_pin_userptr(ttm); + flags &= ~RADEON_GART_PAGE_WRITE; + } + gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", @@ -547,6 +642,10 @@ static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) struct radeon_ttm_tt *gtt = (void *)ttm;
radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); + + if (gtt->userptr) + radeon_ttm_tt_unpin_userptr(ttm); + return 0; }
@@ -603,6 +702,16 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) if (ttm->state != tt_unpopulated) return 0;
+ if (gtt->userptr) { + ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); + if (!ttm->sg) + return -ENOMEM; + + ttm->page_flags |= TTM_PAGE_FLAG_SG; + ttm->state = tt_unbound; + return 0; + } + if (slave && ttm->sg) { drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); @@ -652,6 +761,12 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) unsigned i; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ if (gtt->userptr) { + kfree(ttm->sg); + ttm->page_flags &= ~TTM_PAGE_FLAG_SG; + return; + } + if (slave) return;
@@ -680,6 +795,30 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); }
+int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, + uint32_t flags) +{ + struct radeon_ttm_tt *gtt = (void *)ttm; + + if (gtt == NULL) + return -EINVAL; + + gtt->userptr = addr; + gtt->usermm = current->mm; + gtt->userflags = flags; + return 0; +} + +bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm) +{ + struct radeon_ttm_tt *gtt = (void *)ttm; + + if (gtt == NULL) + return false; + + return !!gtt->userptr; +} + static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index ccae4d9..e41481c 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -888,6 +888,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev, bo_va->flags &= ~RADEON_VM_PAGE_VALID; bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED; + if (bo_va->bo && radeon_ttm_tt_has_userptr(bo_va->bo->tbo.ttm)) + bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE; + if (mem) { addr = mem->start << PAGE_SHIFT; if (mem->mem_type != TTM_PL_SYSTEM) { diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 509b2d7..3a9f209 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -511,6 +511,7 @@ typedef struct { #define DRM_RADEON_GEM_BUSY 0x2a #define DRM_RADEON_GEM_VA 0x2b #define DRM_RADEON_GEM_OP 0x2c +#define DRM_RADEON_GEM_USERPTR 0x2d
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t) #define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START) @@ -554,6 +555,7 @@ typedef struct { #define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy) #define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va) #define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op) +#define DRM_IOCTL_RADEON_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_USERPTR, struct drm_radeon_gem_userptr)
typedef struct drm_radeon_init { enum { @@ -808,6 +810,20 @@ struct drm_radeon_gem_create { uint32_t flags; };
+/*
+ * This is not a reliable API and you should expect it to fail for any
+ * number of reasons and have fallback path that do not use userptr to
+ * perform any operation.
+ */
+#define RADEON_GEM_USERPTR_READONLY	(1 << 0)
+
+struct drm_radeon_gem_userptr {
+	uint64_t		addr;
+	uint64_t		size;
+	uint32_t		flags;
+	uint32_t		handle;
+};
+
 #define RADEON_TILING_MACRO				0x1
 #define RADEON_TILING_MICRO				0x2
 #define RADEON_TILING_SWAP_16BIT			0x4
From: Christian König <christian.koenig@amd.com>
Avoid problems with writeback by limiting userptr to anonymous memory.
v2: add commit and code comments
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/radeon/radeon_gem.c |  3 ++-
 drivers/gpu/drm/radeon/radeon_ttm.c | 10 ++++++++++
 include/uapi/drm/radeon_drm.h       |  1 +
 3 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 993ab22..032736b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -290,7 +290,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		return -EACCES;
 
 	/* reject unknown flag values */
-	if (args->flags & ~RADEON_GEM_USERPTR_READONLY)
+	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
+			    RADEON_GEM_USERPTR_ANONONLY))
 		return -EINVAL;
 
 	/* readonly pages not tested on older hardware */
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0109090..54eb7bc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -542,6 +542,16 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 		       ttm->num_pages * PAGE_SIZE))
 		return -EFAULT;
 
+	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
+		/* check that we only pin down anonymous memory
+		   to prevent problems with writeback */
+		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+		struct vm_area_struct *vma;
+		vma = find_vma(gtt->usermm, gtt->userptr);
+		if (!vma || vma->vm_file || vma->vm_end < end)
+			return -EPERM;
+	}
+
 	do {
 		unsigned num_pages = ttm->num_pages - pinned;
 		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 3a9f209..9720e1a 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -816,6 +816,7 @@ struct drm_radeon_gem_create {
  * perform any operation.
  */
 #define RADEON_GEM_USERPTR_READONLY	(1 << 0)
+#define RADEON_GEM_USERPTR_ANONONLY	(1 << 1)
 
 struct drm_radeon_gem_userptr {
 	uint64_t		addr;
On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Avoid problems with writeback by limiting userptr to anonymous memory.
v2: add commit and code comments
I guess I have not expressed myself clearly. This is bogus: you pretend you want to avoid the writeback issue, but you still allow userspace to map file-backed pages (which, by the way, might be a regular bo object from another device, for instance, and that would be fun).

So this patch is a no-go, and I would rather see this userptr restricted to anon vmas only, no matter what. No flags here.
Cheers, Jérôme
Am 05.08.2014 um 19:39 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Avoid problems with writeback by limiting userptr to anonymous memory.
v2: add commit and code comments
I guess, i have not expressed myself clearly. This is bogus, you pretend you want to avoid writeback issue but you still allow userspace to map file backed pages (which by the way might be a regular bo object from another device for instance and that would be fun).
So this patch is a no go and i would rather see that this userptr to be restricted to anon vma only no matter what. No flags here.
Mapping of non-anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read-only access by the GPU.

I'm fine with making it a hard requirement for all mappings if you say it's a must-have.
Christian.
Cheers, Jérôme
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote:
Am 05.08.2014 um 19:39 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Avoid problems with writeback by limiting userptr to anonymous memory.
v2: add commit and code comments
I guess, i have not expressed myself clearly. This is bogus, you pretend you want to avoid writeback issue but you still allow userspace to map file backed pages (which by the way might be a regular bo object from another device for instance and that would be fun).
So this patch is a no go and i would rather see that this userptr to be restricted to anon vma only no matter what. No flags here.
Mapping of non anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read only access by the GPU.
I'm fine with making it a hard requirement for all mappings if you say it's a must have.
Well, for the time being you should force read-only. The way you implement write is broken. Here is how it can be abused to allow writes to a file-backed mmap.
mmap(fixedaddress, fixedsize, NOFD)
userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY)
// bo is created successfully because fixedaddress is part of an anon vma
munmap(fixedaddress, fixedsize)
// radeon gets the mmu_notifier range_start callback and unbinds pages from the
// bo, but radeon does not know there was an unmap.
mmap(fixedaddress, fixedsize, fd_to_this_read_only_file_i_want_to_write_to)
radeon_ioctl_use_my_userptrbo
// bo is bound again by radeon, and because all flags are set at creation
// it is mapped with write permission, allowing someone to write to a file
// that might be read-only for the user.
//
// Script kiddies, it's time to learn about gpu ...
Of course, if you use this patch (kind of selling my own junk here):
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside range_start that you should remove the write permission and that it should be rechecked on the next bind.

Note that I have not read much of your code, so maybe you handle this case somehow.
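For illustration only, a rough sketch of the range_start idea above, assuming a per-tt mmu_notifier and an "invalidated" flag that do not exist in the posted patch (all names are made up); the point is just that the callback can record that the next bind must revalidate the VMA instead of trusting creation-time state.

    /* Hypothetical sketch: none of these fields exist in the patch as posted. */
    static void radeon_userptr_invalidate_range_start(struct mmu_notifier *mn,
                                                      struct mm_struct *mm,
                                                      unsigned long start,
                                                      unsigned long end)
    {
        struct radeon_ttm_tt *gtt = container_of(mn, struct radeon_ttm_tt, mn);
        unsigned long bo_start = gtt->userptr;
        unsigned long bo_end = bo_start + (gtt->ttm.ttm.num_pages << PAGE_SHIFT);

        if (end <= bo_start || start >= bo_end)
            return;        /* range does not overlap this userptr BO */

        /* Drop trust in the old mapping and force the next bind to
         * recheck the VMA (and its write permission). */
        gtt->invalidated = true;
    }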
Cheers, Jérôme
Christian.
Cheers, Jérôme
Am 06.08.2014 um 00:13 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote:
Am 05.08.2014 um 19:39 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Avoid problems with writeback by limiting userptr to anonymous memory.
v2: add commit and code comments
I guess, i have not expressed myself clearly. This is bogus, you pretend you want to avoid writeback issue but you still allow userspace to map file backed pages (which by the way might be a regular bo object from another device for instance and that would be fun).
So this patch is a no go and i would rather see that this userptr to be restricted to anon vma only no matter what. No flags here.
Mapping of non anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read only access by the GPU.
I'm fine with making it a hard requirement for all mappings if you say it's a must have.
Well for time being you should force read only. The way you implement write is broken. Here is how it can abuse to allow write to a file backed mmap.
mmap(fixaddress,fixedsize,NOFD) userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) // bo is created successfully because fixedaddress is part of anonvma munmap(fixedaddress,fixedsize) // radeon get mmu_notifier_range_start callback and unbind page from the // bo but radeon does not know there was an unmap. mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) radeon_ioctl_use_my_userptrbo // bo is bind again by radeon and because all flag are set at creation // it is map with write permission allowing someone to write to a file // that might be read only for the user. // // Script kiddies it's time to learn about gpu ...
Of course if you this patch (kind of selling my own junk here) :
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside the range_start that you should remove the write permission and that it should be rechecked on next bind.
Note that i have not read much of your code so maybe you handle this case somehow.
I've stumbled over this attack vector as well; it's the reason why, with v5 of the patch, I moved the access rights check to the bind callback instead of BO creation time.

This way you get -EFAULT if you try something like this at command submission time.
Regards, Christian.
Cheers, Jérôme
Christian.
Cheers, Jérôme
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote:
Am 06.08.2014 um 00:13 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote:
Am 05.08.2014 um 19:39 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Avoid problems with writeback by limiting userptr to anonymous memory.
v2: add commit and code comments
I guess, i have not expressed myself clearly. This is bogus, you pretend you want to avoid writeback issue but you still allow userspace to map file backed pages (which by the way might be a regular bo object from another device for instance and that would be fun).
So this patch is a no go and i would rather see that this userptr to be restricted to anon vma only no matter what. No flags here.
Mapping of non anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read only access by the GPU.
I'm fine with making it a hard requirement for all mappings if you say it's a must have.
Well for time being you should force read only. The way you implement write is broken. Here is how it can abuse to allow write to a file backed mmap.
mmap(fixaddress,fixedsize,NOFD) userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) // bo is created successfully because fixedaddress is part of anonvma munmap(fixedaddress,fixedsize) // radeon get mmu_notifier_range_start callback and unbind page from the // bo but radeon does not know there was an unmap. mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) radeon_ioctl_use_my_userptrbo // bo is bind again by radeon and because all flag are set at creation // it is map with write permission allowing someone to write to a file // that might be read only for the user. // // Script kiddies it's time to learn about gpu ...
Of course if you this patch (kind of selling my own junk here) :
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside the range_start that you should remove the write permission and that it should be rechecked on next bind.
Note that i have not read much of your code so maybe you handle this case somehow.
I've stumbled over this attack vector as well and it's the reason why I've moved checking the access rights to the bind callback instead of BO creation time with V5 of the patch.
This way you get an -EFAULT if you try something like this on command submission time.
So you seem immune to that issue, but you are still not checking whether the anon vma is writable, which you should; again, a security concern here.
Cheers, Jérôme
Am 06.08.2014 um 18:08 schrieb Jerome Glisse:
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote:
Am 06.08.2014 um 00:13 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote:
Am 05.08.2014 um 19:39 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Avoid problems with writeback by limiting userptr to anonymous memory.
v2: add commit and code comments
I guess, i have not expressed myself clearly. This is bogus, you pretend you want to avoid writeback issue but you still allow userspace to map file backed pages (which by the way might be a regular bo object from another device for instance and that would be fun).
So this patch is a no go and i would rather see that this userptr to be restricted to anon vma only no matter what. No flags here.
Mapping of non anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read only access by the GPU.
I'm fine with making it a hard requirement for all mappings if you say it's a must have.
Well for time being you should force read only. The way you implement write is broken. Here is how it can abuse to allow write to a file backed mmap.
mmap(fixaddress,fixedsize,NOFD) userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) // bo is created successfully because fixedaddress is part of anonvma munmap(fixedaddress,fixedsize) // radeon get mmu_notifier_range_start callback and unbind page from the // bo but radeon does not know there was an unmap. mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) radeon_ioctl_use_my_userptrbo // bo is bind again by radeon and because all flag are set at creation // it is map with write permission allowing someone to write to a file // that might be read only for the user. // // Script kiddies it's time to learn about gpu ...
Of course if you this patch (kind of selling my own junk here) :
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside the range_start that you should remove the write permission and that it should be rechecked on next bind.
Note that i have not read much of your code so maybe you handle this case somehow.
I've stumbled over this attack vector as well and it's the reason why I've moved checking the access rights to the bind callback instead of BO creation time with V5 of the patch.
This way you get an -EFAULT if you try something like this on command submission time.
So you seem immune to that issue but you are still not checking if the anon vma is writeable which you should again security concern here.
We check the access rights of the pointer using:
    if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                   (long)gtt->userptr, ttm->num_pages * PAGE_SIZE))
            return -EFAULT;
Shouldn't that be enough?
Christian.
Cheers, Jérôme
On Wed, Aug 06, 2014 at 07:17:25PM +0200, Christian König wrote:
Am 06.08.2014 um 18:08 schrieb Jerome Glisse:
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote:
Am 06.08.2014 um 00:13 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote:
Am 05.08.2014 um 19:39 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote: >From: Christian König christian.koenig@amd.com > >Avoid problems with writeback by limiting userptr to anonymous memory. > >v2: add commit and code comments I guess, i have not expressed myself clearly. This is bogus, you pretend you want to avoid writeback issue but you still allow userspace to map file backed pages (which by the way might be a regular bo object from another device for instance and that would be fun).
So this patch is a no go and i would rather see that this userptr to be restricted to anon vma only no matter what. No flags here.
Mapping of non anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read only access by the GPU.
I'm fine with making it a hard requirement for all mappings if you say it's a must have.
Well for time being you should force read only. The way you implement write is broken. Here is how it can abuse to allow write to a file backed mmap.
mmap(fixaddress,fixedsize,NOFD) userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) // bo is created successfully because fixedaddress is part of anonvma munmap(fixedaddress,fixedsize) // radeon get mmu_notifier_range_start callback and unbind page from the // bo but radeon does not know there was an unmap. mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) radeon_ioctl_use_my_userptrbo // bo is bind again by radeon and because all flag are set at creation // it is map with write permission allowing someone to write to a file // that might be read only for the user. // // Script kiddies it's time to learn about gpu ...
Of course if you this patch (kind of selling my own junk here) :
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside the range_start that you should remove the write permission and that it should be rechecked on next bind.
Note that i have not read much of your code so maybe you handle this case somehow.
I've stumbled over this attack vector as well and it's the reason why I've moved checking the access rights to the bind callback instead of BO creation time with V5 of the patch.
This way you get an -EFAULT if you try something like this on command submission time.
So you seem immune to that issue but you are still not checking if the anon vma is writeable which you should again security concern here.
We check the access rights of the pointer using:
if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(long)gtt->userptr, ttm->num_pages * PAGE_SIZE)) return -EFAULT;
Shouldn't that be enough?
No, access_ok only checks against special areas on some architectures, and I am pretty sure that on x86 the VERIFY_WRITE or VERIFY_READ argument is just flat out ignored.
What you need to test is the vma's vm_flags, something like:

    if (write && !(vma->vm_flags & VM_WRITE))
            return -EPERM;

This needs to happen on every bind.
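For illustration, a sketch of roughly what that check could look like on top of the posted radeon_ttm_tt_pin_userptr(), assuming mmap_sem is already held for read (the CS path takes it around validation); this is not part of the patch as submitted.

    /* on top of the existing current->mm and access_ok checks in
     * radeon_ttm_tt_pin_userptr(); 'write' is the flag already
     * computed there from RADEON_GEM_USERPTR_READONLY */
    if (write) {
        unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
        struct vm_area_struct *vma;

        vma = find_vma(gtt->usermm, gtt->userptr);
        if (!vma || !(vma->vm_flags & VM_WRITE) || vma->vm_end < end)
            return -EPERM;
    }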
Cheers, Jérôme
Christian.
Cheers, Jérôme
On Wed, Aug 06, 2014 at 02:34:16PM -0400, Jerome Glisse wrote:
On Wed, Aug 06, 2014 at 07:17:25PM +0200, Christian König wrote:
Am 06.08.2014 um 18:08 schrieb Jerome Glisse:
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote:
Am 06.08.2014 um 00:13 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote:
Am 05.08.2014 um 19:39 schrieb Jerome Glisse: >On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote: >>From: Christian König christian.koenig@amd.com >> >>Avoid problems with writeback by limiting userptr to anonymous memory. >> >>v2: add commit and code comments >I guess, i have not expressed myself clearly. This is bogus, you pretend >you want to avoid writeback issue but you still allow userspace to map >file backed pages (which by the way might be a regular bo object from >another device for instance and that would be fun). > >So this patch is a no go and i would rather see that this userptr to >be restricted to anon vma only no matter what. No flags here. Mapping of non anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read only access by the GPU.
I'm fine with making it a hard requirement for all mappings if you say it's a must have.
Well for time being you should force read only. The way you implement write is broken. Here is how it can abuse to allow write to a file backed mmap.
mmap(fixaddress,fixedsize,NOFD) userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) // bo is created successfully because fixedaddress is part of anonvma munmap(fixedaddress,fixedsize) // radeon get mmu_notifier_range_start callback and unbind page from the // bo but radeon does not know there was an unmap. mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) radeon_ioctl_use_my_userptrbo // bo is bind again by radeon and because all flag are set at creation // it is map with write permission allowing someone to write to a file // that might be read only for the user. // // Script kiddies it's time to learn about gpu ...
Of course if you this patch (kind of selling my own junk here) :
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside the range_start that you should remove the write permission and that it should be rechecked on next bind.
Note that i have not read much of your code so maybe you handle this case somehow.
I've stumbled over this attack vector as well and it's the reason why I've moved checking the access rights to the bind callback instead of BO creation time with V5 of the patch.
This way you get an -EFAULT if you try something like this on command submission time.
So you seem immune to that issue but you are still not checking if the anon vma is writeable which you should again security concern here.
We check the access rights of the pointer using:
if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(long)gtt->userptr, ttm->num_pages * PAGE_SIZE)) return -EFAULT;
Shouldn't that be enough?
No, access_ok only check against special area on some architecture and i am pretty sure on x86 the VERIFY_WRITE or VERIFY_READ is just flat out ignored.
What you need to test is the vma vm_flags somethings like
if (write && !(vma->vm_flags VM_WRITE)) return -EPERM;
Which need to happen on all bind.
Cheers, Jérôme
I should add that access_ok should be done only once, inside the gem userptr ioctl, as access_ok only checks that the address is a valid userspace address and not some special address (like an address inside the kernel address space or a non-canonical address on x86-64 ...). If it returns true the first time, then it will always return true.

Only the vma needs to be checked on each bind.
Christian.
Cheers, Jérôme
On Wed, Aug 06, 2014 at 02:34:16PM -0400, Jerome Glisse wrote:
On Wed, Aug 06, 2014 at 07:17:25PM +0200, Christian König wrote:
Am 06.08.2014 um 18:08 schrieb Jerome Glisse:
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote:
Am 06.08.2014 um 00:13 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote:
Am 05.08.2014 um 19:39 schrieb Jerome Glisse: >On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote: >>From: Christian König christian.koenig@amd.com >> >>Avoid problems with writeback by limiting userptr to anonymous memory. >> >>v2: add commit and code comments >I guess, i have not expressed myself clearly. This is bogus, you pretend >you want to avoid writeback issue but you still allow userspace to map >file backed pages (which by the way might be a regular bo object from >another device for instance and that would be fun). > >So this patch is a no go and i would rather see that this userptr to >be restricted to anon vma only no matter what. No flags here. Mapping of non anonymous memory (e.g. everything get_user_pages won't fail with) is restricted to read only access by the GPU.
I'm fine with making it a hard requirement for all mappings if you say it's a must have.
Well for time being you should force read only. The way you implement write is broken. Here is how it can abuse to allow write to a file backed mmap.
mmap(fixaddress,fixedsize,NOFD) userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) // bo is created successfully because fixedaddress is part of anonvma munmap(fixedaddress,fixedsize) // radeon get mmu_notifier_range_start callback and unbind page from the // bo but radeon does not know there was an unmap. mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) radeon_ioctl_use_my_userptrbo // bo is bind again by radeon and because all flag are set at creation // it is map with write permission allowing someone to write to a file // that might be read only for the user. // // Script kiddies it's time to learn about gpu ...
Of course if you this patch (kind of selling my own junk here) :
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside the range_start that you should remove the write permission and that it should be rechecked on next bind.
Note that i have not read much of your code so maybe you handle this case somehow.
I've stumbled over this attack vector as well and it's the reason why I've moved checking the access rights to the bind callback instead of BO creation time with V5 of the patch.
This way you get an -EFAULT if you try something like this on command submission time.
So you seem immune to that issue but you are still not checking if the anon vma is writeable which you should again security concern here.
We check the access rights of the pointer using:
if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(long)gtt->userptr, ttm->num_pages * PAGE_SIZE)) return -EFAULT;
Shouldn't that be enough?
No, access_ok only check against special area on some architecture and i am pretty sure on x86 the VERIFY_WRITE or VERIFY_READ is just flat out ignored.
What you need to test is the vma vm_flags somethings like
if (write && !(vma->vm_flags VM_WRITE)) return -EPERM;
Which need to happen on all bind.
access_ok is _only_ valid in combination with copy_from/to_user and friends and is an optimization of the access checks depending upon architecture. You always need them both, one alone is useless. -Daniel
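To illustrate the pairing being referred to (sketch only, using the three-argument access_ok() of this kernel era): access_ok() plus a "__" copy variant is essentially what copy_from_user() does internally; access_ok() on its own only validates the address range and never checks VMA permissions.

    #include <linux/uaccess.h>

    static int copy_in_example(void *dst, const void __user *src, size_t len)
    {
        /* step 1: address range check only (no VMA/permission check) */
        if (!access_ok(VERIFY_READ, src, len))
            return -EFAULT;

        /* step 2: the actual, faulting copy */
        if (__copy_from_user(dst, src, len))
            return -EFAULT;

        return 0;
    }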
On Wed, Aug 06, 2014 at 10:24:31PM +0200, Daniel Vetter wrote:
On Wed, Aug 06, 2014 at 02:34:16PM -0400, Jerome Glisse wrote:
On Wed, Aug 06, 2014 at 07:17:25PM +0200, Christian König wrote:
Am 06.08.2014 um 18:08 schrieb Jerome Glisse:
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote:
Am 06.08.2014 um 00:13 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote: >Am 05.08.2014 um 19:39 schrieb Jerome Glisse: >>On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote: >>>From: Christian König christian.koenig@amd.com >>> >>>Avoid problems with writeback by limiting userptr to anonymous memory. >>> >>>v2: add commit and code comments >>I guess, i have not expressed myself clearly. This is bogus, you pretend >>you want to avoid writeback issue but you still allow userspace to map >>file backed pages (which by the way might be a regular bo object from >>another device for instance and that would be fun). >> >>So this patch is a no go and i would rather see that this userptr to >>be restricted to anon vma only no matter what. No flags here. >Mapping of non anonymous memory (e.g. everything get_user_pages won't fail >with) is restricted to read only access by the GPU. > >I'm fine with making it a hard requirement for all mappings if you say it's >a must have. > Well for time being you should force read only. The way you implement write is broken. Here is how it can abuse to allow write to a file backed mmap.
mmap(fixaddress,fixedsize,NOFD) userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) // bo is created successfully because fixedaddress is part of anonvma munmap(fixedaddress,fixedsize) // radeon get mmu_notifier_range_start callback and unbind page from the // bo but radeon does not know there was an unmap. mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) radeon_ioctl_use_my_userptrbo // bo is bind again by radeon and because all flag are set at creation // it is map with write permission allowing someone to write to a file // that might be read only for the user. // // Script kiddies it's time to learn about gpu ...
Of course if you this patch (kind of selling my own junk here) :
http://www.spinics.net/lists/linux-mm/msg75878.html
then you could know inside the range_start that you should remove the write permission and that it should be rechecked on next bind.
Note that i have not read much of your code so maybe you handle this case somehow.
I've stumbled over this attack vector as well and it's the reason why I've moved checking the access rights to the bind callback instead of BO creation time with V5 of the patch.
This way you get an -EFAULT if you try something like this on command submission time.
So you seem immune to that issue but you are still not checking if the anon vma is writeable which you should again security concern here.
We check the access rights of the pointer using:
if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(long)gtt->userptr, ttm->num_pages * PAGE_SIZE)) return -EFAULT;
Shouldn't that be enough?
No, access_ok only check against special area on some architecture and i am pretty sure on x86 the VERIFY_WRITE or VERIFY_READ is just flat out ignored.
What you need to test is the vma vm_flags somethings like
if (write && !(vma->vm_flags VM_WRITE)) return -EPERM;
Which need to happen on all bind.
access_ok is _only_ valid in combination with copy_from/to_user and friends and is an optimization of the access checks depending upon architecture. You always need them both, one alone is useless.
ENOPARSE. access_ok will always return the same value for a given address, at least on x86, so if the address supplied at ioctl time is a valid userspace address then it will still be a valid userspace address at buffer object bind time (note that the user address is immutable here). So access_ok can be done once and only once inside the ioctl, and then for the write permission you need to recheck the vma each time you bind the object (or rather each time the previous bind was invalidated by some mmu_notifier call).

That being said, access_ok is kind of useless given that get_user_pages will fail on kernel addresses and, I assume, on any special addresses an architecture might have. So strictly speaking access_ok is just a way to fail early and flat out instead of delaying the failure to bind time.

Or do I need to go read the x86 code again?
Cheers, Jérôme
-Daniel
Daniel Vetter Software Engineer, Intel Corporation +41 (0) 79 365 57 48 - http://blog.ffwll.ch
On Wed, Aug 06, 2014 at 11:45:48PM -0400, Jerome Glisse wrote:
On Wed, Aug 06, 2014 at 10:24:31PM +0200, Daniel Vetter wrote:
On Wed, Aug 06, 2014 at 02:34:16PM -0400, Jerome Glisse wrote:
On Wed, Aug 06, 2014 at 07:17:25PM +0200, Christian König wrote:
Am 06.08.2014 um 18:08 schrieb Jerome Glisse:
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote:
Am 06.08.2014 um 00:13 schrieb Jerome Glisse: >On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote: >>Am 05.08.2014 um 19:39 schrieb Jerome Glisse: >>>On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote: >>>>From: Christian König christian.koenig@amd.com >>>> >>>>Avoid problems with writeback by limiting userptr to anonymous memory. >>>> >>>>v2: add commit and code comments >>>I guess, i have not expressed myself clearly. This is bogus, you pretend >>>you want to avoid writeback issue but you still allow userspace to map >>>file backed pages (which by the way might be a regular bo object from >>>another device for instance and that would be fun). >>> >>>So this patch is a no go and i would rather see that this userptr to >>>be restricted to anon vma only no matter what. No flags here. >>Mapping of non anonymous memory (e.g. everything get_user_pages won't fail >>with) is restricted to read only access by the GPU. >> >>I'm fine with making it a hard requirement for all mappings if you say it's >>a must have. >> >Well for time being you should force read only. The way you implement write >is broken. Here is how it can abuse to allow write to a file backed mmap. > >mmap(fixaddress,fixedsize,NOFD) >userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) >// bo is created successfully because fixedaddress is part of anonvma >munmap(fixedaddress,fixedsize) >// radeon get mmu_notifier_range_start callback and unbind page from the >// bo but radeon does not know there was an unmap. >mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) >radeon_ioctl_use_my_userptrbo >// bo is bind again by radeon and because all flag are set at creation >// it is map with write permission allowing someone to write to a file >// that might be read only for the user. >// >// Script kiddies it's time to learn about gpu ... > >Of course if you this patch (kind of selling my own junk here) : > >http://www.spinics.net/lists/linux-mm/msg75878.html > >then you could know inside the range_start that you should remove the >write permission and that it should be rechecked on next bind. > >Note that i have not read much of your code so maybe you handle this >case somehow. I've stumbled over this attack vector as well and it's the reason why I've moved checking the access rights to the bind callback instead of BO creation time with V5 of the patch.
This way you get an -EFAULT if you try something like this on command submission time.
So you seem immune to that issue, but you are still not checking whether the anon vma is writeable, which you should; again, this is a security concern.
We check the access rights of the pointer using:
if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(long)gtt->userptr, ttm->num_pages * PAGE_SIZE)) return -EFAULT;
Shouldn't that be enough?
No, access_ok only checks against special areas on some architectures, and I am pretty sure that on x86 the VERIFY_WRITE or VERIFY_READ argument is just flat out ignored.
What you need to test is the vma vm_flags, something like:
if (write && !(vma->vm_flags & VM_WRITE)) return -EPERM;
Which needs to happen on every bind.
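As an illustration only (not something in the patch as posted), the per-bind check described above could look roughly like this, assuming the gtt->userptr and ttm->num_pages fields used elsewhere in the series, and that a single VMA covers the whole range:

	/* hedged sketch: per-bind permission check, single-VMA case assumed */
	struct vm_area_struct *vma;
	unsigned long start = gtt->userptr;
	unsigned long end = start + ttm->num_pages * PAGE_SIZE;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, start);
	if (!vma || vma->vm_start > start || vma->vm_end < end) {
		up_read(&current->mm->mmap_sem);
		return -EFAULT;	/* range not (fully) mapped any more */
	}
	if (write && !(vma->vm_flags & VM_WRITE)) {
		up_read(&current->mm->mmap_sem);
		return -EPERM;	/* mapping is read-only, refuse GPU writes */
	}
	up_read(&current->mm->mmap_sem);

A real implementation would have to walk every VMA covering the range and redo the check after each mmu_notifier invalidation, as discussed above.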
access_ok is _only_ valid in combination with copy_from/to_user and friends and is an optimization of the access checks depending upon architecture. You always need them both, one alone is useless.
ENOPARSE, access_ok will always return the same value for a given address at least on x86 so if address supplied at ioctl time is a valid userspace address then it will still be a valid userspace address at buffer object bind time (note that the user address is immutable here). So access_ok can be done once and only once inside the ioctl and then for the write permission you need to recheck the vma each time you bind the object (or rather each time the previous bind was invalidated by some mmu_notifier call).
That being said access_ok is kind of useless given that get_user_page will fail on kernel address and i assume for any special address any architecture might have. So strictly speaking the access_ok is just a way to fail early and flatout instead of delaying the failure to bind time.
Well that's what I've tried to say. For gup you don't need access_ok, that's really just one part of copy_from/to_user machinery. And afaik it's not specified what exactly access_ok checks (on x86 it only checks for the kernel address limit) so I don't think there's a lot of use in it for gup.
Maybe I should have done an s/valid/useful/ in my short comment. -Daniel
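For reference, and ignoring the overflow handling of the real macro, access_ok on x86 of that era reduces to roughly the following range check, which is why the VERIFY_READ/VERIFY_WRITE argument makes no difference there (a simplified sketch, not the actual implementation):

	/* simplified sketch of x86 access_ok(); 'type' is not used at all */
	#define access_ok(type, addr, size) \
		((unsigned long)(addr) + (size) <= current_thread_info()->addr_limit.seg)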
Am 07.08.2014 um 08:55 schrieb Daniel Vetter:
On Wed, Aug 06, 2014 at 11:45:48PM -0400, Jerome Glisse wrote:
On Wed, Aug 06, 2014 at 10:24:31PM +0200, Daniel Vetter wrote:
On Wed, Aug 06, 2014 at 02:34:16PM -0400, Jerome Glisse wrote:
On Wed, Aug 06, 2014 at 07:17:25PM +0200, Christian König wrote:
Am 06.08.2014 um 18:08 schrieb Jerome Glisse:
On Wed, Aug 06, 2014 at 08:55:28AM +0200, Christian König wrote: > Am 06.08.2014 um 00:13 schrieb Jerome Glisse: >> On Tue, Aug 05, 2014 at 07:45:21PM +0200, Christian König wrote: >>> Am 05.08.2014 um 19:39 schrieb Jerome Glisse: >>>> On Tue, Aug 05, 2014 at 06:05:29PM +0200, Christian König wrote: >>>>> From: Christian König christian.koenig@amd.com >>>>> >>>>> Avoid problems with writeback by limiting userptr to anonymous memory. >>>>> >>>>> v2: add commit and code comments >>>> I guess, i have not expressed myself clearly. This is bogus, you pretend >>>> you want to avoid writeback issue but you still allow userspace to map >>>> file backed pages (which by the way might be a regular bo object from >>>> another device for instance and that would be fun). >>>> >>>> So this patch is a no go and i would rather see that this userptr to >>>> be restricted to anon vma only no matter what. No flags here. >>> Mapping of non anonymous memory (e.g. everything get_user_pages won't fail >>> with) is restricted to read only access by the GPU. >>> >>> I'm fine with making it a hard requirement for all mappings if you say it's >>> a must have. >>> >> Well for time being you should force read only. The way you implement write >> is broken. Here is how it can abuse to allow write to a file backed mmap. >> >> mmap(fixaddress,fixedsize,NOFD) >> userptr_ioctl(fixedaddress, RADEON_GEM_USERPTR_ANONONLY) >> // bo is created successfully because fixedaddress is part of anonvma >> munmap(fixedaddress,fixedsize) >> // radeon get mmu_notifier_range_start callback and unbind page from the >> // bo but radeon does not know there was an unmap. >> mmap(fixaddress,fixedsize,fd_to_this_read_only_file_i_want_to_write_to) >> radeon_ioctl_use_my_userptrbo >> // bo is bind again by radeon and because all flag are set at creation >> // it is map with write permission allowing someone to write to a file >> // that might be read only for the user. >> // >> // Script kiddies it's time to learn about gpu ... >> >> Of course if you this patch (kind of selling my own junk here) : >> >> http://www.spinics.net/lists/linux-mm/msg75878.html >> >> then you could know inside the range_start that you should remove the >> write permission and that it should be rechecked on next bind. >> >> Note that i have not read much of your code so maybe you handle this >> case somehow. > I've stumbled over this attack vector as well and it's the reason why I've > moved checking the access rights to the bind callback instead of BO creation > time with V5 of the patch. > > This way you get an -EFAULT if you try something like this on command > submission time. So you seem immune to that issue but you are still not checking if the anon vma is writeable which you should again security concern here.
We check the access rights of the pointer using:
if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(long)gtt->userptr, ttm->num_pages * PAGE_SIZE)) return -EFAULT;
Shouldn't that be enough?
No, access_ok only check against special area on some architecture and i am pretty sure on x86 the VERIFY_WRITE or VERIFY_READ is just flat out ignored.
What you need to test is the vma vm_flags somethings like
if (write && !(vma->vm_flags & VM_WRITE)) return -EPERM;
Which need to happen on all bind.
That seems to be unnecessary, since get_user_pages will check that for us anyway.
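Indeed, on a VMA without VM_WRITE a writable pin fails inside get_user_pages itself, so the bind errors out without an extra check. Illustrative only, using the get_user_pages signature of that kernel generation rather than the driver's actual call:

	/* gup itself rejects writable pinning of a read-only VMA with -EFAULT */
	r = get_user_pages(current, current->mm, gtt->userptr,
			   ttm->num_pages, 1 /* write */, 0 /* force */,
			   ttm->pages, NULL);
	if (r < 0)
		return r;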
access_ok is _only_ valid in combination with copy_from/to_user and friends and is an optimization of the access checks depending upon architecture. You always need them both, one alone is useless.
ENOPARSE, access_ok will always return the same value for a given address at least on x86 so if address supplied at ioctl time is a valid userspace address then it will still be a valid userspace address at buffer object bind time (note that the user address is immutable here). So access_ok can be done once and only once inside the ioctl and then for the write permission you need to recheck the vma each time you bind the object (or rather each time the previous bind was invalidated by some mmu_notifier call).
That being said access_ok is kind of useless given that get_user_page will fail on kernel address and i assume for any special address any architecture might have. So strictly speaking the access_ok is just a way to fail early and flatout instead of delaying the failure to bind time.
Well that's what I've tried to say. For gup you don't need access_ok, that's really just one part of copy_from/to_user machinery. And afaik it's not specified what exactly access_ok checks (on x86 it only checks for the kernel address limit) so I don't think there's a lot of use in it for gup.
Maybe I should have done an s/valid/useful/ in my short comment.
I've dropped the access_ok check and also fixed another bug in the VM handling. Patches are on their way to the list.
Thanks for the comments, Christian.
-Daniel
From: Christian König christian.koenig@amd.com
This way we test userptr availability at BO creation time instead of first use.
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/radeon/radeon_gem.c | 18 +++++++++++++++++- include/uapi/drm/radeon_drm.h | 1 + 2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 032736b..4506560 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -291,7 +291,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* reject unknown flag values */ if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | - RADEON_GEM_USERPTR_ANONONLY)) + RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE)) return -EINVAL;
/* readonly pages not tested on older hardware */ @@ -312,6 +312,22 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object;
+ if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { + down_read(¤t->mm->mmap_sem); + r = radeon_bo_reserve(bo, true); + if (r) { + up_read(¤t->mm->mmap_sem); + goto release_object; + } + + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT); + r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); + radeon_bo_unreserve(bo); + up_read(¤t->mm->mmap_sem); + if (r) + goto release_object; + } + r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(gobj); diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 9720e1a..5dc61c2 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -817,6 +817,7 @@ struct drm_radeon_gem_create { */ #define RADEON_GEM_USERPTR_READONLY (1 << 0) #define RADEON_GEM_USERPTR_ANONONLY (1 << 1) +#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
struct drm_radeon_gem_userptr { uint64_t addr;
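For illustration, userspace could request early validation like this; the DRM_IOCTL_RADEON_GEM_USERPTR name and the drmIoctl wrapper are assumptions based on the rest of the series, and addr/size must be page aligned:

	/* hypothetical userspace use of the VALIDATE flag */
	struct drm_radeon_gem_userptr args = {
		.addr  = (uintptr_t)buf,	/* page aligned */
		.size  = size,			/* multiple of PAGE_SIZE */
		.flags = RADEON_GEM_USERPTR_READONLY |
			 RADEON_GEM_USERPTR_VALIDATE,	/* fail here, not at first CS */
	};

	if (drmIoctl(fd, DRM_IOCTL_RADEON_GEM_USERPTR, &args))
		return -errno;
	bo_handle = args.handle;	/* GEM handle of the new BO */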
From: Christian König christian.koenig@amd.com
Whenever a userspace mapping related to our userptr changes, we wait for the BO to become idle and unmap it from GTT.
v2: rebased, fix mutex unlock in error path v3: improve commit message
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/Kconfig | 1 + drivers/gpu/drm/radeon/Makefile | 2 +- drivers/gpu/drm/radeon/radeon.h | 12 ++ drivers/gpu/drm/radeon/radeon_device.c | 2 + drivers/gpu/drm/radeon/radeon_gem.c | 9 +- drivers/gpu/drm/radeon/radeon_mn.c | 272 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/radeon/radeon_object.c | 1 + include/uapi/drm/radeon_drm.h | 1 + 8 files changed, 298 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/radeon/radeon_mn.c
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 9b2eedc..2745284 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -115,6 +115,7 @@ config DRM_RADEON select HWMON select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE + select MMU_NOTIFIER help Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. You don't need to choose this to diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 0013ad0..c7fa1ae 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ - ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o + ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o
# add async DMA block radeon-y += \ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3c6999e..511191f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -65,6 +65,7 @@ #include <linux/list.h> #include <linux/kref.h> #include <linux/interval_tree.h> +#include <linux/hashtable.h>
#include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> @@ -487,6 +488,9 @@ struct radeon_bo {
struct ttm_bo_kmap_obj dma_buf_vmap; pid_t pid; + + struct radeon_mn *mn; + struct interval_tree_node mn_it; }; #define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -1725,6 +1729,11 @@ void radeon_test_ring_sync(struct radeon_device *rdev, struct radeon_ring *cpB); void radeon_test_syncing(struct radeon_device *rdev);
+/* + * MMU Notifier + */ +int radeon_mn_register(struct radeon_bo *bo, unsigned long addr); +void radeon_mn_unregister(struct radeon_bo *bo);
/* * Debugfs @@ -2372,6 +2381,9 @@ struct radeon_device { /* tracking pinned memory */ u64 vram_pin_size; u64 gart_pin_size; + + struct mutex mn_lock; + DECLARE_HASHTABLE(mn_hash, 7); };
bool radeon_is_px(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c8ea050..c58f84f 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1270,6 +1270,8 @@ int radeon_device_init(struct radeon_device *rdev, init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); + mutex_init(&rdev->mn_lock); + hash_init(rdev->mn_hash); r = radeon_gem_init(rdev); if (r) return r; diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 4506560..2a6fbf1 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -291,7 +291,8 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
/* reject unknown flag values */ if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | - RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE)) + RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE | + RADEON_GEM_USERPTR_REGISTER)) return -EINVAL;
/* readonly pages not tested on older hardware */ @@ -312,6 +313,12 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object;
+ if (args->flags & RADEON_GEM_USERPTR_REGISTER) { + r = radeon_mn_register(bo, args->addr); + if (r) + goto release_object; + } + if (args->flags & RADEON_GEM_USERPTR_VALIDATE) { down_read(¤t->mm->mmap_sem); r = radeon_bo_reserve(bo, true); diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c new file mode 100644 index 0000000..0157bc2 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -0,0 +1,272 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: + * Christian König christian.koenig@amd.com + */ + +#include <linux/firmware.h> +#include <linux/module.h> +#include <linux/mmu_notifier.h> +#include <drm/drmP.h> +#include <drm/drm.h> + +#include "radeon.h" + +struct radeon_mn { + /* constant after initialisation */ + struct radeon_device *rdev; + struct mm_struct *mm; + struct mmu_notifier mn; + + /* only used on destruction */ + struct work_struct work; + + /* protected by rdev->mn_lock */ + struct hlist_node node; + + /* objects protected by lock */ + struct mutex lock; + struct rb_root objects; +}; + +/** + * radeon_mn_destroy - destroy the rmn + * + * @work: previously sheduled work item + * + * Lazy destroys the notifier from a work item + */ +static void radeon_mn_destroy(struct work_struct *work) +{ + struct radeon_mn *rmn = container_of(work, struct radeon_mn, work); + struct radeon_device *rdev = rmn->rdev; + struct radeon_bo *bo, *next; + + mutex_lock(&rdev->mn_lock); + mutex_lock(&rmn->lock); + hash_del(&rmn->node); + rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects, mn_it.rb) { + interval_tree_remove(&bo->mn_it, &rmn->objects); + bo->mn = NULL; + } + mutex_unlock(&rmn->lock); + mutex_unlock(&rdev->mn_lock); + mmu_notifier_unregister(&rmn->mn, rmn->mm); + kfree(rmn); +} + +/** + * radeon_mn_release - callback to notify about mm destruction + * + * @mn: our notifier + * @mn: the mm this callback is about + * + * Shedule a work item to lazy destroy our notifier. 
+ */ +static void radeon_mn_release(struct mmu_notifier *mn, + struct mm_struct *mm) +{ + struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); + INIT_WORK(&rmn->work, radeon_mn_destroy); + schedule_work(&rmn->work); +} + +/** + * radeon_mn_invalidate_range_start - callback to notify about mm change + * + * @mn: our notifier + * @mn: the mm this callback is about + * @start: start of updated range + * @end: end of updated range + * + * We block for all BOs between start and end to be idle and + * unmap them by move them into system domain again. + */ +static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); + struct interval_tree_node *it; + + /* notification is exclusive, but interval is inclusive */ + end -= 1; + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, start, end); + while (it) { + struct radeon_bo *bo; + int r; + + bo = container_of(it, struct radeon_bo, mn_it); + it = interval_tree_iter_next(it, start, end); + + r = radeon_bo_reserve(bo, true); + if (r) { + DRM_ERROR("(%d) failed to reserve user bo\n", r); + continue; + } + + if (bo->tbo.sync_obj) { + r = radeon_fence_wait(bo->tbo.sync_obj, false); + if (r) + DRM_ERROR("(%d) failed to wait for user bo\n", r); + } + + radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); + if (r) + DRM_ERROR("(%d) failed to validate user bo\n", r); + + radeon_bo_unreserve(bo); + } + + mutex_unlock(&rmn->lock); +} + +static const struct mmu_notifier_ops radeon_mn_ops = { + .release = radeon_mn_release, + .invalidate_range_start = radeon_mn_invalidate_range_start, +}; + +/** + * radeon_mn_get - create notifier context + * + * @rdev: radeon device pointer + * + * Creates a notifier context for current->mm. + */ +static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev) +{ + struct mm_struct *mm = current->mm; + struct radeon_mn *rmn; + int r; + + down_write(&mm->mmap_sem); + mutex_lock(&rdev->mn_lock); + + hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm) + if (rmn->mm == mm) + goto release_locks; + + rmn = kzalloc(sizeof(*rmn), GFP_KERNEL); + if (!rmn) { + rmn = ERR_PTR(-ENOMEM); + goto release_locks; + } + + rmn->rdev = rdev; + rmn->mm = mm; + rmn->mn.ops = &radeon_mn_ops; + mutex_init(&rmn->lock); + rmn->objects = RB_ROOT; + + r = __mmu_notifier_register(&rmn->mn, mm); + if (r) + goto free_rmn; + + hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm); + +release_locks: + mutex_unlock(&rdev->mn_lock); + up_write(&mm->mmap_sem); + + return rmn; + +free_rmn: + mutex_unlock(&rdev->mn_lock); + up_write(&mm->mmap_sem); + kfree(rmn); + + return ERR_PTR(r); +} + +/** + * radeon_mn_register - register a BO for notifier updates + * + * @bo: radeon buffer object + * @addr: userptr addr we should monitor + * + * Registers an MMU notifier for the given BO at the specified address. + * Returns 0 on success, -ERRNO if anything goes wrong. 
+ */ +int radeon_mn_register(struct radeon_bo *bo, unsigned long addr) +{ + unsigned long end = addr + radeon_bo_size(bo) - 1; + struct radeon_device *rdev = bo->rdev; + struct radeon_mn *rmn; + struct interval_tree_node *it; + + rmn = radeon_mn_get(rdev); + if (IS_ERR(rmn)) + return PTR_ERR(rmn); + + mutex_lock(&rmn->lock); + + it = interval_tree_iter_first(&rmn->objects, addr, end); + if (it) { + mutex_unlock(&rmn->lock); + return -EEXIST; + } + + bo->mn = rmn; + bo->mn_it.start = addr; + bo->mn_it.last = end; + interval_tree_insert(&bo->mn_it, &rmn->objects); + + mutex_unlock(&rmn->lock); + + return 0; +} + +/** + * radeon_mn_unregister - unregister a BO for notifier updates + * + * @bo: radeon buffer object + * + * Remove any registration of MMU notifier updates from the buffer object. + */ +void radeon_mn_unregister(struct radeon_bo *bo) +{ + struct radeon_device *rdev = bo->rdev; + struct radeon_mn *rmn; + + mutex_lock(&rdev->mn_lock); + rmn = bo->mn; + if (rmn == NULL) { + mutex_unlock(&rdev->mn_lock); + return; + } + + mutex_lock(&rmn->lock); + interval_tree_remove(&bo->mn_it, &rmn->objects); + bo->mn = NULL; + mutex_unlock(&rmn->lock); + mutex_unlock(&rdev->mn_lock); +} diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index c73c1e3..2875238 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -75,6 +75,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) bo = container_of(tbo, struct radeon_bo, tbo);
radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1); + radeon_mn_unregister(bo);
mutex_lock(&bo->rdev->gem.mutex); list_del_init(&bo->list); diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h index 5dc61c2..c77495f 100644 --- a/include/uapi/drm/radeon_drm.h +++ b/include/uapi/drm/radeon_drm.h @@ -818,6 +818,7 @@ struct drm_radeon_gem_create { #define RADEON_GEM_USERPTR_READONLY (1 << 0) #define RADEON_GEM_USERPTR_ANONONLY (1 << 1) #define RADEON_GEM_USERPTR_VALIDATE (1 << 2) +#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
struct drm_radeon_gem_userptr { uint64_t addr;
On Tue, Aug 05, 2014 at 06:05:31PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Whenever userspace mapping related to our userptr change we wait for it to become idle and unmap it from GTT.
v2: rebased, fix mutex unlock in error path v3: improve commit message
Why in hell do you not register the mmu_notifier on first userptr bo ?
Am 06.08.2014 um 17:16 schrieb Jerome Glisse:
On Tue, Aug 05, 2014 at 06:05:31PM +0200, Christian König wrote:
From: Christian König christian.koenig@amd.com
Whenever userspace mapping related to our userptr change we wait for it to become idle and unmap it from GTT.
v2: rebased, fix mutex unlock in error path v3: improve commit message
Why in hell do you not register the mmu_notifier on first userptr bo ?
The MMU notifier is registered on the first userptr bo that uses it.
Using the notifier is only optional for readonly BOs because it has quite an overhead, and most uses of userptr are snapshot-like anyway. E.g. buffer uploads that just need the data from that user address at a specific point in time, and are not interested in further updates, are created without registering the notifier.
Christian.
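To make the two cases concrete, the flag combinations for a snapshot-style readonly upload versus a long-lived writable mapping might look like this (a sketch using the flags defined in the patches above):

	/* snapshot-style upload: pages captured once, no notifier overhead */
	args.flags = RADEON_GEM_USERPTR_READONLY |
		     RADEON_GEM_USERPTR_VALIDATE;

	/* long-lived writable mapping: anonymous memory only, and the MMU
	 * notifier keeps the GTT mapping coherent with munmap etc. */
	args.flags = RADEON_GEM_USERPTR_ANONONLY |
		     RADEON_GEM_USERPTR_REGISTER;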
From: Christian König christian.koenig@amd.com
It needs to be anonymous memory (no file mappings) and we are required to install an MMU notifier.
Signed-off-by: Christian König christian.koenig@amd.com --- drivers/gpu/drm/radeon/radeon_gem.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 2a6fbf1..01b5894 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -285,19 +285,24 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, if (offset_in_page(args->addr | args->size)) return -EINVAL;
- /* we only support read only mappings for now */ - if (!(args->flags & RADEON_GEM_USERPTR_READONLY)) - return -EACCES; - /* reject unknown flag values */ if (args->flags & ~(RADEON_GEM_USERPTR_READONLY | RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE | RADEON_GEM_USERPTR_REGISTER)) return -EINVAL;
- /* readonly pages not tested on older hardware */ - if (rdev->family < CHIP_R600) - return -EINVAL; + if (args->flags & RADEON_GEM_USERPTR_READONLY) { + /* readonly pages not tested on older hardware */ + if (rdev->family < CHIP_R600) + return -EINVAL; + + } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) || + !(args->flags & RADEON_GEM_USERPTR_REGISTER)) { + + /* if we want to write to it we must require anonymous + memory and install a MMU notifier */ + return -EACCES; + }
down_read(&rdev->exclusive_lock);
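Summarising the check above (a restatement of the diff, not new code): readonly requests still require an R600-class or newer GPU, while writable requests are only accepted when both the anonymous-only and the notifier-register flags are set; everything else is rejected.

	/* sketch of the accepted combinations after this patch */
	if (args->flags & RADEON_GEM_USERPTR_READONLY)
		ok = rdev->family >= CHIP_R600;		/* else -EINVAL */
	else
		ok = (args->flags & RADEON_GEM_USERPTR_ANONONLY) &&
		     (args->flags & RADEON_GEM_USERPTR_REGISTER);	/* else -EACCES */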