Okay, I've listened to the feedback that this should use RCU, and I've tried to work out what that looks like. Here's my first pass at using RCU for sync_file; I'm pretty sure I've missed some fundamental things here.
As to Chris's reservation_object questions: yes, it does the RCU handling, and I suppose it covers the exclusive fence case (with 0 shared fences), so we'd want sync_file to wrap reservation objects rather than semaphores, since the main reason I'm at sync files in the first place is the file part, for sharing them between processes.
Dave.
From: Dave Airlie <airlied@redhat.com>
This isn't needed currently, but to reuse sync_file for Vulkan permanent shared semaphore semantics, we need to be able to swap the fence backing a sync_file. This patch adds a mutex to the sync_file and uses it to protect accesses to the fence and cb members; the fence pointer itself becomes RCU-protected so sync_file_get_fence() can take a reference without the lock.
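For context, a minimal sketch of the reader/writer pattern the diff below follows, assuming the sync_file layout after this patch (an __rcu fence pointer plus a lock mutex). The example_* helpers are illustrative only, not part of the patch; note the patch publishes with RCU_INIT_POINTER(), while rcu_assign_pointer() is shown here as the general publication primitive.

#include <linux/dma-fence.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/sync_file.h>

/* Reader side: take a fence reference without holding the mutex. */
static struct dma_fence *example_get_fence(struct sync_file *sync_file)
{
        struct dma_fence *fence;

        rcu_read_lock();
        /* Retries internally if the fence is swapped or freed under us. */
        fence = dma_fence_get_rcu_safe(&sync_file->fence);
        rcu_read_unlock();

        return fence;
}

/* Writer side: swap the fence with the mutex held, hand back the old one. */
static struct dma_fence *example_swap_fence(struct sync_file *sync_file,
                                            struct dma_fence *fence)
{
        struct dma_fence *old;

        mutex_lock(&sync_file->lock);
        old = rcu_dereference_protected(sync_file->fence,
                                        lockdep_is_held(&sync_file->lock));
        rcu_assign_pointer(sync_file->fence, fence);
        mutex_unlock(&sync_file->lock);

        return old; /* caller now owns the old reference */
}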
v1.1: fix the locking (Julia Lawall).
v2: first try at using RCU.
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/dma-buf/sync_file.c | 82 +++++++++++++++++++++++++++++++++++----------
 include/linux/sync_file.h   |  5 ++-
 2 files changed, 69 insertions(+), 18 deletions(-)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 2321035..8b34f21 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -28,6 +28,10 @@
static const struct file_operations sync_file_fops;
+#define sync_file_held(obj) lockdep_is_held(&(obj)->lock) +#define sync_file_assert_held(obj) \ + lockdep_assert_held(&(obj)->lock) + static struct sync_file *sync_file_alloc(void) { struct sync_file *sync_file; @@ -47,6 +51,9 @@ static struct sync_file *sync_file_alloc(void)
INIT_LIST_HEAD(&sync_file->cb.node);
+ RCU_INIT_POINTER(sync_file->fence, NULL); + + mutex_init(&sync_file->lock); return sync_file;
err: @@ -80,7 +87,9 @@ struct sync_file *sync_file_create(struct dma_fence *fence) if (!sync_file) return NULL;
- sync_file->fence = dma_fence_get(fence); + dma_fence_get(fence); + + RCU_INIT_POINTER(sync_file->fence, fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d", fence->ops->get_driver_name(fence), @@ -124,13 +133,26 @@ struct dma_fence *sync_file_get_fence(int fd) if (!sync_file) return NULL;
- fence = dma_fence_get(sync_file->fence); + if (!rcu_access_pointer(sync_file->fence)) + return NULL; + + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&sync_file->fence); + rcu_read_unlock(); + fput(sync_file->file);
return fence; } EXPORT_SYMBOL(sync_file_get_fence);
+static inline struct dma_fence * +sync_file_get_fence_locked(struct sync_file *sync_file) +{ + return rcu_dereference_protected(sync_file->fence, + sync_file_held(sync_file)); +} + static int sync_file_set_fence(struct sync_file *sync_file, struct dma_fence **fences, int num_fences) { @@ -143,7 +165,7 @@ static int sync_file_set_fence(struct sync_file *sync_file, * we own the reference of the dma_fence_array creation. */ if (num_fences == 1) { - sync_file->fence = fences[0]; + RCU_INIT_POINTER(sync_file->fence, fences[0]); kfree(fences); } else { array = dma_fence_array_create(num_fences, fences, @@ -152,17 +174,20 @@ static int sync_file_set_fence(struct sync_file *sync_file, if (!array) return -ENOMEM;
- sync_file->fence = &array->base; + RCU_INIT_POINTER(sync_file->fence, &array->base); }
return 0; }
+/* must be called with sync_file lock taken */ static struct dma_fence **get_fences(struct sync_file *sync_file, int *num_fences) { - if (dma_fence_is_array(sync_file->fence)) { - struct dma_fence_array *array = to_dma_fence_array(sync_file->fence); + struct dma_fence *fence = sync_file_get_fence_locked(sync_file); + + if (dma_fence_is_array(fence)) { + struct dma_fence_array *array = to_dma_fence_array(fence);
*num_fences = array->num_fences; return array->fences; @@ -204,10 +229,13 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, if (!sync_file) return NULL;
+ mutex_lock(&a->lock); + mutex_lock(&b->lock); a_fences = get_fences(a, &a_num_fences); b_fences = get_fences(b, &b_num_fences); - if (a_num_fences > INT_MAX - b_num_fences) - return NULL; + if (a_num_fences > INT_MAX - b_num_fences) { + goto unlock; + }
num_fences = a_num_fences + b_num_fences;
@@ -268,11 +296,17 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, goto err; }
+ mutex_unlock(&b->lock); + mutex_unlock(&a->lock); + strlcpy(sync_file->name, name, sizeof(sync_file->name)); return sync_file;
err: fput(sync_file->file); +unlock: + mutex_unlock(&b->lock); + mutex_unlock(&a->lock); return NULL;
} @@ -281,10 +315,15 @@ static void sync_file_free(struct kref *kref) { struct sync_file *sync_file = container_of(kref, struct sync_file, kref); + struct dma_fence *fence; + + fence = rcu_dereference_protected(sync_file->fence, 1); + if (fence) { + if (test_bit(POLL_ENABLED, &fence->flags)) + dma_fence_remove_callback(fence, &sync_file->cb); + dma_fence_put(fence); + }
- if (test_bit(POLL_ENABLED, &sync_file->fence->flags)) - dma_fence_remove_callback(sync_file->fence, &sync_file->cb); - dma_fence_put(sync_file->fence); kfree(sync_file); }
@@ -299,16 +338,20 @@ static int sync_file_release(struct inode *inode, struct file *file) static unsigned int sync_file_poll(struct file *file, poll_table *wait) { struct sync_file *sync_file = file->private_data; + unsigned int ret_val;
poll_wait(file, &sync_file->wq, wait);
+ mutex_lock(&sync_file->lock); if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) { if (dma_fence_add_callback(sync_file->fence, &sync_file->cb, fence_check_cb_func) < 0) wake_up_all(&sync_file->wq); } + ret_val = dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0; + mutex_unlock(&sync_file->lock);
- return dma_fence_is_signaled(sync_file->fence) ? POLLIN : 0; + return ret_val; }
static long sync_file_ioctl_merge(struct sync_file *sync_file, @@ -393,6 +436,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (info.flags || info.pad) return -EINVAL;
+ mutex_lock(&sync_file->lock); fences = get_fences(sync_file, &num_fences);
/* @@ -404,13 +448,17 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, if (!info.num_fences) goto no_fences;
- if (info.num_fences < num_fences) - return -EINVAL; + if (info.num_fences < num_fences) { + ret = -EINVAL; + goto out; + }
size = num_fences * sizeof(*fence_info); fence_info = kzalloc(size, GFP_KERNEL); - if (!fence_info) - return -ENOMEM; + if (!fence_info) { + ret = -ENOMEM; + goto out; + }
for (i = 0; i < num_fences; i++) sync_fill_fence_info(fences[i], &fence_info[i]); @@ -433,7 +481,7 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
out: kfree(fence_info); - + mutex_unlock(&sync_file->lock); return ret; }
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index 3e3ab84..006412f 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -30,6 +30,7 @@ * @wq: wait queue for fence signaling * @fence: fence with the fences in the sync_file * @cb: fence callback information + * @lock: mutex to protect fence/cb - used for semaphores */ struct sync_file { struct file *file; @@ -41,8 +42,10 @@ struct sync_file {
wait_queue_head_t wq;
- struct dma_fence *fence; + struct dma_fence __rcu *fence; struct dma_fence_cb cb; + /* protects the fence pointer and cb */ + struct mutex lock; };
#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
From: Dave Airlie <airlied@redhat.com>
Using sync_file to back Vulkan semaphores means we need to be able to replace the fence underlying the sync file. This replace function removes the callback, swaps the fence, and returns the old one. This also exports the alloc and fdget functionality for the semaphore wrapper code.
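As a usage sketch (the example_signal() helper is hypothetical; the amdgpu patch later in this series does essentially this in amdgpu_sem_lookup_and_signal()): the caller owns the fence reference handed back and is responsible for dropping it.

#include <linux/dma-fence.h>
#include <linux/sync_file.h>

/* Install a new fence (or NULL) and drop the reference we get back. */
static void example_signal(struct sync_file *sync_file,
                           struct dma_fence *fence)
{
        struct dma_fence *old;

        /* Takes its own reference on @fence; returns the previous fence. */
        old = sync_file_replace_fence(sync_file, fence);
        dma_fence_put(old); /* dma_fence_put(NULL) is a no-op */
}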
v2: use rcu.
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/dma-buf/sync_file.c | 42 ++++++++++++++++++++++++++++++++++++++++--
 include/linux/sync_file.h   |  5 ++++-
 2 files changed, 44 insertions(+), 3 deletions(-)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 8b34f21..9fcc0a4 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -32,7 +32,14 @@ static const struct file_operations sync_file_fops; #define sync_file_assert_held(obj) \ lockdep_assert_held(&(obj)->lock)
-static struct sync_file *sync_file_alloc(void) +/** + * sync_file_alloc() - allocate an unfenced sync file + * + * Creates a sync_file. + * The sync_file can be released with fput(sync_file->file). + * Returns the sync_file or NULL in case of error. + */ +struct sync_file *sync_file_alloc(void) { struct sync_file *sync_file;
@@ -60,6 +67,7 @@ static struct sync_file *sync_file_alloc(void) kfree(sync_file); return NULL; } +EXPORT_SYMBOL(sync_file_alloc);
static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) { @@ -100,7 +108,7 @@ struct sync_file *sync_file_create(struct dma_fence *fence) } EXPORT_SYMBOL(sync_file_create);
-static struct sync_file *sync_file_fdget(int fd) +struct sync_file *sync_file_fdget(int fd) { struct file *file = fget(fd);
@@ -116,6 +124,7 @@ static struct sync_file *sync_file_fdget(int fd) fput(file); return NULL; } +EXPORT_SYMBOL(sync_file_fdget);
/** * sync_file_get_fence - get the fence related to the sync_file fd @@ -153,6 +162,35 @@ sync_file_get_fence_locked(struct sync_file *sync_file) sync_file_held(sync_file)); }
+/** + * sync_file_replace_fence - replace the fence related to the sync_file + * @sync_file: sync file to replace fence in + * @fence: fence to replace with (or NULL for no fence). + * Returns previous fence. + */ +struct dma_fence *sync_file_replace_fence(struct sync_file *sync_file, + struct dma_fence *fence) +{ + struct dma_fence *ret_fence = NULL; + + if (fence) + dma_fence_get(fence); + + mutex_lock(&sync_file->lock); + + ret_fence = sync_file_get_fence_locked(sync_file); + if (ret_fence) { + if (test_bit(POLL_ENABLED, &ret_fence->flags)) + dma_fence_remove_callback(ret_fence, &sync_file->cb); + } + + RCU_INIT_POINTER(sync_file->fence, fence); + + mutex_unlock(&sync_file->lock); + return ret_fence; +} +EXPORT_SYMBOL(sync_file_replace_fence); + static int sync_file_set_fence(struct sync_file *sync_file, struct dma_fence **fences, int num_fences) { diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h index 006412f..555ae99 100644 --- a/include/linux/sync_file.h +++ b/include/linux/sync_file.h @@ -50,7 +50,10 @@ struct sync_file {
#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+struct sync_file *sync_file_alloc(void); struct sync_file *sync_file_create(struct dma_fence *fence); struct dma_fence *sync_file_get_fence(int fd); - +struct sync_file *sync_file_fdget(int fd); +struct dma_fence *sync_file_replace_fence(struct sync_file *sync_file, + struct dma_fence *fence); #endif /* _LINUX_SYNC_H */
From: Dave Airlie <airlied@redhat.com>
This just splits the fence dependency checking out into its own function, to make it easier to add semaphore dependencies.
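For reference, the resulting dispatch loop, condensed from the diff below (comments added), is just:

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                  struct amdgpu_cs_parser *p)
{
        int i, r;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk = &p->chunks[i];

                /* Per-chunk handling now lives in its own helper, so new
                 * chunk types (semaphores) can be dispatched alongside it. */
                if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
                        r = amdgpu_process_fence_dep(adev, p, chunk);
                        if (r)
                                return r;
                }
        }
        return 0;
}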
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 86 +++++++++++++++++++---------------
 1 file changed, 48 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 99424cb..4671432 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -963,56 +963,66 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return 0; }
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev, - struct amdgpu_cs_parser *p) +static int amdgpu_process_fence_dep(struct amdgpu_device *adev, + struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - int i, j, r; - - for (i = 0; i < p->nchunks; ++i) { - struct drm_amdgpu_cs_chunk_dep *deps; - struct amdgpu_cs_chunk *chunk; - unsigned num_deps; + unsigned num_deps; + int i, r; + struct drm_amdgpu_cs_chunk_dep *deps;
- chunk = &p->chunks[i]; + deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_dep);
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES) - continue; + for (i = 0; i < num_deps; ++i) { + struct amdgpu_ring *ring; + struct amdgpu_ctx *ctx; + struct dma_fence *fence;
- deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; - num_deps = chunk->length_dw * 4 / - sizeof(struct drm_amdgpu_cs_chunk_dep); + r = amdgpu_cs_get_ring(adev, deps[i].ip_type, + deps[i].ip_instance, + deps[i].ring, &ring); + if (r) + return r;
- for (j = 0; j < num_deps; ++j) { - struct amdgpu_ring *ring; - struct amdgpu_ctx *ctx; - struct dma_fence *fence; + ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); + if (ctx == NULL) + return -EINVAL;
- r = amdgpu_cs_get_ring(adev, deps[j].ip_type, - deps[j].ip_instance, - deps[j].ring, &ring); + fence = amdgpu_ctx_get_fence(ctx, ring, + deps[i].handle); + if (IS_ERR(fence)) { + r = PTR_ERR(fence); + amdgpu_ctx_put(ctx); + return r; + } else if (fence) { + r = amdgpu_sync_fence(adev, &p->job->sync, + fence); + dma_fence_put(fence); + amdgpu_ctx_put(ctx); if (r) return r; + } + } + return 0; +}
- ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id); - if (ctx == NULL) - return -EINVAL; +static int amdgpu_cs_dependencies(struct amdgpu_device *adev, + struct amdgpu_cs_parser *p) +{ + int i, r;
- fence = amdgpu_ctx_get_fence(ctx, ring, - deps[j].handle); - if (IS_ERR(fence)) { - r = PTR_ERR(fence); - amdgpu_ctx_put(ctx); - return r; + for (i = 0; i < p->nchunks; ++i) { + struct amdgpu_cs_chunk *chunk;
- } else if (fence) { - r = amdgpu_sync_fence(adev, &p->job->sync, - fence); - dma_fence_put(fence); - amdgpu_ctx_put(ctx); - if (r) - return r; - } + chunk = &p->chunks[i]; + + if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) { + r = amdgpu_process_fence_dep(adev, p, chunk); + if (r) + return r; } }
From: Dave Airlie <airlied@redhat.com>
This creates a new interface for amdgpu with ioctls to create/destroy/import/export shared semaphores, using a sem object backed by the sync_file code. The semaphores are not installed as fds (except on export); instead, like other driver-internal objects, they live in an idr. The idr holds the initial reference on the sync file.
The command submission interface gains two new chunk types, one for semaphore waits and one for semaphore signals; each just takes a list of handles.
This is based on work originally done by David Zhou at AMD, with input from Christian König on what things should look like.
NOTE: this interface addition needs a version bump to expose it to userspace.
v1.1: keep file reference on import.
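To make the intended flow concrete, here is a hypothetical userspace sketch against the uapi added below: create a semaphore handle, export it as an fd for another process, and reference the handle from a CS chunk. The example_* helpers, include paths and omitted error handling are mine, not part of the patch; drmIoctl() is the usual libdrm ioctl wrapper.

#include <stdint.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int example_sem_create(int fd, uint32_t *handle)
{
        union drm_amdgpu_sem args = { .in = { .op = AMDGPU_SEM_OP_CREATE_SEM } };
        int r = drmIoctl(fd, DRM_IOCTL_AMDGPU_SEM, &args);

        *handle = args.out.handle;
        return r;
}

static int example_sem_export(int fd, uint32_t handle, int *sem_fd)
{
        union drm_amdgpu_sem args = {
                .in = { .op = AMDGPU_SEM_OP_EXPORT_SEM, .handle = handle },
        };
        int r = drmIoctl(fd, DRM_IOCTL_AMDGPU_SEM, &args);

        *sem_fd = args.out.fd;
        return r;
}

/* Make a CS wait on the semaphore: one handle per drm_amdgpu_cs_chunk_sem. */
static void example_fill_wait_chunk(struct drm_amdgpu_cs_chunk *chunk,
                                    struct drm_amdgpu_cs_chunk_sem *sem,
                                    uint32_t handle)
{
        sem->handle = handle;
        chunk->chunk_id = AMDGPU_CHUNK_ID_SEM_WAIT;
        chunk->length_dw = sizeof(*sem) / 4;
        chunk->chunk_data = (uintptr_t)sem; /* must stay valid until the CS ioctl */
}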
Signed-off-by: Dave Airlie <airlied@redhat.com>
---
 drivers/gpu/drm/amd/amdgpu/Makefile     |   2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  12 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  |  70 ++++++++++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c |   8 ++
 drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c | 203 ++++++++++++++++++++++++++++++++
 include/uapi/drm/amdgpu_drm.h           |  28 +++++
 6 files changed, 321 insertions(+), 2 deletions(-)
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 2814aad..404bcba 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -24,7 +24,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ - amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o + amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_sem.o
# add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c1b9135..4ed8811 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -702,6 +702,8 @@ struct amdgpu_fpriv { struct mutex bo_list_lock; struct idr bo_list_handles; struct amdgpu_ctx_mgr ctx_mgr; + spinlock_t sem_handles_lock; + struct idr sem_handles; };
/* @@ -1814,5 +1816,15 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo); int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_sem_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); +void amdgpu_sem_destroy(struct amdgpu_fpriv *fpriv, u32 handle); +int amdgpu_sem_lookup_and_signal(struct amdgpu_fpriv *fpriv, + uint32_t handle, + struct dma_fence *fence); +int amdgpu_sem_lookup_and_sync(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, + struct amdgpu_sync *sync, + uint32_t handle); #include "amdgpu_object.h" #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4671432..80fc94b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -217,6 +217,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) break;
case AMDGPU_CHUNK_ID_DEPENDENCIES: + case AMDGPU_CHUNK_ID_SEM_WAIT: + case AMDGPU_CHUNK_ID_SEM_SIGNAL: break;
default: @@ -1009,6 +1011,28 @@ static int amdgpu_process_fence_dep(struct amdgpu_device *adev, return 0; }
+static int amdgpu_process_sem_wait_dep(struct amdgpu_device *adev, + struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) +{ + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; + unsigned num_deps; + int i, r; + struct drm_amdgpu_cs_chunk_sem *deps; + + deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_sem); + + for (i = 0; i < num_deps; ++i) { + r = amdgpu_sem_lookup_and_sync(adev, fpriv, &p->job->sync, + deps[i].handle); + if (r) + return r; + } + return 0; +} + static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { @@ -1023,12 +1047,56 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, r = amdgpu_process_fence_dep(adev, p, chunk); if (r) return r; + } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SEM_WAIT) { + r = amdgpu_process_sem_wait_dep(adev, p, chunk); + if (r) + return r; } }
return 0; }
+static int amdgpu_process_sem_signal_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk, + struct dma_fence *fence) +{ + struct amdgpu_fpriv *fpriv = p->filp->driver_priv; + unsigned num_deps; + int i, r; + struct drm_amdgpu_cs_chunk_sem *deps; + + deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_sem); + + for (i = 0; i < num_deps; ++i) { + r = amdgpu_sem_lookup_and_signal(fpriv, deps[i].handle, + fence); + if (r) + return r; + } + return 0; +} + +static int amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) +{ + int i, r; + + for (i = 0; i < p->nchunks; ++i) { + struct amdgpu_cs_chunk *chunk; + + chunk = &p->chunks[i]; + + if (chunk->chunk_id == AMDGPU_CHUNK_ID_SEM_SIGNAL) { + r = amdgpu_process_sem_signal_dep(p, chunk, p->fence); + if (r) + return r; + } + } + return 0; +} + static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { @@ -1056,7 +1124,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base);
- return 0; + return amdgpu_cs_post_dependencies(p); }
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 61d94c7..013aed1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -664,6 +664,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) mutex_init(&fpriv->bo_list_lock); idr_init(&fpriv->bo_list_handles);
+ spin_lock_init(&fpriv->sem_handles_lock); + idr_init(&fpriv->sem_handles); amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
file_priv->driver_priv = fpriv; @@ -689,6 +691,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, struct amdgpu_device *adev = dev->dev_private; struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_bo_list *list; + struct amdgpu_sem *sem; int handle;
if (!fpriv) @@ -715,6 +718,10 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, idr_destroy(&fpriv->bo_list_handles); mutex_destroy(&fpriv->bo_list_lock);
+ idr_for_each_entry(&fpriv->sem_handles, sem, handle) + amdgpu_sem_destroy(fpriv, handle); + idr_destroy(&fpriv->sem_handles); + kfree(fpriv); file_priv->driver_priv = NULL;
@@ -896,6 +903,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(AMDGPU_SEM, amdgpu_sem_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), }; const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c new file mode 100644 index 0000000..94a637f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sem.c @@ -0,0 +1,203 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Chunming Zhou david1.zhou@amd.com + */ +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/seq_file.h> +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/anon_inodes.h> +#include <linux/sync_file.h> +#include "amdgpu.h" +#include <drm/drmP.h> + +static inline struct sync_file *amdgpu_sync_file_lookup(struct amdgpu_fpriv *fpriv, u32 handle) +{ + struct sync_file *sync_file; + + spin_lock(&fpriv->sem_handles_lock); + + /* Check if we currently have a reference on the object */ + sync_file = idr_find(&fpriv->sem_handles, handle); + + spin_unlock(&fpriv->sem_handles_lock); + + return sync_file; +} + +static int amdgpu_sem_create(struct amdgpu_fpriv *fpriv, u32 *handle) +{ + struct sync_file *sync_file = sync_file_alloc(); + int ret; + + if (!sync_file) + return -ENOMEM; + + snprintf(sync_file->name, sizeof(sync_file->name), "sync_sem"); + + /* we get a file reference and we use that in the idr. 
*/ + idr_preload(GFP_KERNEL); + spin_lock(&fpriv->sem_handles_lock); + + ret = idr_alloc(&fpriv->sem_handles, sync_file, 1, 0, GFP_NOWAIT); + + spin_unlock(&fpriv->sem_handles_lock); + idr_preload_end(); + + if (ret < 0) + return ret; + + *handle = ret; + return 0; +} + +void amdgpu_sem_destroy(struct amdgpu_fpriv *fpriv, u32 handle) +{ + struct sync_file *sync_file = amdgpu_sync_file_lookup(fpriv, handle); + if (!sync_file) + return; + + spin_lock(&fpriv->sem_handles_lock); + idr_remove(&fpriv->sem_handles, handle); + spin_unlock(&fpriv->sem_handles_lock); + + fput(sync_file->file); +} + + +int amdgpu_sem_lookup_and_signal(struct amdgpu_fpriv *fpriv, + uint32_t handle, + struct dma_fence *fence) +{ + struct sync_file *sync_file; + struct dma_fence *old_fence; + sync_file = amdgpu_sync_file_lookup(fpriv, handle); + if (!sync_file) + return -EINVAL; + + old_fence = sync_file_replace_fence(sync_file, fence); + dma_fence_put(old_fence); + return 0; +} + +static int amdgpu_sem_import(struct amdgpu_fpriv *fpriv, + int fd, u32 *handle) +{ + struct sync_file *sync_file = sync_file_fdget(fd); + int ret; + + if (!sync_file) + return -EINVAL; + + idr_preload(GFP_KERNEL); + spin_lock(&fpriv->sem_handles_lock); + + ret = idr_alloc(&fpriv->sem_handles, sync_file, 1, 0, GFP_NOWAIT); + + spin_unlock(&fpriv->sem_handles_lock); + idr_preload_end(); + + if (ret < 0) + goto err_out; + + *handle = ret; + return 0; +err_out: + return ret; + +} + +static int amdgpu_sem_export(struct amdgpu_fpriv *fpriv, + u32 handle, int *fd) +{ + struct sync_file *sync_file; + int ret; + + sync_file = amdgpu_sync_file_lookup(fpriv, handle); + if (!sync_file) + return -EINVAL; + + ret = get_unused_fd_flags(O_CLOEXEC); + if (ret < 0) + goto err_put_file; + + fd_install(ret, sync_file->file); + + *fd = ret; + return 0; +err_put_file: + return ret; +} + +int amdgpu_sem_lookup_and_sync(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, + struct amdgpu_sync *sync, + uint32_t handle) +{ + int r; + struct sync_file *sync_file; + struct dma_fence *fence; + + sync_file = amdgpu_sync_file_lookup(fpriv, handle); + if (!sync_file) + return -EINVAL; + + fence = sync_file_replace_fence(sync_file, NULL); + r = amdgpu_sync_fence(adev, sync, fence); + dma_fence_put(fence); + + return r; +} + +int amdgpu_sem_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp) +{ + union drm_amdgpu_sem *args = data; + struct amdgpu_fpriv *fpriv = filp->driver_priv; + int r = 0; + + switch (args->in.op) { + case AMDGPU_SEM_OP_CREATE_SEM: + r = amdgpu_sem_create(fpriv, &args->out.handle); + break; + case AMDGPU_SEM_OP_IMPORT_SEM: + r = amdgpu_sem_import(fpriv, args->in.handle, &args->out.handle); + break; + case AMDGPU_SEM_OP_EXPORT_SEM: + r = amdgpu_sem_export(fpriv, args->in.handle, &args->out.fd); + break; + case AMDGPU_SEM_OP_DESTROY_SEM: + amdgpu_sem_destroy(fpriv, args->in.handle); + break; + default: + r = -EINVAL; + break; + } + + return r; +} diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 5797283..646b103 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -51,6 +51,7 @@ extern "C" { #define DRM_AMDGPU_GEM_OP 0x10 #define DRM_AMDGPU_GEM_USERPTR 0x11 #define DRM_AMDGPU_WAIT_FENCES 0x12 +#define DRM_AMDGPU_SEM 0x13
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create) #define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap) @@ -65,6 +66,7 @@ extern "C" { #define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op) #define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr) #define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences) +#define DRM_IOCTL_AMDGPU_SEM DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_SEM, union drm_amdgpu_sem)
#define AMDGPU_GEM_DOMAIN_CPU 0x1 #define AMDGPU_GEM_DOMAIN_GTT 0x2 @@ -335,6 +337,26 @@ union drm_amdgpu_wait_fences { struct drm_amdgpu_wait_fences_out out; };
+#define AMDGPU_SEM_OP_CREATE_SEM 0 +#define AMDGPU_SEM_OP_IMPORT_SEM 1 +#define AMDGPU_SEM_OP_EXPORT_SEM 2 +#define AMDGPU_SEM_OP_DESTROY_SEM 3 + +struct drm_amdgpu_sem_in { + __u32 op; + __u32 handle; +}; + +struct drm_amdgpu_sem_out { + __u32 fd; + __u32 handle; +}; + +union drm_amdgpu_sem { + struct drm_amdgpu_sem_in in; + struct drm_amdgpu_sem_out out; +}; + #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 #define AMDGPU_GEM_OP_SET_PLACEMENT 1
@@ -390,6 +412,8 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_IB 0x01 #define AMDGPU_CHUNK_ID_FENCE 0x02 #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 +#define AMDGPU_CHUNK_ID_SEM_WAIT 0x04 +#define AMDGPU_CHUNK_ID_SEM_SIGNAL 0x05
struct drm_amdgpu_cs_chunk { __u32 chunk_id; @@ -454,6 +478,10 @@ struct drm_amdgpu_cs_chunk_fence { __u32 offset; };
+struct drm_amdgpu_cs_chunk_sem { + __u32 handle; +}; + struct drm_amdgpu_cs_chunk_data { union { struct drm_amdgpu_cs_chunk_ib ib_data;