I've fixed up a few things in here from Daniel's comments. Daniel, I didn't change things exactly as suggested, but I removed fd_flags from the API completely.
Christian, I think I got the post deps hook in the right place now in the amdgpu patch.
Would be nice if someone can validate the timeout stuff in the second patch makes sense, I've decided to stick to an absolute timeout.
I will probably at least merge the first 3 into drm-next, and we can work out what to do with the two amdgpu ones.
Dave.
From: Dave Airlie airlied@redhat.com
Sync objects are new toplevel drm object, that contain a pointer to a fence. This fence can be updated via command submission ioctls via drivers.
There is also a generic wait object API modelled on the Vulkan wait API (with code modelled on some amdgpu code).
These objects can be converted to an opaque fd that can be passed between processes.
v2: rename reference/unreference to put/get (Chris) fix leaked reference (David Zhou) drop mutex in favour of cmpxchg (Chris) v3: cleanups from danvet, rebase on drm_fops rename check fd_flags is 0 in ioctls.
Reviewed-by: Sean Paul seanpaul@chromium.org Signed-off-by: Dave Airlie airlied@redhat.com --- Documentation/gpu/drm-internals.rst | 3 + Documentation/gpu/drm-mm.rst | 12 ++ drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_file.c | 8 + drivers/gpu/drm/drm_internal.h | 13 ++ drivers/gpu/drm/drm_ioctl.c | 12 ++ drivers/gpu/drm/drm_syncobj.c | 377 ++++++++++++++++++++++++++++++++++++ include/drm/drmP.h | 1 - include/drm/drm_drv.h | 1 + include/drm/drm_file.h | 5 + include/drm/drm_syncobj.h | 87 +++++++++ include/uapi/drm/drm.h | 24 +++ 12 files changed, 543 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/drm_syncobj.c create mode 100644 include/drm/drm_syncobj.h
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst index babfb61..2b23d78 100644 --- a/Documentation/gpu/drm-internals.rst +++ b/Documentation/gpu/drm-internals.rst @@ -98,6 +98,9 @@ DRIVER_ATOMIC implement appropriate obj->atomic_get_property() vfuncs for any modeset objects with driver specific properties.
+DRIVER_SYNCOBJ + Driver support drm sync objects. + Major, Minor and Patchlevel ~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index 96b9c34..9412798 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -484,3 +484,15 @@ DRM Cache Handling
.. kernel-doc:: drivers/gpu/drm/drm_cache.c :export: + +DRM Sync Objects +=========================== + +.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c + :doc: Overview + +.. kernel-doc:: include/drm/drm_syncobj.h + :export: + +.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c + :export: diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 59f0f9b..6f42188 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -16,7 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_framebuffer.o drm_connector.o drm_blend.o \ drm_encoder.o drm_mode_object.o drm_property.o \ drm_plane.o drm_color_mgmt.o drm_print.o \ - drm_dumb_buffers.o drm_mode_config.o + drm_dumb_buffers.o drm_mode_config.o drm_syncobj.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_DRM_VM) += drm_vm.o diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 3783b65..a20d6a9 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -229,6 +229,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_open(dev, priv);
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_open(priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_init_file_private(&priv->prime);
@@ -276,6 +279,8 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) out_prime_destroy: if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_destroy_file_private(&priv->prime); + if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_release(priv); if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, priv); put_pid(priv->pid); @@ -398,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp) drm_property_destroy_user_blobs(dev, file_priv); }
+ if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_release(file_priv); + if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 3d8e8f8..3fdef2c 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -142,4 +142,17 @@ static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc) { return 0; } + #endif + +/* drm_syncobj.c */ +void drm_syncobj_open(struct drm_file *file_private); +void drm_syncobj_release(struct drm_file *file_private); +int drm_syncobj_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 865e3ee..f1e5681 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -241,6 +241,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0; return 0; + case DRM_CAP_SYNCOBJ: + req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ); + return 0; }
/* Other caps only work with KMS drivers */ @@ -645,6 +648,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), };
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c new file mode 100644 index 0000000..b611480 --- /dev/null +++ b/drivers/gpu/drm/drm_syncobj.c @@ -0,0 +1,377 @@ +/* + * Copyright 2017 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * + */ + +/** + * DOC: Overview + * + * DRM synchronisation objects (syncobj) are a persistent objects, + * that contain an optional fence. The fence can be updated with a new + * fence, or be NULL. + * + * syncobj's can be export to fd's and back, these fd's are opaque and + * have no other use case, except passing the syncobj between processes. + * + * Their primary use-case is to implement Vulkan fences and semaphores. + * + * syncobj have a kref reference count, but also have an optional file. + * The file is only created once the syncobj is exported. 
+ * The file takes a reference on the kref. + */ + +#include <drm/drmP.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> + +#include "drm_internal.h" +#include <drm/drm_syncobj.h> + +static struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, + u32 handle) +{ + struct drm_syncobj *syncobj; + + spin_lock(&file_private->syncobj_table_lock); + + /* Check if we currently have a reference on the object */ + syncobj = idr_find(&file_private->syncobj_idr, handle); + if (syncobj) + drm_syncobj_get(syncobj); + + spin_unlock(&file_private->syncobj_table_lock); + + return syncobj; +} + +/** + * drm_syncobj_replace_fence - lookup and replace fence in a sync object. + * @file_private - drm file private pointer. + * @handle - syncobj handle to lookup + * @fence - fence to install in sync file. + * Returns: + * 0 on success, or -EINVAL when the handle doesn't point at a valid sem file. + * + * This looks up a sync object and replaces the fence on it, freeing + * the old one. 
+ */ +int drm_syncobj_replace_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence *fence) +{ + struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); + struct dma_fence *old_fence = NULL; + + if (!syncobj) + return -EINVAL; + + if (fence) + dma_fence_get(fence); + old_fence = xchg(&syncobj->fence, fence); + + dma_fence_put(old_fence); + drm_syncobj_put(syncobj); + + return 0; +} +EXPORT_SYMBOL(drm_syncobj_replace_fence); + +int drm_syncobj_fence_get(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence) +{ + struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); + int ret = 0; + + if (!syncobj) + return -ENOENT; + + *fence = dma_fence_get(syncobj->fence); + if (!*fence) { + ret = -EINVAL; + } + drm_syncobj_put(syncobj); + return ret; +} +EXPORT_SYMBOL(drm_syncobj_fence_get); + +void drm_syncobj_free(struct kref *kref) +{ + struct drm_syncobj *syncobj = container_of(kref, + struct drm_syncobj, + refcount); + dma_fence_put(syncobj->fence); + kfree(syncobj); +} + +static int drm_syncobj_create(struct drm_file *file_private, + u32 *handle) +{ + int ret; + struct drm_syncobj *syncobj; + + syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL); + if (!syncobj) + return -ENOMEM; + + kref_init(&syncobj->refcount); + + idr_preload(GFP_KERNEL); + spin_lock(&file_private->syncobj_table_lock); + ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT); + spin_unlock(&file_private->syncobj_table_lock); + + idr_preload_end(); + + if (ret < 0) { + drm_syncobj_put(syncobj); + return ret; + } + + *handle = ret; + return 0; +} + +static int drm_syncobj_destroy(struct drm_file *file_private, + u32 handle) +{ + struct drm_syncobj *syncobj; + + spin_lock(&file_private->syncobj_table_lock); + syncobj = idr_remove(&file_private->syncobj_idr, handle); + spin_unlock(&file_private->syncobj_table_lock); + + if (!syncobj) + return -EINVAL; + + drm_syncobj_put(syncobj); + return 0; +} + +static int 
drm_syncobj_file_release(struct inode *inode, struct file *file) +{ + struct drm_syncobj *syncobj = file->private_data; + + drm_syncobj_put(syncobj); + return 0; +} + +static const struct file_operations drm_syncobj_file_fops = { + .release = drm_syncobj_file_release, +}; + +static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj) +{ + struct file *file = anon_inode_getfile("syncobj_file", + &drm_syncobj_file_fops, + syncobj, 0); + if (IS_ERR(file)) + return PTR_ERR(file); + + drm_syncobj_get(syncobj); + if (cmpxchg(&syncobj->file, NULL, file)) { + /* lost the race */ + fput(file); + } + + return 0; +} + +static int drm_syncobj_handle_to_fd(struct drm_file *file_private, + u32 handle, int *p_fd) +{ + struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); + int ret; + int fd; + + if (!syncobj) + return -EINVAL; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + drm_syncobj_put(syncobj); + return fd; + } + + if (!syncobj->file) { + ret = drm_syncobj_alloc_file(syncobj); + if (ret) + goto out_put_fd; + } + fd_install(fd, syncobj->file); + drm_syncobj_put(syncobj); + *p_fd = fd; + return 0; +out_put_fd: + put_unused_fd(fd); + drm_syncobj_put(syncobj); + return ret; +} + +static struct drm_syncobj *drm_syncobj_fdget(int fd) +{ + struct file *file = fget(fd); + + if (!file) + return NULL; + if (file->f_op != &drm_syncobj_file_fops) + goto err; + + return file->private_data; +err: + fput(file); + return NULL; +}; + +static int drm_syncobj_fd_to_handle(struct drm_file *file_private, + int fd, u32 *handle) +{ + struct drm_syncobj *syncobj = drm_syncobj_fdget(fd); + int ret; + + if (!syncobj) + return -EINVAL; + + /* take a reference to put in the idr */ + drm_syncobj_get(syncobj); + + idr_preload(GFP_KERNEL); + spin_lock(&file_private->syncobj_table_lock); + ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT); + spin_unlock(&file_private->syncobj_table_lock); + idr_preload_end(); + + if (ret < 0) { + fput(syncobj->file); + 
return ret; + } + *handle = ret; + return 0; +} + +/** + * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time + * @dev: drm_device which is being opened by userspace + * @file_private: drm file-private structure to set up + * + * Called at device open time, sets up the structure for handling refcounting + * of sync objects. + */ +void +drm_syncobj_open(struct drm_file *file_private) +{ + idr_init(&file_private->syncobj_idr); + spin_lock_init(&file_private->syncobj_table_lock); +} + +static int +drm_syncobj_release_handle(int id, void *ptr, void *data) +{ + struct drm_syncobj *syncobj = ptr; + + drm_syncobj_put(syncobj); + return 0; +} + +/** + * drm_syncobj_release - release file-private sync object resources + * @dev: drm_device which is being closed by userspace + * @file_private: drm file-private structure to clean up + * + * Called at close time when the filp is going away. + * + * Releases any remaining references on objects by this filp. + */ +void +drm_syncobj_release(struct drm_file *file_private) +{ + idr_for_each(&file_private->syncobj_idr, + &drm_syncobj_release_handle, file_private); + idr_destroy(&file_private->syncobj_idr); +} + +int +drm_syncobj_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_create *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + /* no valid flags yet */ + if (args->flags) + return -EINVAL; + + return drm_syncobj_create(file_private, + &args->handle); +} + +int +drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_destroy *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + /* make sure padding is empty */ + if (args->pad) + return -EINVAL; + return drm_syncobj_destroy(file_private, args->handle); +} + +int +drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file 
*file_private) +{ + struct drm_syncobj_handle *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->pad || args->flags) + return -EINVAL; + + return drm_syncobj_handle_to_fd(file_private, args->handle, + &args->fd); +} + +int +drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_handle *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->pad || args->flags) + return -EINVAL; + + return drm_syncobj_fd_to_handle(file_private, args->fd, + &args->handle); +} diff --git a/include/drm/drmP.h b/include/drm/drmP.h index e1daa4f..4fad9f2 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -319,7 +319,6 @@ struct pci_controller;
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
- /* Flags and return codes for get_vblank_timestamp() driver function. */ #define DRM_CALLED_FROM_VBLIRQ 1 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 53b9832..1c66c1c 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -53,6 +53,7 @@ struct drm_mode_create_dumb; #define DRIVER_RENDER 0x8000 #define DRIVER_ATOMIC 0x10000 #define DRIVER_KMS_LEGACY_CONTEXT 0x20000 +#define DRIVER_SYNCOBJ 0x40000
/** * struct drm_driver - DRM driver structure diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 5dd27ae..cea7354 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -231,6 +231,11 @@ struct drm_file { /** @table_lock: Protects @object_idr. */ spinlock_t table_lock;
+ /** @syncobj_idr: Mapping of sync object handles to object pointers. */ + struct idr syncobj_idr; + /** @syncobj_table_lock: Protects @syncobj_idr. */ + spinlock_t syncobj_table_lock; + /** @filp: Pointer to the core file structure. */ struct file *filp;
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h new file mode 100644 index 0000000..372c398 --- /dev/null +++ b/include/drm/drm_syncobj.h @@ -0,0 +1,87 @@ +/* + * Copyright © 2017 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * + */ +#ifndef __DRM_SYNCOBJ_H__ +#define __DRM_SYNCOBJ_H__ + +#include "linux/dma-fence.h" + +/** + * struct drm_syncobj - sync object. + * + * This structure defines a generic sync object which wraps a dma fence. + */ +struct drm_syncobj { + /** + * @refcount: + * + * Reference count of this object. + */ + struct kref refcount; + /** + * @fence: + * NULL or a pointer to the fence bound to this object. + */ + struct dma_fence *fence; + /** + * @file: + * a file backing for this syncobj. 
+ */ + struct file *file; +}; + +void drm_syncobj_free(struct kref *kref); + +/** + * drm_syncobj_get - acquire a syncobj reference + * @obj: sync object + * + * This acquires additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ +static inline void +drm_syncobj_get(struct drm_syncobj *obj) +{ + kref_get(&obj->refcount); +} + +/** + * drm_syncobj_put - release a reference to a sync object. + * @obj: sync object. + */ +static inline void +drm_syncobj_put(struct drm_syncobj *obj) +{ + kref_put(&obj->refcount, drm_syncobj_free); +} + +int drm_syncobj_fence_get(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence); +int drm_syncobj_replace_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence *fence); + +#endif diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 42d9f64..96c5c78 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -648,6 +648,7 @@ struct drm_gem_open { #define DRM_CAP_ADDFB2_MODIFIERS 0x10 #define DRM_CAP_PAGE_FLIP_TARGET 0x11 #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 +#define DRM_CAP_SYNCOBJ 0x13
/** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { @@ -697,6 +698,24 @@ struct drm_prime_handle { __s32 fd; };
+struct drm_syncobj_create { + __u32 handle; + __u32 flags; +}; + +struct drm_syncobj_destroy { + __u32 handle; + __u32 pad; +}; + +struct drm_syncobj_handle { + __u32 handle; + __u32 flags; + + __s32 fd; + __u32 pad; +}; + #if defined(__cplusplus) } #endif @@ -815,6 +834,11 @@ extern "C" { #define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) #define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create) +#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) +#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) + /** * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f.
I can't really review for all of the kernel details (though the seem ok to me) so this mostly applies to the API:
Reviewed-by: Jason Ekstrand jason@jlekstrand.net
On Wed, May 24, 2017 at 12:06 AM, Dave Airlie airlied@gmail.com wrote:
From: Dave Airlie airlied@redhat.com
Sync objects are a new toplevel drm object that contains a pointer to a fence. This fence can be updated via command submission ioctls by drivers.
There is also a generic wait obj API modelled on the vulkan wait API (with code modelled on some amdgpu code).
These objects can be converted to an opaque fd that can be passed between processes.
v2: rename reference/unreference to put/get (Chris) fix leaked reference (David Zhou) drop mutex in favour of cmpxchg (Chris) v3: cleanups from danvet, rebase on drm_fops rename check fd_flags is 0 in ioctls.
Reviewed-by: Sean Paul seanpaul@chromium.org Signed-off-by: Dave Airlie airlied@redhat.com
Documentation/gpu/drm-internals.rst | 3 + Documentation/gpu/drm-mm.rst | 12 ++ drivers/gpu/drm/Makefile | 2 +- drivers/gpu/drm/drm_file.c | 8 + drivers/gpu/drm/drm_internal.h | 13 ++ drivers/gpu/drm/drm_ioctl.c | 12 ++ drivers/gpu/drm/drm_syncobj.c | 377 ++++++++++++++++++++++++++++++ ++++++ include/drm/drmP.h | 1 - include/drm/drm_drv.h | 1 + include/drm/drm_file.h | 5 + include/drm/drm_syncobj.h | 87 +++++++++ include/uapi/drm/drm.h | 24 +++ 12 files changed, 543 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/drm_syncobj.c create mode 100644 include/drm/drm_syncobj.h
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm- internals.rst index babfb61..2b23d78 100644 --- a/Documentation/gpu/drm-internals.rst +++ b/Documentation/gpu/drm-internals.rst @@ -98,6 +98,9 @@ DRIVER_ATOMIC implement appropriate obj->atomic_get_property() vfuncs for any modeset objects with driver specific properties.
+DRIVER_SYNCOBJ
- Driver supports drm sync objects.
Major, Minor and Patchlevel
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst index 96b9c34..9412798 100644 --- a/Documentation/gpu/drm-mm.rst +++ b/Documentation/gpu/drm-mm.rst @@ -484,3 +484,15 @@ DRM Cache Handling .. kernel-doc:: drivers/gpu/drm/drm_cache.c :export: + +DRM Sync Objects +=========================== + +.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c + :doc: Overview + +.. kernel-doc:: include/drm/drm_syncobj.h + :export: + +.. kernel-doc:: drivers/gpu/drm/drm_syncobj.c + :export: diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 59f0f9b..6f42188 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -16,7 +16,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_framebuffer.o drm_connector.o drm_blend.o \ drm_encoder.o drm_mode_object.o drm_property.o \ drm_plane.o drm_color_mgmt.o drm_print.o \ - drm_dumb_buffers.o drm_mode_config.o + drm_dumb_buffers.o drm_mode_config.o drm_syncobj.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_DRM_VM) += drm_vm.o diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 3783b65..a20d6a9 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -229,6 +229,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_open(dev, priv); + if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_open(priv); + if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_init_file_private(&priv->prime); @@ -276,6 +279,8 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) out_prime_destroy: if (drm_core_check_feature(dev, DRIVER_PRIME)) drm_prime_destroy_file_private(&priv->prime); + if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_release(priv); if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, priv); put_pid(priv->pid); @@ -398,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp) 
drm_property_destroy_user_blobs(dev, file_priv); } + if (drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + drm_syncobj_release(file_priv); + if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, file_priv); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_ internal.h index 3d8e8f8..3fdef2c 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -142,4 +142,17 @@ static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc) { return 0; } + #endif + +/* drm_syncobj.c */ +void drm_syncobj_open(struct drm_file *file_private); +void drm_syncobj_release(struct drm_file *file_private); +int drm_syncobj_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); +int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 865e3ee..f1e5681 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -241,6 +241,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; req->value |= dev->driver->prime_handle_to_fd ? 
DRM_PRIME_CAP_EXPORT : 0; return 0; + case DRM_CAP_SYNCOBJ: + req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ); + return 0; } /* Other caps only work with KMS drivers */ @@ -645,6 +648,15 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATEPROPBLOB, drm_mode_createblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROYPROPBLOB, drm_mode_destroyblob_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_CREATE, drm_syncobj_create_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_DESTROY, drm_syncobj_destroy_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, drm_syncobj_handle_to_fd_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c new file mode 100644 index 0000000..b611480 --- /dev/null +++ b/drivers/gpu/drm/drm_syncobj.c @@ -0,0 +1,377 @@ +/* + * Copyright 2017 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * + */ + +/** + * DOC: Overview + * + * DRM synchronisation objects (syncobj) are a persistent objects, + * that contain an optional fence. The fence can be updated with a new + * fence, or be NULL. + * + * syncobj's can be export to fd's and back, these fd's are opaque and + * have no other use case, except passing the syncobj between processes. + * + * Their primary use-case is to implement Vulkan fences and semaphores. + * + * syncobj have a kref reference count, but also have an optional file. + * The file is only created once the syncobj is exported. + * The file takes a reference on the kref. + */ + +#include <drm/drmP.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/anon_inodes.h> + +#include "drm_internal.h" +#include <drm/drm_syncobj.h> + +static struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private, + u32 handle) +{ + struct drm_syncobj *syncobj; + + spin_lock(&file_private->syncobj_table_lock); + + /* Check if we currently have a reference on the object */ + syncobj = idr_find(&file_private->syncobj_idr, handle); + if (syncobj) + drm_syncobj_get(syncobj); + + spin_unlock(&file_private->syncobj_table_lock); + + return syncobj; +} + +/** + * drm_syncobj_replace_fence - lookup and replace fence in a sync object. + * @file_private - drm file private pointer. + * @handle - syncobj handle to lookup + * @fence - fence to install in sync file. 
+ * Returns: + * 0 on success, or -EINVAL when the handle doesn't point at a valid sem file. + * + * This looks up a sync object and replaces the fence on it, freeing + * the old one. + */ +int drm_syncobj_replace_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence *fence) +{ + struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); + struct dma_fence *old_fence = NULL; + + if (!syncobj) + return -EINVAL; + + if (fence) + dma_fence_get(fence); + old_fence = xchg(&syncobj->fence, fence); + + dma_fence_put(old_fence); + drm_syncobj_put(syncobj); + + return 0; +} +EXPORT_SYMBOL(drm_syncobj_replace_fence); + +int drm_syncobj_fence_get(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence) +{ + struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); + int ret = 0; + + if (!syncobj) + return -ENOENT; + + *fence = dma_fence_get(syncobj->fence); + if (!*fence) { + ret = -EINVAL; + } + drm_syncobj_put(syncobj); + return ret; +} +EXPORT_SYMBOL(drm_syncobj_fence_get); + +void drm_syncobj_free(struct kref *kref) +{ + struct drm_syncobj *syncobj = container_of(kref, + struct drm_syncobj, + refcount); + dma_fence_put(syncobj->fence); + kfree(syncobj); +} + +static int drm_syncobj_create(struct drm_file *file_private, + u32 *handle) +{ + int ret; + struct drm_syncobj *syncobj; + + syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL); + if (!syncobj) + return -ENOMEM; + + kref_init(&syncobj->refcount); + + idr_preload(GFP_KERNEL); + spin_lock(&file_private->syncobj_table_lock); + ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT); + spin_unlock(&file_private->syncobj_table_lock); + + idr_preload_end(); + + if (ret < 0) { + drm_syncobj_put(syncobj); + return ret; + } + + *handle = ret; + return 0; +} + +static int drm_syncobj_destroy(struct drm_file *file_private, + u32 handle) +{ + struct drm_syncobj *syncobj; + + spin_lock(&file_private->syncobj_table_lock); + syncobj = 
idr_remove(&file_private->syncobj_idr, handle); + spin_unlock(&file_private->syncobj_table_lock); + + if (!syncobj) + return -EINVAL; + + drm_syncobj_put(syncobj); + return 0; +} + +static int drm_syncobj_file_release(struct inode *inode, struct file *file) +{ + struct drm_syncobj *syncobj = file->private_data; + + drm_syncobj_put(syncobj); + return 0; +} + +static const struct file_operations drm_syncobj_file_fops = { + .release = drm_syncobj_file_release, +}; + +static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj) +{ + struct file *file = anon_inode_getfile("syncobj_file", + &drm_syncobj_file_fops, + syncobj, 0); + if (IS_ERR(file)) + return PTR_ERR(file); + + drm_syncobj_get(syncobj); + if (cmpxchg(&syncobj->file, NULL, file)) { + /* lost the race */ + fput(file); + } + + return 0; +} + +static int drm_syncobj_handle_to_fd(struct drm_file *file_private, + u32 handle, int *p_fd) +{ + struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle); + int ret; + int fd; + + if (!syncobj) + return -EINVAL; + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + drm_syncobj_put(syncobj); + return fd; + } + + if (!syncobj->file) { + ret = drm_syncobj_alloc_file(syncobj); + if (ret) + goto out_put_fd; + } + fd_install(fd, syncobj->file); + drm_syncobj_put(syncobj); + *p_fd = fd; + return 0; +out_put_fd: + put_unused_fd(fd); + drm_syncobj_put(syncobj); + return ret; +} + +static struct drm_syncobj *drm_syncobj_fdget(int fd) +{ + struct file *file = fget(fd); + + if (!file) + return NULL; + if (file->f_op != &drm_syncobj_file_fops) + goto err; + + return file->private_data; +err: + fput(file); + return NULL; +}; + +static int drm_syncobj_fd_to_handle(struct drm_file *file_private, + int fd, u32 *handle) +{ + struct drm_syncobj *syncobj = drm_syncobj_fdget(fd); + int ret; + + if (!syncobj) + return -EINVAL; + + /* take a reference to put in the idr */ + drm_syncobj_get(syncobj); + + idr_preload(GFP_KERNEL); + spin_lock(&file_private->syncobj_table_lock); 
+ ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT); + spin_unlock(&file_private->syncobj_table_lock); + idr_preload_end(); + + if (ret < 0) { + fput(syncobj->file); + return ret; + } + *handle = ret; + return 0; +} + +/** + * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time + * @dev: drm_device which is being opened by userspace + * @file_private: drm file-private structure to set up + * + * Called at device open time, sets up the structure for handling refcounting + * of sync objects. + */ +void +drm_syncobj_open(struct drm_file *file_private) +{ + idr_init(&file_private->syncobj_idr); + spin_lock_init(&file_private->syncobj_table_lock); +} + +static int +drm_syncobj_release_handle(int id, void *ptr, void *data) +{ + struct drm_syncobj *syncobj = ptr; + + drm_syncobj_put(syncobj); + return 0; +} + +/** + * drm_syncobj_release - release file-private sync object resources + * @dev: drm_device which is being closed by userspace + * @file_private: drm file-private structure to clean up + * + * Called at close time when the filp is going away. + * + * Releases any remaining references on objects by this filp. 
+ */ +void +drm_syncobj_release(struct drm_file *file_private) +{ + idr_for_each(&file_private->syncobj_idr, + &drm_syncobj_release_handle, file_private); + idr_destroy(&file_private->syncobj_idr); +} + +int +drm_syncobj_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_create *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + /* no valid flags yet */ + if (args->flags) + return -EINVAL; + + return drm_syncobj_create(file_private, + &args->handle); +} + +int +drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_destroy *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + /* make sure padding is empty */ + if (args->pad) + return -EINVAL; + return drm_syncobj_destroy(file_private, args->handle); +} + +int +drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_handle *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->pad || args->flags) + return -EINVAL; + + return drm_syncobj_handle_to_fd(file_private, args->handle, + &args->fd); +} + +int +drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_handle *args = data; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->pad || args->flags) + return -EINVAL; + + return drm_syncobj_fd_to_handle(file_private, args->fd, + &args->handle); +} diff --git a/include/drm/drmP.h b/include/drm/drmP.h index e1daa4f..4fad9f2 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -319,7 +319,6 @@ struct pci_controller; #define DRM_IF_VERSION(maj, min) (maj << 16 | min) - /* Flags and return codes for get_vblank_timestamp() driver function. 
*/ #define DRM_CALLED_FROM_VBLIRQ 1 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 53b9832..1c66c1c 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -53,6 +53,7 @@ struct drm_mode_create_dumb; #define DRIVER_RENDER 0x8000 #define DRIVER_ATOMIC 0x10000 #define DRIVER_KMS_LEGACY_CONTEXT 0x20000 +#define DRIVER_SYNCOBJ 0x40000 /** * struct drm_driver - DRM driver structure diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 5dd27ae..cea7354 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -231,6 +231,11 @@ struct drm_file { /** @table_lock: Protects @object_idr. */ spinlock_t table_lock; + /** @syncobj_idr: Mapping of sync object handles to object pointers. */ + struct idr syncobj_idr; + /** @syncobj_table_lock: Protects @syncobj_idr. */ + spinlock_t syncobj_table_lock; + /** @filp: Pointer to the core file structure. */ struct file *filp; diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h new file mode 100644 index 0000000..372c398 --- /dev/null +++ b/include/drm/drm_syncobj.h @@ -0,0 +1,87 @@ +/* + * Copyright © 2017 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * + */ +#ifndef __DRM_SYNCOBJ_H__ +#define __DRM_SYNCOBJ_H__ + +#include "linux/dma-fence.h" + +/** + * struct drm_syncobj - sync object. + * + * This structure defines a generic sync object which wraps a dma fence. + */ +struct drm_syncobj { + /** + * @refcount: + * + * Reference count of this object. + */ + struct kref refcount; + /** + * @fence: + * NULL or a pointer to the fence bound to this object. + */ + struct dma_fence *fence; + /** + * @file: + * a file backing for this syncobj. + */ + struct file *file; +}; + +void drm_syncobj_free(struct kref *kref); + +/** + * drm_syncobj_get - acquire a syncobj reference + * @obj: sync object + * + * This acquires additional reference to @obj. It is illegal to call this + * without already holding a reference. No locks required. + */ +static inline void +drm_syncobj_get(struct drm_syncobj *obj) +{ + kref_get(&obj->refcount); +} + +/** + * drm_syncobj_put - release a reference to a sync object. + * @obj: sync object. 
+ */ +static inline void +drm_syncobj_put(struct drm_syncobj *obj) +{ + kref_put(&obj->refcount, drm_syncobj_free); +} + +int drm_syncobj_fence_get(struct drm_file *file_private, + u32 handle, + struct dma_fence **fence); +int drm_syncobj_replace_fence(struct drm_file *file_private, + u32 handle, + struct dma_fence *fence); + +#endif diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 42d9f64..96c5c78 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -648,6 +648,7 @@ struct drm_gem_open { #define DRM_CAP_ADDFB2_MODIFIERS 0x10 #define DRM_CAP_PAGE_FLIP_TARGET 0x11 #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 +#define DRM_CAP_SYNCOBJ 0x13 /** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { @@ -697,6 +698,24 @@ struct drm_prime_handle { __s32 fd; }; +struct drm_syncobj_create { + __u32 handle; + __u32 flags; +}; + +struct drm_syncobj_destroy { + __u32 handle; + __u32 pad; +}; + +struct drm_syncobj_handle { + __u32 handle; + __u32 flags; + + __s32 fd; + __u32 pad; +}; + #if defined(__cplusplus) } #endif @@ -815,6 +834,11 @@ extern "C" { #define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) #define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob) +#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create) +#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) +#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) + /** * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f. -- 2.9.4 _______________________________________________ dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
On Wed, May 24, 2017 at 05:06:11PM +1000, Dave Airlie wrote:
From: Dave Airlie airlied@redhat.com
Sync objects are a new toplevel drm object that contains a pointer to a fence. This fence can be updated via command submission ioctls by drivers.
There is also a generic wait obj API modelled on the vulkan wait API (with code modelled on some amdgpu code).
These objects can be converted to an opaque fd that can be passed between processes.
v2: rename reference/unreference to put/get (Chris) fix leaked reference (David Zhou) drop mutex in favour of cmpxchg (Chris) v3: cleanups from danvet, rebase on drm_fops rename check fd_flags is 0 in ioctls.
Reviewed-by: Sean Paul seanpaul@chromium.org Signed-off-by: Dave Airlie airlied@redhat.com
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 42d9f64..96c5c78 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -648,6 +648,7 @@ struct drm_gem_open { #define DRM_CAP_ADDFB2_MODIFIERS 0x10 #define DRM_CAP_PAGE_FLIP_TARGET 0x11 #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 +#define DRM_CAP_SYNCOBJ 0x13
/** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { @@ -697,6 +698,24 @@ struct drm_prime_handle { __s32 fd; };
+struct drm_syncobj_create {
- __u32 handle;
- __u32 flags;
+};
+struct drm_syncobj_destroy {
- __u32 handle;
- __u32 pad;
+};
+struct drm_syncobj_handle {
- __u32 handle;
- __u32 flags;
- __s32 fd;
- __u32 pad;
+};
#if defined(__cplusplus) } #endif @@ -815,6 +834,11 @@ extern "C" { #define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) #define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create) +#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
These two only need DRM_IOW.
+#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle)
With that, Reviewed-by: Chris Wilson chris@chris-wilson.co.uk -Chris
On 25 May 2017 at 18:30, Chris Wilson chris@chris-wilson.co.uk wrote:
On Wed, May 24, 2017 at 05:06:11PM +1000, Dave Airlie wrote:
From: Dave Airlie airlied@redhat.com
Sync objects are a new toplevel drm object that contains a pointer to a fence. This fence can be updated via command submission ioctls by drivers.
There is also a generic wait obj API modelled on the vulkan wait API (with code modelled on some amdgpu code).
These objects can be converted to an opaque fd that can be passed between processes.
v2: rename reference/unreference to put/get (Chris) fix leaked reference (David Zhou) drop mutex in favour of cmpxchg (Chris) v3: cleanups from danvet, rebase on drm_fops rename check fd_flags is 0 in ioctls.
Reviewed-by: Sean Paul seanpaul@chromium.org Signed-off-by: Dave Airlie airlied@redhat.com
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 42d9f64..96c5c78 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -648,6 +648,7 @@ struct drm_gem_open { #define DRM_CAP_ADDFB2_MODIFIERS 0x10 #define DRM_CAP_PAGE_FLIP_TARGET 0x11 #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 +#define DRM_CAP_SYNCOBJ 0x13
/** DRM_IOCTL_GET_CAP ioctl argument type */ struct drm_get_cap { @@ -697,6 +698,24 @@ struct drm_prime_handle { __s32 fd; };
+struct drm_syncobj_create {
__u32 handle;
__u32 flags;
+};
+struct drm_syncobj_destroy {
__u32 handle;
__u32 pad;
+};
+struct drm_syncobj_handle {
__u32 handle;
__u32 flags;
__s32 fd;
__u32 pad;
+};
#if defined(__cplusplus) } #endif @@ -815,6 +834,11 @@ extern "C" { #define DRM_IOCTL_MODE_CREATEPROPBLOB DRM_IOWR(0xBD, struct drm_mode_create_blob) #define DRM_IOCTL_MODE_DESTROYPROPBLOB DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
+#define DRM_IOCTL_SYNCOBJ_CREATE DRM_IOWR(0xBF, struct drm_syncobj_create) +#define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy)
These two only need DRM_IOW.
They do now, but at least create takes some flags; destroy is probably fine. Are we okay to change these flags later?
I can never remember and I'd rather not have to think about it too much.
With that, Reviewed-by: Chris Wilson chris@chris-wilson.co.uk
Thanks, Dave.
From: Dave Airlie airlied@redhat.com
This interface will allow sync object to be used to back Vulkan fences. This API is pretty much the vulkan fence waiting API, and I've ported the code from amdgpu.
v2: accept relative timeout, pass remaining time back to userspace. v3: return to absolute timeouts.
Signed-off-by: Dave Airlie airlied@redhat.com --- drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 164 +++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/drm.h | 14 ++++ 4 files changed, 182 insertions(+)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 3fdef2c..53e3f6b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f1e5681..385ce74 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -657,6 +657,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl, + DRM_UNLOCKED|DRM_RENDER_ALLOW), };
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index b611480..8b87594 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1,5 +1,7 @@ /* * Copyright 2017 Red Hat + * Parts ported from amdgpu (fence wait code). + * Copyright 2016 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -31,6 +33,9 @@ * that contain an optional fence. The fence can be updated with a new * fence, or be NULL. * + * syncobj's can be waited upon, where it will wait for the underlying + * fence. + * * syncobj's can be export to fd's and back, these fd's are opaque and * have no other use case, except passing the syncobj between processes. * @@ -375,3 +380,162 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); } + + +/** + * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value + * + * @timeout_ns: timeout in ns + * + * Calculate the timeout in jiffies from an absolute timeout in ns. 
+ */ +unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns) +{ + unsigned long timeout_jiffies; + ktime_t timeout; + + /* clamp timeout if it's to large */ + if (((int64_t)timeout_ns) < 0) + return MAX_SCHEDULE_TIMEOUT; + + timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get()); + if (ktime_to_ns(timeout) < 0) + return 0; + + timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout)); + /* clamp timeout to avoid unsigned-> signed overflow */ + if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT ) + return MAX_SCHEDULE_TIMEOUT - 1; + + return timeout_jiffies; +} + +static int drm_syncobj_wait_all_fences(struct drm_device *dev, + struct drm_file *file_private, + struct drm_syncobj_wait *wait, + uint32_t *handles) +{ + uint32_t i; + int ret; + unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns); + + for (i = 0; i < wait->count_handles; i++) { + struct dma_fence *fence; + + ret = drm_syncobj_fence_get(file_private, handles[i], + &fence); + if (ret) + return ret; + + if (!fence) + continue; + + ret = dma_fence_wait_timeout(fence, true, timeout); + + dma_fence_put(fence); + if (ret < 0) + return ret; + if (ret == 0) + break; + timeout = ret; + } + + wait->out_timeout_ns = jiffies_to_nsecs(ret); + wait->out_status = (ret > 0); + wait->first_signaled = 0; + return 0; +} + +static int drm_syncobj_wait_any_fence(struct drm_device *dev, + struct drm_file *file_private, + struct drm_syncobj_wait *wait, + uint32_t *handles) +{ + unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns); + struct dma_fence **array; + uint32_t i; + int ret; + uint32_t first = ~0; + + /* Prepare the fence array */ + array = kcalloc(wait->count_handles, + sizeof(struct dma_fence *), GFP_KERNEL); + + if (array == NULL) + return -ENOMEM; + + for (i = 0; i < wait->count_handles; i++) { + struct dma_fence *fence; + + ret = drm_syncobj_fence_get(file_private, handles[i], + &fence); + if (ret) + goto err_free_fence_array; + else if (fence) + array[i] = fence; + else { /* NULL, 
the fence has been already signaled */ + ret = 1; + goto out; + } + } + + ret = dma_fence_wait_any_timeout(array, wait->count_handles, true, timeout, + &first); + if (ret < 0) + goto err_free_fence_array; +out: + wait->out_timeout_ns = jiffies_to_nsecs(ret); + wait->out_status = (ret > 0); + wait->first_signaled = first; + /* set return value 0 to indicate success */ + ret = 0; + +err_free_fence_array: + for (i = 0; i < wait->count_handles; i++) + dma_fence_put(array[i]); + kfree(array); + + return ret; +} + +int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_private) +{ + struct drm_syncobj_wait *args = data; + uint32_t *handles; + int ret = 0; + + if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) + return -ENODEV; + + if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) + return -EINVAL; + + if (args->count_handles == 0) + return 0; + + /* Get the handles from userspace */ + handles = kmalloc_array(args->count_handles, sizeof(uint32_t), + GFP_KERNEL); + if (handles == NULL) + return -ENOMEM; + + if (copy_from_user(handles, + (void __user *)(unsigned long)(args->handles), + sizeof(uint32_t) * args->count_handles)) { + ret = -EFAULT; + goto err_free_handles; + } + + if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) + ret = drm_syncobj_wait_all_fences(dev, file_private, + args, handles); + else + ret = drm_syncobj_wait_any_fence(dev, file_private, + args, handles); +err_free_handles: + kfree(handles); + + return ret; +} diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 96c5c78..d6e2f62 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -716,6 +716,19 @@ struct drm_syncobj_handle { __u32 pad; };
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) +struct drm_syncobj_wait { + __u64 handles; + /* absolute timeout */ + __u64 timeout_ns; + /* remaining timeout */ + __u64 out_timeout_ns; + __u32 count_handles; + __u32 flags; + __u32 out_status; + __u32 first_signaled; /* on valid when not waiting all */ +}; + #if defined(__cplusplus) } #endif @@ -838,6 +851,7 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
/** * Device specific ioctls should only be in their respective headers
On Wed, May 24, 2017 at 12:06 AM, Dave Airlie airlied@gmail.com wrote:
From: Dave Airlie airlied@redhat.com
This interface will allow sync object to be used to back Vulkan fences. This API is pretty much the vulkan fence waiting API, and I've ported the code from amdgpu.
v2: accept relative timeout, pass remaining time back to userspace. v3: return to absolute timeouts.
Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 164 ++++++++++++++++++++++++++++++ +++++++++++ include/uapi/drm/drm.h | 14 ++++ 4 files changed, 182 insertions(+)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_ internal.h index 3fdef2c..53e3f6b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f1e5681..385ce74 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -657,6 +657,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index b611480..8b87594 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1,5 +1,7 @@ /*
- Copyright 2017 Red Hat
- Parts ported from amdgpu (fence wait code).
- Copyright 2016 Advanced Micro Devices, Inc.
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the
"Software"), @@ -31,6 +33,9 @@
- that contain an optional fence. The fence can be updated with a new
- fence, or be NULL.
- syncobj's can be waited upon, where it will wait for the underlying
- fence.
- syncobj's can be exported to fd's and back, these fd's are opaque and
- have no other use case, except passing the syncobj between processes.
@@ -375,3 +380,162 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); }
+/**
- drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute
value
- @timeout_ns: timeout in ns
- Calculate the timeout in jiffies from an absolute timeout in ns.
- */
+unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns) +{
unsigned long timeout_jiffies;
ktime_t timeout;
/* clamp timeout if it's too large */
if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
if (ktime_to_ns(timeout) < 0)
return 0;
timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
/* clamp timeout to avoid unsigned-> signed overflow */
if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
return MAX_SCHEDULE_TIMEOUT - 1;
return timeout_jiffies;
+}
+static int drm_syncobj_wait_all_fences(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
uint32_t i;
int ret;
unsigned long timeout = drm_timeout_abs_to_jiffies(
wait->timeout_ns);
for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
return ret;
if (!fence)
continue;
ret = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
if (ret < 0)
return ret;
if (ret == 0)
break;
timeout = ret;
}
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
wait->first_signaled = 0;
return 0;
+}
+static int drm_syncobj_wait_any_fence(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
unsigned long timeout = drm_timeout_abs_to_jiffies(
wait->timeout_ns);
struct dma_fence **array;
uint32_t i;
int ret;
uint32_t first = ~0;
/* Prepare the fence array */
array = kcalloc(wait->count_handles,
sizeof(struct dma_fence *), GFP_KERNEL);
if (array == NULL)
return -ENOMEM;
for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
goto err_free_fence_array;
else if (fence)
array[i] = fence;
else { /* NULL, the fence has been already signaled */
ret = 1;
goto out;
}
}
ret = dma_fence_wait_any_timeout(array, wait->count_handles,
true, timeout,
&first);
if (ret < 0)
goto err_free_fence_array;
+out:
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
wait->first_signaled = first;
/* set return value 0 to indicate success */
ret = 0;
+err_free_fence_array:
for (i = 0; i < wait->count_handles; i++)
dma_fence_put(array[i]);
kfree(array);
return ret;
+}
+int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
+{
struct drm_syncobj_wait *args = data;
uint32_t *handles;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -ENODEV;
if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_
ALL)
return -EINVAL;
if (args->count_handles == 0)
return 0;
/* Get the handles from userspace */
handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
GFP_KERNEL);
if (handles == NULL)
return -ENOMEM;
if (copy_from_user(handles,
(void __user *)(unsigned long)(args->handles),
sizeof(uint32_t) * args->count_handles)) {
ret = -EFAULT;
goto err_free_handles;
}
if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
ret = drm_syncobj_wait_all_fences(dev, file_private,
args, handles);
else
ret = drm_syncobj_wait_any_fence(dev, file_private,
args, handles);
+err_free_handles:
kfree(handles);
return ret;
+} diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 96c5c78..d6e2f62 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -716,6 +716,19 @@ struct drm_syncobj_handle { __u32 pad; };
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) +struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
__u64 timeout_ns;
/* remaining timeout */
__u64 out_timeout_ns;
Any particular reason why we don't just make timeout_ns an in/out? I don't really care and haven't given it much thought; mostly just curious.
__u32 count_handles;
__u32 flags;
__u32 out_status;
__u32 first_signaled; /* only valid when not waiting all */
+};
#if defined(__cplusplus) } #endif @@ -838,6 +851,7 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
/**
- Device specific ioctls should only be in their respective headers
-- 2.9.4
dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
On Wed, May 24, 2017 at 10:25 AM, Jason Ekstrand jason@jlekstrand.net wrote:
On Wed, May 24, 2017 at 12:06 AM, Dave Airlie airlied@gmail.com wrote:
From: Dave Airlie airlied@redhat.com
This interface will allow sync object to be used to back Vulkan fences. This API is pretty much the vulkan fence waiting API, and I've ported the code from amdgpu.
v2: accept relative timeout, pass remaining time back to userspace. v3: return to absolute timeouts.
Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 164 ++++++++++++++++++++++++++++++ +++++++++++ include/uapi/drm/drm.h | 14 ++++ 4 files changed, 182 insertions(+)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 3fdef2c..53e3f6b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f1e5681..385ce74 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -657,6 +657,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj. c index b611480..8b87594 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1,5 +1,7 @@ /*
- Copyright 2017 Red Hat
- Parts ported from amdgpu (fence wait code).
- Copyright 2016 Advanced Micro Devices, Inc.
- Permission is hereby granted, free of charge, to any person obtaining
a
- copy of this software and associated documentation files (the
"Software"), @@ -31,6 +33,9 @@
- that contain an optional fence. The fence can be updated with a new
- fence, or be NULL.
- syncobj's can be waited upon, where it will wait for the underlying
- fence.
- syncobj's can be exported to fd's and back, these fd's are opaque and
- have no other use case, except passing the syncobj between processes.
@@ -375,3 +380,162 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); }
+/**
- drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute
value
- @timeout_ns: timeout in ns
- Calculate the timeout in jiffies from an absolute timeout in ns.
- */
+unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns) +{
unsigned long timeout_jiffies;
ktime_t timeout;
/* clamp timeout if it's too large */
if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
if (ktime_to_ns(timeout) < 0)
return 0;
timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
/* clamp timeout to avoid unsigned-> signed overflow */
if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
return MAX_SCHEDULE_TIMEOUT - 1;
return timeout_jiffies;
+}
+static int drm_syncobj_wait_all_fences(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
uint32_t i;
int ret;
unsigned long timeout = drm_timeout_abs_to_jiffies(wai
t->timeout_ns);
for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
return ret;
if (!fence)
continue;
ret = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
if (ret < 0)
return ret;
if (ret == 0)
break;
timeout = ret;
}
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
wait->first_signaled = 0;
return 0;
+}
+static int drm_syncobj_wait_any_fence(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
unsigned long timeout = drm_timeout_abs_to_jiffies(wai
t->timeout_ns);
struct dma_fence **array;
uint32_t i;
int ret;
uint32_t first = ~0;
/* Prepare the fence array */
array = kcalloc(wait->count_handles,
sizeof(struct dma_fence *), GFP_KERNEL);
if (array == NULL)
return -ENOMEM;
for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
goto err_free_fence_array;
else if (fence)
array[i] = fence;
else { /* NULL, the fence has been already signaled */
ret = 1;
goto out;
}
}
These two functions have unexpected and subtly different behaviors when drm_syncobj_fence_get fails. wait_all may fail in the middle of waiting whereas wait_any fails up-front and holds a reference to ensure it doesn't fail later. Given that freeing things is inherently racy, it seems less than ideal to wait some arbitrary amount of time governed by fence[0] before getting fence[1]. Would it make sense to move the array setup into _ioctl and make both hold a reference to every fence?
ret = dma_fence_wait_any_timeout(array, wait->count_handles,
true, timeout,
&first);
if (ret < 0)
goto err_free_fence_array;
+out:
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
wait->first_signaled = first;
/* set return value 0 to indicate success */
ret = 0;
+err_free_fence_array:
for (i = 0; i < wait->count_handles; i++)
dma_fence_put(array[i]);
kfree(array);
return ret;
+}
+int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
+{
struct drm_syncobj_wait *args = data;
uint32_t *handles;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -ENODEV;
if (args->flags != 0 && args->flags !=
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
return -EINVAL;
if (args->count_handles == 0)
return 0;
/* Get the handles from userspace */
handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
GFP_KERNEL);
if (handles == NULL)
return -ENOMEM;
if (copy_from_user(handles,
(void __user *)(unsigned long)(args->handles),
sizeof(uint32_t) * args->count_handles)) {
ret = -EFAULT;
goto err_free_handles;
}
if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
ret = drm_syncobj_wait_all_fences(dev, file_private,
args, handles);
else
ret = drm_syncobj_wait_any_fence(dev, file_private,
args, handles);
+err_free_handles:
kfree(handles);
return ret;
+} diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 96c5c78..d6e2f62 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -716,6 +716,19 @@ struct drm_syncobj_handle { __u32 pad; };
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) +struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
__u64 timeout_ns;
/* remaining timeout */
__u64 out_timeout_ns;
Any particular reason why we don't just make timeout_ns an in/out? I don't really care and haven't given it much thought; mostly just curious.
__u32 count_handles;
__u32 flags;
__u32 out_status;
__u32 first_signaled; /* only valid when not waiting all */
+};
#if defined(__cplusplus) } #endif @@ -838,6 +851,7 @@ extern "C" { #define DRM_IOCTL_SYNCOBJ_DESTROY DRM_IOWR(0xC0, struct drm_syncobj_destroy) #define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD DRM_IOWR(0xC1, struct drm_syncobj_handle) #define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE DRM_IOWR(0xC2, struct drm_syncobj_handle) +#define DRM_IOCTL_SYNCOBJ_WAIT DRM_IOWR(0xC3, struct drm_syncobj_wait)
/**
- Device specific ioctls should only be in their respective headers
-- 2.9.4
dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
On 25 May 2017 at 03:25, Jason Ekstrand jason@jlekstrand.net wrote:
On Wed, May 24, 2017 at 12:06 AM, Dave Airlie airlied@gmail.com wrote:
From: Dave Airlie airlied@redhat.com
This interface will allow sync object to be used to back Vulkan fences. This API is pretty much the vulkan fence waiting API, and I've ported the code from amdgpu.
v2: accept relative timeout, pass remaining time back to userspace. v3: return to absolute timeouts.
Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 164 +++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/drm.h | 14 ++++ 4 files changed, 182 insertions(+)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 3fdef2c..53e3f6b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f1e5681..385ce74 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -657,6 +657,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index b611480..8b87594 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1,5 +1,7 @@ /*
- Copyright 2017 Red Hat
- Parts ported from amdgpu (fence wait code).
- Copyright 2016 Advanced Micro Devices, Inc.
- Permission is hereby granted, free of charge, to any person obtaining
a
- copy of this software and associated documentation files (the
"Software"), @@ -31,6 +33,9 @@
- that contain an optional fence. The fence can be updated with a new
- fence, or be NULL.
- syncobj's can be waited upon, where it will wait for the underlying
- fence.
- syncobj's can be exported to fd's and back, these fd's are opaque and
- have no other use case, except passing the syncobj between processes.
@@ -375,3 +380,162 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); }
+/**
- drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute
value
- @timeout_ns: timeout in ns
- Calculate the timeout in jiffies from an absolute timeout in ns.
- */
+unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns) +{
unsigned long timeout_jiffies;
ktime_t timeout;
/* clamp timeout if it's too large */
if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
if (ktime_to_ns(timeout) < 0)
return 0;
timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
/* clamp timeout to avoid unsigned-> signed overflow */
if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
return MAX_SCHEDULE_TIMEOUT - 1;
return timeout_jiffies;
+}
+static int drm_syncobj_wait_all_fences(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
uint32_t i;
int ret;
unsigned long timeout =
drm_timeout_abs_to_jiffies(wait->timeout_ns);
for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
return ret;
if (!fence)
continue;
ret = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
if (ret < 0)
return ret;
if (ret == 0)
break;
timeout = ret;
}
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
wait->first_signaled = 0;
return 0;
+}
+static int drm_syncobj_wait_any_fence(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
unsigned long timeout =
drm_timeout_abs_to_jiffies(wait->timeout_ns);
struct dma_fence **array;
uint32_t i;
int ret;
uint32_t first = ~0;
/* Prepare the fence array */
array = kcalloc(wait->count_handles,
sizeof(struct dma_fence *), GFP_KERNEL);
if (array == NULL)
return -ENOMEM;
for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
goto err_free_fence_array;
else if (fence)
array[i] = fence;
else { /* NULL, the fence has been already signaled */
ret = 1;
goto out;
}
}
ret = dma_fence_wait_any_timeout(array, wait->count_handles, true,
timeout,
&first);
if (ret < 0)
goto err_free_fence_array;
+out:
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
wait->first_signaled = first;
/* set return value 0 to indicate success */
ret = 0;
+err_free_fence_array:
for (i = 0; i < wait->count_handles; i++)
dma_fence_put(array[i]);
kfree(array);
return ret;
+}
+int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
+{
struct drm_syncobj_wait *args = data;
uint32_t *handles;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -ENODEV;
if (args->flags != 0 && args->flags !=
DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
return -EINVAL;
if (args->count_handles == 0)
return 0;
/* Get the handles from userspace */
handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
GFP_KERNEL);
if (handles == NULL)
return -ENOMEM;
if (copy_from_user(handles,
(void __user *)(unsigned long)(args->handles),
sizeof(uint32_t) * args->count_handles)) {
ret = -EFAULT;
goto err_free_handles;
}
if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
ret = drm_syncobj_wait_all_fences(dev, file_private,
args, handles);
else
ret = drm_syncobj_wait_any_fence(dev, file_private,
args, handles);
+err_free_handles:
kfree(handles);
return ret;
+} diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 96c5c78..d6e2f62 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -716,6 +716,19 @@ struct drm_syncobj_handle { __u32 pad; };
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) +struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
__u64 timeout_ns;
/* remaining timeout */
__u64 out_timeout_ns;
Any particular reason why we don't just make timeout_ns an in/out? I don't really care and haven't given it much thought; mostly just curious.
Timeouts are absolute, so if you replace the input timeout with a new value and get -ERESTARTSYS, then you'd restart the ioctl with the wrong value.
Dave.
On Wed, May 24, 2017 at 05:06:12PM +1000, Dave Airlie wrote:
From: Dave Airlie airlied@redhat.com
This interface will allow sync object to be used to back Vulkan fences. This API is pretty much the vulkan fence waiting API, and I've ported the code from amdgpu.
v2: accept relative timeout, pass remaining time back to userspace. v3: return to absolute timeouts.
Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 164 +++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/drm.h | 14 ++++ 4 files changed, 182 insertions(+)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 3fdef2c..53e3f6b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f1e5681..385ce74 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -657,6 +657,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index b611480..8b87594 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1,5 +1,7 @@ /*
- Copyright 2017 Red Hat
- Parts ported from amdgpu (fence wait code).
- Copyright 2016 Advanced Micro Devices, Inc.
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the "Software"),
@@ -31,6 +33,9 @@
- that contain an optional fence. The fence can be updated with a new
- fence, or be NULL.
- syncobj's can be waited upon, where it will wait for the underlying
- fence.
- syncobj's can be exported to fd's and back, these fd's are opaque and
- have no other use case, except passing the syncobj between processes.
@@ -375,3 +380,162 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); }
+/**
- drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
- @timeout_ns: timeout in ns
- Calculate the timeout in jiffies from an absolute timeout in ns.
- */
+unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns)
Not in any header or otherwise exported, so static?
+{
- unsigned long timeout_jiffies;
- ktime_t timeout;
- /* clamp timeout if it's too large */
- if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
- timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
- if (ktime_to_ns(timeout) < 0)
return 0;
- timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
- /* clamp timeout to avoid unsigned-> signed overflow */
- if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
return MAX_SCHEDULE_TIMEOUT - 1;
This about avoiding the conversion into an infinite timeout, right? I think the comment is misleading (certainly doesn't explain MAX_SCHEDULE_TIMEOUT-1) and the test should be >= MAX_SCHEDULE_TIMEOUT.
- return timeout_jiffies;
Timeouts tend to use timeout_jiffies + 1 to avoid the timer interrupt screwing up the calculation, or just generally returning a fraction too early.
+}
+static int drm_syncobj_wait_all_fences(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
- uint32_t i;
- int ret;
- unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns);
Also note that this doesn't handle timeout = 0 very gracefully with multiple fences. (dma_fence_wait_timeout will convert that on return to 1). Hmm, by using an absolute timeout we can't pass in timeout=0 to do a poll.
- for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
return ret;
if (!fence)
continue;
I thought no fence for the syncobj was the *unsignaled* case, and to wait upon it was a user error. I second Jason's idea to make the lookup and error checking on the fences uniform between WAIT_ALL and WAIT_ANY.
ret = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
if (ret < 0)
return ret;
if (ret == 0)
break;
timeout = ret;
- }
- wait->out_timeout_ns = jiffies_to_nsecs(ret);
- wait->out_status = (ret > 0);
- wait->first_signaled = 0;
- return 0;
+}
+static int drm_syncobj_wait_any_fence(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
- unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns);
- struct dma_fence **array;
- uint32_t i;
- int ret;
- uint32_t first = ~0;
- /* Prepare the fence array */
- array = kcalloc(wait->count_handles,
sizeof(struct dma_fence *), GFP_KERNEL);
- if (array == NULL)
return -ENOMEM;
- for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
goto err_free_fence_array;
else if (fence)
array[i] = fence;
else { /* NULL, the fence has been already signaled */
ret = 1;
goto out;
}
- }
- ret = dma_fence_wait_any_timeout(array, wait->count_handles, true, timeout,
&first);
- if (ret < 0)
goto err_free_fence_array;
+out:
- wait->out_timeout_ns = jiffies_to_nsecs(ret);
- wait->out_status = (ret > 0);
Didn't the amdgpu interface report which fence completed first? (I may be imagining that.)
- wait->first_signaled = first;
- /* set return value 0 to indicate success */
- ret = 0;
+err_free_fence_array:
- for (i = 0; i < wait->count_handles; i++)
dma_fence_put(array[i]);
- kfree(array);
- return ret;
+}
+int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
+{
- struct drm_syncobj_wait *args = data;
- uint32_t *handles;
- int ret = 0;
- if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -ENODEV;
- if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
return -EINVAL;
- if (args->count_handles == 0)
return 0;
Hmm, returning success without updating any of the status fields. -EINVAL? -ENOTHINGTODO.
- /* Get the handles from userspace */
- handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
GFP_KERNEL);
- if (handles == NULL)
return -ENOMEM;
- if (copy_from_user(handles,
(void __user *)(unsigned long)(args->handles),
sizeof(uint32_t) * args->count_handles)) {
ret = -EFAULT;
goto err_free_handles;
- }
- if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
ret = drm_syncobj_wait_all_fences(dev, file_private,
args, handles);
- else
ret = drm_syncobj_wait_any_fence(dev, file_private,
args, handles);
+err_free_handles:
- kfree(handles);
- return ret;
+}
Am 25.05.2017 um 10:47 schrieb Chris Wilson:
On Wed, May 24, 2017 at 05:06:12PM +1000, Dave Airlie wrote:
From: Dave Airlie airlied@redhat.com
This interface will allow sync object to be used to back Vulkan fences. This API is pretty much the vulkan fence waiting API, and I've ported the code from amdgpu.
v2: accept relative timeout, pass remaining time back to userspace. v3: return to absolute timeouts.
Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/drm_internal.h | 2 + drivers/gpu/drm/drm_ioctl.c | 2 + drivers/gpu/drm/drm_syncobj.c | 164 +++++++++++++++++++++++++++++++++++++++++ include/uapi/drm/drm.h | 14 ++++ 4 files changed, 182 insertions(+)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 3fdef2c..53e3f6b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -156,3 +156,5 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_private); +int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index f1e5681..385ce74 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -657,6 +657,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index b611480..8b87594 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -1,5 +1,7 @@ /*
- Copyright 2017 Red Hat
- Parts ported from amdgpu (fence wait code).
- Copyright 2016 Advanced Micro Devices, Inc.
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the "Software"),
@@ -31,6 +33,9 @@
- that contain an optional fence. The fence can be updated with a new
- fence, or be NULL.
- syncobj's can be waited upon, where it will wait for the underlying
- fence.
- syncobj's can be exported to fd's and back, these fd's are opaque and
- have no other use case, except passing the syncobj between processes.
@@ -375,3 +380,162 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); }
+/**
- drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
- @timeout_ns: timeout in ns
- Calculate the timeout in jiffies from an absolute timeout in ns.
- */
+unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns)
Not in any header or otherwise exported, so static?
That function should be exported and the version we have in amdgpu replaced by using this instead.
But feel free to make it static for now, I can take care of the cleanup later on.
Christian.
+{
- unsigned long timeout_jiffies;
- ktime_t timeout;
- /* clamp timeout if it's too large */
- if (((int64_t)timeout_ns) < 0)
return MAX_SCHEDULE_TIMEOUT;
- timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
- if (ktime_to_ns(timeout) < 0)
return 0;
- timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
- /* clamp timeout to avoid unsigned-> signed overflow */
- if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
return MAX_SCHEDULE_TIMEOUT - 1;
This about avoiding the conversion into an infinite timeout, right? I think the comment is misleading (certainly doesn't explain MAX_SCHEDULE_TIMEOUT-1) and the test should be >= MAX_SCHEDULE_TIMEOUT.
- return timeout_jiffies;
Timeouts tend to use timeout_jiffies + 1 to avoid the timer interrupt screwing up the calculation, or just generally returning a fraction too early.
+}
+static int drm_syncobj_wait_all_fences(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
- uint32_t i;
- int ret;
- unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns);
Also note that this doesn't handle timeout = 0 very gracefully with multiple fences. (dma_fence_wait_timeout will convert that on return to 1). Hmm, by using an absolute timeout we can't pass into timeout=0 to do a poll.
- for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
return ret;
if (!fence)
continue;
I thought no fence for the syncobj was the *unsignaled* case, and to wait upon it was a user error. I second Jason's idea to make the lookup and error checking on the fences uniform between WAIT_ALL and WAIT_ANY.
ret = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
if (ret < 0)
return ret;
if (ret == 0)
break;
timeout = ret;
- }
- wait->out_timeout_ns = jiffies_to_nsecs(ret);
- wait->out_status = (ret > 0);
- wait->first_signaled = 0;
- return 0;
+}
+static int drm_syncobj_wait_any_fence(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
- unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns);
- struct dma_fence **array;
- uint32_t i;
- int ret;
- uint32_t first = ~0;
- /* Prepare the fence array */
- array = kcalloc(wait->count_handles,
sizeof(struct dma_fence *), GFP_KERNEL);
- if (array == NULL)
return -ENOMEM;
- for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
goto err_free_fence_array;
else if (fence)
array[i] = fence;
else { /* NULL, the fence has been already signaled */
ret = 1;
goto out;
}
- }
- ret = dma_fence_wait_any_timeout(array, wait->count_handles, true, timeout,
&first);
- if (ret < 0)
goto err_free_fence_array;
+out:
- wait->out_timeout_ns = jiffies_to_nsecs(ret);
- wait->out_status = (ret > 0);
Didn't the amdgpu interface report which fence completed first? (I may be imagining that.)
- wait->first_signaled = first;
- /* set return value 0 to indicate success */
- ret = 0;
+err_free_fence_array:
- for (i = 0; i < wait->count_handles; i++)
dma_fence_put(array[i]);
- kfree(array);
- return ret;
+}
+int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
+{
- struct drm_syncobj_wait *args = data;
- uint32_t *handles;
- int ret = 0;
- if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -ENODEV;
- if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
return -EINVAL;
- if (args->count_handles == 0)
return 0;
Hmm, returning success without updating any of the status fields. -EINVAL? -ENOTHINGTODO.
- /* Get the handles from userspace */
- handles = kmalloc_array(args->count_handles, sizeof(uint32_t),
GFP_KERNEL);
- if (handles == NULL)
return -ENOMEM;
- if (copy_from_user(handles,
(void __user *)(unsigned long)(args->handles),
sizeof(uint32_t) * args->count_handles)) {
ret = -EFAULT;
goto err_free_handles;
- }
- if (args->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
ret = drm_syncobj_wait_all_fences(dev, file_private,
args, handles);
- else
ret = drm_syncobj_wait_any_fence(dev, file_private,
args, handles);
+err_free_handles:
- kfree(handles);
- return ret;
+}
Hopefully addressing most of these in the upcoming set, some comments below.
+/**
- drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
- @timeout_ns: timeout in ns
- Calculate the timeout in jiffies from an absolute timeout in ns.
- */
+unsigned long drm_timeout_abs_to_jiffies(uint64_t timeout_ns)
Not in any header or otherwise exported, so static?
Done.
/* clamp timeout to avoid unsigned-> signed overflow */
if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
return MAX_SCHEDULE_TIMEOUT - 1;
This about avoiding the conversion into an infinite timeout, right? I think the comment is misleading (certainly doesn't explain MAX_SCHEDULE_TIMEOUT-1) and the test should be >= MAX_SCHEDULE_TIMEOUT.
Taken from AMD code, but I see your logic, I've adjust the code and added the one as requested.
return timeout_jiffies;
Timeouts tend to use timeout_jiffies + 1 to avoid the timer interrupt screwing up the calculation, or just generally returning a fraction too early.
+}
+static int drm_syncobj_wait_all_fences(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
uint32_t i;
int ret;
unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns);
Also note that this doesn't handle timeout = 0 very gracefully with multiple fences. (dma_fence_wait_timeout will convert that on return to 1). Hmm, by using an absolute timeout we can't pass into timeout=0 to do a poll.
I've decided to make timeout = 0 be poll, I can't see anyone having a realtime clock of 0 making sense anyways.
There is a fix in flight for dma_fence_wait_timeout doing that, it's in drm-next now.
&fence);
if (ret)
return ret;
if (!fence)
continue;
I thought no fence for the syncobj was the *unsignaled* case, and to wait upon it was a user error. I second Jason's idea to make the lookup and error checking on the fences uniform between WAIT_ALL and WAIT_ANY.
ret = dma_fence_wait_timeout(fence, true, timeout);
dma_fence_put(fence);
if (ret < 0)
return ret;
if (ret == 0)
break;
timeout = ret;
}
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
wait->first_signaled = 0;
return 0;
+}
+static int drm_syncobj_wait_any_fence(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
uint32_t *handles)
+{
unsigned long timeout = drm_timeout_abs_to_jiffies(wait->timeout_ns);
struct dma_fence **array;
uint32_t i;
int ret;
uint32_t first = ~0;
/* Prepare the fence array */
array = kcalloc(wait->count_handles,
sizeof(struct dma_fence *), GFP_KERNEL);
if (array == NULL)
return -ENOMEM;
for (i = 0; i < wait->count_handles; i++) {
struct dma_fence *fence;
ret = drm_syncobj_fence_get(file_private, handles[i],
&fence);
if (ret)
goto err_free_fence_array;
else if (fence)
array[i] = fence;
else { /* NULL, the fence has been already signaled */
ret = 1;
goto out;
}
}
ret = dma_fence_wait_any_timeout(array, wait->count_handles, true, timeout,
&first);
if (ret < 0)
goto err_free_fence_array;
+out:
wait->out_timeout_ns = jiffies_to_nsecs(ret);
wait->out_status = (ret > 0);
Didn't the amdgpu interface report which fence completed first? (I may be imagining that.)
Just below your comment down there is first_signaled getting assigned!
wait->first_signaled = first;
/* set return value 0 to indicate success */
ret = 0;
+err_free_fence_array:
for (i = 0; i < wait->count_handles; i++)
dma_fence_put(array[i]);
kfree(array);
return ret;
+}
+int +drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
+{
struct drm_syncobj_wait *args = data;
uint32_t *handles;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
return -ENODEV;
if (args->flags != 0 && args->flags != DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)
return -EINVAL;
if (args->count_handles == 0)
return 0;
Hmm, returning success without updating any of the status fields. -EINVAL? -ENOTHINGTODO.
Done, -EINVAL it is.
Otherwise I've pretty much done what Jason asked, gathered all the fences before waiting, and then waited,
Will resend new patch shortly.
Dave.
From: Dave Airlie airlied@redhat.com
This interface allows importing the fence from a sync_file into an existing drm sync object, or exporting the fence attached to an existing drm sync object into a new sync file object.
This should only be used to interact with sync files where necessary.
Reviewed-by: Sean Paul seanpaul@chromium.org Signed-off-by: Dave Airlie airlied@redhat.com --- drivers/gpu/drm/drm_syncobj.c | 64 +++++++++++++++++++++++++++++++++++++++++-- include/uapi/drm/drm.h | 2 ++ 2 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 8b87594..54d751e 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -50,6 +50,7 @@ #include <linux/file.h> #include <linux/fs.h> #include <linux/anon_inodes.h> +#include <linux/sync_file.h>
#include "drm_internal.h" #include <drm/drm_syncobj.h> @@ -276,6 +277,48 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, return 0; }
+int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, + int fd, int handle) +{ + struct dma_fence *fence = sync_file_get_fence(fd); + if (!fence) + return -EINVAL; + + return drm_syncobj_replace_fence(file_private, handle, fence); +} + +int drm_syncobj_export_sync_file(struct drm_file *file_private, + int handle, int *p_fd) +{ + int ret; + struct dma_fence *fence; + struct sync_file *sync_file; + int fd = get_unused_fd_flags(O_CLOEXEC); + + if (fd < 0) + return fd; + + ret = drm_syncobj_fence_get(file_private, handle, &fence); + if (ret) + goto err_put_fd; + + sync_file = sync_file_create(fence); + if (!sync_file) { + ret = -EINVAL; + goto err_fence_put; + } + + fd_install(fd, sync_file->file); + + dma_fence_put(fence); + *p_fd = fd; + return 0; +err_fence_put: + dma_fence_put(fence); +err_put_fd: + put_unused_fd(fd); + return ret; +} /** * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time * @dev: drm_device which is being opened by userspace @@ -358,9 +401,17 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -ENODEV;
- if (args->pad || args->flags) + if (args->pad) + return -EINVAL; + + if (args->flags != 0 && + args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_FENCE_SYNC_FILE) return -EINVAL;
+ if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_FENCE_SYNC_FILE) + return drm_syncobj_export_sync_file(file_private, args->handle, + &args->fd); + return drm_syncobj_handle_to_fd(file_private, args->handle, &args->fd); } @@ -374,9 +425,18 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -ENODEV;
- if (args->pad || args->flags) + if (args->pad) return -EINVAL;
+ if (args->flags != 0 && + args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE_FENCE) + return -EINVAL; + + if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE_FENCE) + return drm_syncobj_import_sync_file_fence(file_private, + args->fd, + args->handle); + return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle); } diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index d6e2f62..94c75be 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -708,6 +708,8 @@ struct drm_syncobj_destroy { __u32 pad; };
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE_FENCE (1 << 0) +#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_FENCE_SYNC_FILE (1 << 0) struct drm_syncobj_handle { __u32 handle; __u32 flags;
Yes, I believe this is the proper way for sync_file and syncobj to interact. Again, I can't really review for all of the kernel details (though the seem ok to me) so this mostly applies to the API:
Reviewed-by: Jason Ekstrand jason@jlekstrand.net
On Wed, May 24, 2017 at 12:06 AM, Dave Airlie airlied@gmail.com wrote:
From: Dave Airlie airlied@redhat.com
This interface allows importing the fence from a sync_file into an existing drm sync object, or exporting the fence attached to an existing drm sync object into a new sync file object.
This should only be used to interact with sync files where necessary.
Reviewed-by: Sean Paul seanpaul@chromium.org Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/drm_syncobj.c | 64 ++++++++++++++++++++++++++++++ +++++++++++-- include/uapi/drm/drm.h | 2 ++ 2 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 8b87594..54d751e 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -50,6 +50,7 @@ #include <linux/file.h> #include <linux/fs.h> #include <linux/anon_inodes.h> +#include <linux/sync_file.h>
#include "drm_internal.h" #include <drm/drm_syncobj.h> @@ -276,6 +277,48 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, return 0; }
+int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
int fd, int handle)
+{
struct dma_fence *fence = sync_file_get_fence(fd);
if (!fence)
return -EINVAL;
return drm_syncobj_replace_fence(file_private, handle, fence);
+}
+int drm_syncobj_export_sync_file(struct drm_file *file_private,
int handle, int *p_fd)
+{
int ret;
struct dma_fence *fence;
struct sync_file *sync_file;
int fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return fd;
ret = drm_syncobj_fence_get(file_private, handle, &fence);
if (ret)
goto err_put_fd;
sync_file = sync_file_create(fence);
if (!sync_file) {
ret = -EINVAL;
goto err_fence_put;
}
fd_install(fd, sync_file->file);
dma_fence_put(fence);
*p_fd = fd;
return 0;
+err_fence_put:
dma_fence_put(fence);
+err_put_fd:
put_unused_fd(fd);
return ret;
+} /**
- drm_syncobj_open - initializes syncobj file-private structures at
devnode open time
- @dev: drm_device which is being opened by userspace
@@ -358,9 +401,17 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -ENODEV;
if (args->pad || args->flags)
if (args->pad)
return -EINVAL;
if (args->flags != 0 &&
args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_
FLAGS_EXPORT_FENCE_SYNC_FILE) return -EINVAL;
if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_
FLAGS_EXPORT_FENCE_SYNC_FILE)
return drm_syncobj_export_sync_file(file_private,
args->handle,
&args->fd);
return drm_syncobj_handle_to_fd(file_private, args->handle, &args->fd);
} @@ -374,9 +425,18 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -ENODEV;
if (args->pad || args->flags)
if (args->pad) return -EINVAL;
if (args->flags != 0 &&
args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_
FLAGS_IMPORT_SYNC_FILE_FENCE)
return -EINVAL;
if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_
FLAGS_IMPORT_SYNC_FILE_FENCE)
return drm_syncobj_import_sync_file_fence(file_private,
args->fd,
args->handle);
return drm_syncobj_fd_to_handle(file_private, args->fd, &args->handle);
} diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index d6e2f62..94c75be 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -708,6 +708,8 @@ struct drm_syncobj_destroy { __u32 pad; };
+#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE_FENCE (1 << 0) +#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_FENCE_SYNC_FILE (1 << 0) struct drm_syncobj_handle { __u32 handle; __u32 flags; -- 2.9.4
dri-devel mailing list dri-devel@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/dri-devel
On Wed, May 24, 2017 at 05:06:13PM +1000, Dave Airlie wrote:
From: Dave Airlie airlied@redhat.com
This interface allows importing the fence from a sync_file into an existing drm sync object, or exporting the fence attached to an existing drm sync object into a new sync file object.
This should only be used to interact with sync files where necessary.
Reviewed-by: Sean Paul seanpaul@chromium.org Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/drm_syncobj.c | 64 +++++++++++++++++++++++++++++++++++++++++-- include/uapi/drm/drm.h | 2 ++ 2 files changed, 64 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 8b87594..54d751e 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -50,6 +50,7 @@ #include <linux/file.h> #include <linux/fs.h> #include <linux/anon_inodes.h> +#include <linux/sync_file.h>
#include "drm_internal.h" #include <drm/drm_syncobj.h> @@ -276,6 +277,48 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, return 0; }
+int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
int fd, int handle)
+{
- struct dma_fence *fence = sync_file_get_fence(fd);
- if (!fence)
return -EINVAL;
- return drm_syncobj_replace_fence(file_private, handle, fence);
Didn't replace_fence() acquire its own reference to fence, and so we need to drop our reference afterwards?
+}
+int drm_syncobj_export_sync_file(struct drm_file *file_private,
int handle, int *p_fd)
+{
- int ret;
- struct dma_fence *fence;
- struct sync_file *sync_file;
- int fd = get_unused_fd_flags(O_CLOEXEC);
- if (fd < 0)
return fd;
- ret = drm_syncobj_fence_get(file_private, handle, &fence);
- if (ret)
goto err_put_fd;
- sync_file = sync_file_create(fence);
Could move dma_fence_put() here and so drop the err_fence_put.
- if (!sync_file) {
ret = -EINVAL;
goto err_fence_put;
- }
- fd_install(fd, sync_file->file);
- dma_fence_put(fence);
- *p_fd = fd;
- return 0;
+err_fence_put:
- dma_fence_put(fence);
+err_put_fd:
- put_unused_fd(fd);
- return ret;
+} /**
- drm_syncobj_open - initializes syncobj file-private structures at devnode open time
- @dev: drm_device which is being opened by userspace
@@ -358,9 +401,17 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ)) return -ENODEV;
- if (args->pad || args->flags)
- if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
return -EINVAL;args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_FENCE_SYNC_FILE)
DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_AS_SYNC_FILE ?
I'm sure if the addition of FENCE makes it any clearer in the import/export names. The conversion of import is syncobj -> sync_file and vice versa. -Chris
From: Dave Airlie airlied@redhat.com
This just splits out the fence dependency checking into its own function to make it easier to add semaphore dependencies.
Reviewed-by: Christian König christian.koenig@amd.com Signed-off-by: Dave Airlie airlied@redhat.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 85 +++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 4e6b950..30225d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -995,56 +995,65 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return 0; }
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev, - struct amdgpu_cs_parser *p) +static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) { struct amdgpu_fpriv *fpriv = p->filp->driver_priv; - int i, j, r; - - for (i = 0; i < p->nchunks; ++i) { - struct drm_amdgpu_cs_chunk_dep *deps; - struct amdgpu_cs_chunk *chunk; - unsigned num_deps; + unsigned num_deps; + int i, r; + struct drm_amdgpu_cs_chunk_dep *deps;
- chunk = &p->chunks[i]; + deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_dep);
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES) - continue; + for (i = 0; i < num_deps; ++i) { + struct amdgpu_ring *ring; + struct amdgpu_ctx *ctx; + struct dma_fence *fence;
- deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; - num_deps = chunk->length_dw * 4 / - sizeof(struct drm_amdgpu_cs_chunk_dep); + r = amdgpu_cs_get_ring(p->adev, deps[i].ip_type, + deps[i].ip_instance, + deps[i].ring, &ring); + if (r) + return r;
- for (j = 0; j < num_deps; ++j) { - struct amdgpu_ring *ring; - struct amdgpu_ctx *ctx; - struct dma_fence *fence; + ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id); + if (ctx == NULL) + return -EINVAL;
- r = amdgpu_cs_get_ring(adev, deps[j].ip_type, - deps[j].ip_instance, - deps[j].ring, &ring); + fence = amdgpu_ctx_get_fence(ctx, ring, + deps[i].handle); + if (IS_ERR(fence)) { + r = PTR_ERR(fence); + amdgpu_ctx_put(ctx); + return r; + } else if (fence) { + r = amdgpu_sync_fence(p->adev, &p->job->sync, + fence); + dma_fence_put(fence); + amdgpu_ctx_put(ctx); if (r) return r; + } + } + return 0; +}
- ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id); - if (ctx == NULL) - return -EINVAL; +static int amdgpu_cs_dependencies(struct amdgpu_device *adev, + struct amdgpu_cs_parser *p) +{ + int i, r;
- fence = amdgpu_ctx_get_fence(ctx, ring, - deps[j].handle); - if (IS_ERR(fence)) { - r = PTR_ERR(fence); - amdgpu_ctx_put(ctx); - return r; + for (i = 0; i < p->nchunks; ++i) { + struct amdgpu_cs_chunk *chunk;
- } else if (fence) { - r = amdgpu_sync_fence(adev, &p->job->sync, - fence); - dma_fence_put(fence); - amdgpu_ctx_put(ctx); - if (r) - return r; - } + chunk = &p->chunks[i]; + + if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) { + r = amdgpu_cs_process_fence_dep(p, chunk); + if (r) + return r; } }
From: Dave Airlie airlied@redhat.com
This creates a new command submission chunk for amdgpu to add in and out sync objects around the submission.
Sync objects are managed via the drm syncobj ioctls.
The command submission interface is enhanced with two new chunks, one for syncobj pre submission dependencies, and one for post submission sync obj signalling, and just takes a list of handles for each.
This is based on work originally done by David Zhou at AMD, with input from Christian Konig on what things should look like.
In theory VkFences could be backed with sync objects and just get passed into the cs as syncobj handles as well.
NOTE: this interface addition needs a version bump to expose it to userspace.
v1.1: keep file reference on import. v2: move to using syncobjs v2.1: change some APIs to just use p pointer. v3: make more robust against CS failures, we now add the wait sems but only remove them once the CS job has been submitted. v4: rewrite names of API and base on new syncobj code. v5: move post deps earlier, rename some apis
Signed-off-by: Dave Airlie airlied@redhat.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 87 ++++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- include/uapi/drm/amdgpu_drm.h | 6 +++ 3 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 30225d7..3cbd547 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -27,6 +27,7 @@ #include <linux/pagemap.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_syncobj.h> #include "amdgpu.h" #include "amdgpu_trace.h"
@@ -226,6 +227,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) break;
case AMDGPU_CHUNK_ID_DEPENDENCIES: + case AMDGPU_CHUNK_ID_SYNCOBJ_IN: + case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: break;
default: @@ -1040,6 +1043,40 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return 0; }
+static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, + uint32_t handle) +{ + int r; + struct dma_fence *fence; + r = drm_syncobj_fence_get(p->filp, handle, &fence); + if (r) + return r; + + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence); + dma_fence_put(fence); + + return r; +} + +static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) +{ + unsigned num_deps; + int i, r; + struct drm_amdgpu_cs_chunk_sem *deps; + + deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_sem); + + for (i = 0; i < num_deps; ++i) { + r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle); + if (r) + return r; + } + return 0; +} + static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) { @@ -1054,12 +1091,54 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, r = amdgpu_cs_process_fence_dep(p, chunk); if (r) return r; + } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) { + r = amdgpu_cs_process_syncobj_in_dep(p, chunk); + if (r) + return r; } }
return 0; }
+static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, + struct amdgpu_cs_chunk *chunk) +{ + unsigned num_deps; + int i, r; + struct drm_amdgpu_cs_chunk_sem *deps; + + deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_sem); + + for (i = 0; i < num_deps; ++i) { + r = drm_syncobj_replace_fence(p->filp, deps[i].handle, + p->fence); + if (r) + return r; + } + return 0; +} + +static int amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) +{ + int i, r; + + for (i = 0; i < p->nchunks; ++i) { + struct amdgpu_cs_chunk *chunk; + + chunk = &p->chunks[i]; + + if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) { + r = amdgpu_cs_process_syncobj_out_dep(p, chunk); + if (r) + return r; + } + } + return 0; +} + static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) { @@ -1080,6 +1159,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; job->fence_ctx = entity->fence_context; p->fence = dma_fence_get(&job->base.s_fence->finished); + + r = amdgpu_cs_post_dependencies(p); + if (r) { + amdgpu_job_free(job); + return r; + } + cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job); @@ -1087,7 +1173,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base); - return 0; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2d705e..5329e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -719,7 +719,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | - DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET, + DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms, diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 6c249e5..cc702ca 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -416,6 +416,8 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_IB 0x01 #define AMDGPU_CHUNK_ID_FENCE 0x02 #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 +#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04 +#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
struct drm_amdgpu_cs_chunk { __u32 chunk_id; @@ -483,6 +485,10 @@ struct drm_amdgpu_cs_chunk_fence { __u32 offset; };
+struct drm_amdgpu_cs_chunk_sem { + __u32 handle; +}; + struct drm_amdgpu_cs_chunk_data { union { struct drm_amdgpu_cs_chunk_ib ib_data;
On 2017年05月24日 15:06, Dave Airlie wrote:
From: Dave Airlie airlied@redhat.com
This creates a new command submission chunk for amdgpu to add in and out sync objects around the submission.
Sync objects are managed via the drm syncobj ioctls.
The command submission interface is enhanced with two new chunks, one for syncobj pre submission dependencies, and one for post submission sync obj signalling, and just takes a list of handles for each.
This is based on work originally done by David Zhou at AMD, with input from Christian König on what things should look like.
In theory VkFences could be backed with sync objects and just get passed into the cs as syncobj handles as well.
NOTE: this interface addition needs a version bump to expose it to userspace.
v1.1: keep file reference on import. v2: move to using syncobjs v2.1: change some APIs to just use p pointer. v3: make more robust against CS failures, we now add the wait sems but only remove them once the CS job has been submitted. v4: rewrite names of API and base on new syncobj code. v5: move post deps earlier, rename some apis
Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 87 ++++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- include/uapi/drm/amdgpu_drm.h | 6 +++ 3 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 30225d7..3cbd547 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -27,6 +27,7 @@ #include <linux/pagemap.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_syncobj.h> #include "amdgpu.h" #include "amdgpu_trace.h"
@@ -226,6 +227,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: break;
default:
@@ -1040,6 +1043,40 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return 0; }
+static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
uint32_t handle)
+{
- int r;
- struct dma_fence *fence;
- r = drm_syncobj_fence_get(p->filp, handle, &fence);
- if (r)
return r;
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
We just fixed a sched fence issue by adding new job->dep_sync member, so here should use &p->job->dep_sync instead of &p->job->sync.
Regards, David Zhou
- dma_fence_put(fence);
- return r;
+}
+static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
+{
- unsigned num_deps;
- int i, r;
- struct drm_amdgpu_cs_chunk_sem *deps;
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
- for (i = 0; i < num_deps; ++i) {
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
if (r)
return r;
- }
- return 0;
+}
- static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) {
@@ -1054,12 +1091,54 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, r = amdgpu_cs_process_fence_dep(p, chunk); if (r) return r;
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
return r;
} }
return 0; }
+static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
+{
- unsigned num_deps;
- int i, r;
- struct drm_amdgpu_cs_chunk_sem *deps;
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
- for (i = 0; i < num_deps; ++i) {
r = drm_syncobj_replace_fence(p->filp, deps[i].handle,
p->fence);
if (r)
return r;
- }
- return 0;
+}
+static int amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) +{
- int i, r;
- for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk;
chunk = &p->chunks[i];
if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
return r;
}
- }
- return 0;
+}
- static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) {
@@ -1080,6 +1159,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; job->fence_ctx = entity->fence_context; p->fence = dma_fence_get(&job->base.s_fence->finished);
- r = amdgpu_cs_post_dependencies(p);
- if (r) {
amdgpu_job_free(job);
return r;
- }
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job);
@@ -1087,7 +1173,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base);
- return 0; }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2d705e..5329e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -719,7 +719,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
.load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 6c249e5..cc702ca 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -416,6 +416,8 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_IB 0x01 #define AMDGPU_CHUNK_ID_FENCE 0x02 #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 +#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04 +#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
struct drm_amdgpu_cs_chunk { __u32 chunk_id; @@ -483,6 +485,10 @@ struct drm_amdgpu_cs_chunk_fence { __u32 offset; };
+struct drm_amdgpu_cs_chunk_sem {
- __u32 handle;
+};
- struct drm_amdgpu_cs_chunk_data { union { struct drm_amdgpu_cs_chunk_ib ib_data;
Am 24.05.2017 um 09:25 schrieb zhoucm1:
On 2017年05月24日 15:06, Dave Airlie wrote:
From: Dave Airlie airlied@redhat.com
This creates a new command submission chunk for amdgpu to add in and out sync objects around the submission.
Sync objects are managed via the drm syncobj ioctls.
The command submission interface is enhanced with two new chunks, one for syncobj pre submission dependencies, and one for post submission sync obj signalling, and just takes a list of handles for each.
This is based on work originally done by David Zhou at AMD, with input from Christian König on what things should look like.
In theory VkFences could be backed with sync objects and just get passed into the cs as syncobj handles as well.
NOTE: this interface addition needs a version bump to expose it to userspace.
v1.1: keep file reference on import. v2: move to using syncobjs v2.1: change some APIs to just use p pointer. v3: make more robust against CS failures, we now add the wait sems but only remove them once the CS job has been submitted. v4: rewrite names of API and base on new syncobj code. v5: move post deps earlier, rename some apis
Signed-off-by: Dave Airlie airlied@redhat.com
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 87 ++++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- include/uapi/drm/amdgpu_drm.h | 6 +++ 3 files changed, 93 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 30225d7..3cbd547 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -27,6 +27,7 @@ #include <linux/pagemap.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_syncobj.h> #include "amdgpu.h" #include "amdgpu_trace.h" @@ -226,6 +227,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) break; case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: break; default:
@@ -1040,6 +1043,40 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return 0; } +static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
uint32_t handle)
+{
- int r;
- struct dma_fence *fence;
- r = drm_syncobj_fence_get(p->filp, handle, &fence);
- if (r)
return r;
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence);
We just fixed a sched fence issue by adding new job->dep_sync member, so here should use &p->job->dep_sync instead of &p->job->sync.
To be precise job->sync is now for the implicit fences from the BOs (e.g. buffer moves and such) and job->dep_sync is for all explicit fences from dependencies/semaphores.
The handling is now slightly different to fix a bug with two consecutive jobs scheduled to the same ring.
But apart from that the patch now looks good to me, so with that fixed it is Reviewed-by: Christian König christian.koenig@amd.com.
Regards, Christian.
Regards, David Zhou
- dma_fence_put(fence);
- return r;
+}
+static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
+{
- unsigned num_deps;
- int i, r;
- struct drm_amdgpu_cs_chunk_sem *deps;
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
- for (i = 0; i < num_deps; ++i) {
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
if (r)
return r;
- }
- return 0;
+}
- static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p) {
@@ -1054,12 +1091,54 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, r = amdgpu_cs_process_fence_dep(p, chunk); if (r) return r;
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
return r; } } return 0; } +static int amdgpu_cs_process_syncobj_out_dep(struct
amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
+{
- unsigned num_deps;
- int i, r;
- struct drm_amdgpu_cs_chunk_sem *deps;
- deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
- for (i = 0; i < num_deps; ++i) {
r = drm_syncobj_replace_fence(p->filp, deps[i].handle,
p->fence);
if (r)
return r;
- }
- return 0;
+}
+static int amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) +{
- int i, r;
- for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk;
chunk = &p->chunks[i];
if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
return r;
}
- }
- return 0;
+}
- static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs) {
@@ -1080,6 +1159,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, job->owner = p->filp; job->fence_ctx = entity->fence_context; p->fence = dma_fence_get(&job->base.s_fence->finished);
- r = amdgpu_cs_post_dependencies(p);
- if (r) {
amdgpu_job_free(job);
return r;
- }
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence); job->uf_sequence = cs->out.handle; amdgpu_job_free_resources(job);
@@ -1087,7 +1173,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, trace_amdgpu_cs_ioctl(job); amd_sched_entity_push_job(&job->base);
return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f2d705e..5329e7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -719,7 +719,7 @@ static struct drm_driver kms_driver = { .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, .load = amdgpu_driver_load_kms, .open = amdgpu_driver_open_kms, .postclose = amdgpu_driver_postclose_kms,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 6c249e5..cc702ca 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -416,6 +416,8 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_IB 0x01 #define AMDGPU_CHUNK_ID_FENCE 0x02 #define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 +#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04 +#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05 struct drm_amdgpu_cs_chunk { __u32 chunk_id; @@ -483,6 +485,10 @@ struct drm_amdgpu_cs_chunk_fence { __u32 offset; }; +struct drm_amdgpu_cs_chunk_sem {
- __u32 handle;
+};
- struct drm_amdgpu_cs_chunk_data { union { struct drm_amdgpu_cs_chunk_ib ib_data;
amd-gfx mailing list amd-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/amd-gfx
dri-devel@lists.freedesktop.org