Hi,
The idea is to create a GEM bo in one process and pass its prime fd to another process, which in turn uses the fd only to map and write. This could be useful for the Chrome OS architecture, where the Web content ("unprivileged process") maps and CPU-draws a buffer that was previously allocated in the GPU process ("privileged process").
In v2, I've added a patch that Daniel kindly drafted to allow the unprivileged process to flush through a prime fd. In v3, I've addressed a few concerns and then added end_cpu_access to i915.
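For reference, the hand-off of the prime fd between the two processes is ordinary SCM_RIGHTS fd-passing over a unix socket. A minimal sketch, not part of the series itself; socket setup and error handling are elided:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static void send_fd(int sock, int fd)
{
	char token = 0;
	struct iovec iov = { .iov_base = &token, .iov_len = sizeof(token) };
	char ctrl[CMSG_SPACE(sizeof(int))];
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;	/* pass the fd itself, not its number */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	sendmsg(sock, &msg, 0);
}

The receiving side does the matching recvmsg() and reads the new fd out of the SCM_RIGHTS control message; from then on it only ever touches the dma-buf fd, never a GEM handle.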
To validate all this I'm using igt, and sending the tests for review now. Please take a look.
Tiago
Daniel Thompson (1):
  drm: prime: Honour O_RDWR during prime-handle-to-fd

Daniel Vetter (1):
  dma-buf: Add ioctls to allow userspace to flush

Tiago Vignatti (2):
  drm/i915: Implement end_cpu_access
  drm/i915: Use CPU mapping for userspace dma-buf mmap()
 drivers/dma-buf/dma-buf.c              | 47 ++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/drm_prime.c            | 10 +++-------
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 28 +++++++++++++++++++-
 include/uapi/drm/drm.h                 |  1 +
 include/uapi/linux/dma-buf.h           | 39 ++++++++++++++++++++++++++++
 5 files changed, 117 insertions(+), 8 deletions(-)
 create mode 100644 include/uapi/linux/dma-buf.h
From: Daniel Thompson <daniel.thompson@linaro.org>

drm: prime: Honour O_RDWR during prime-handle-to-fd
Currently DRM_IOCTL_PRIME_HANDLE_TO_FD rejects all flags except (DRM|O)_CLOEXEC making it difficult (maybe impossible) for userspace to mmap() the resulting dma-buf even when this is supported by the DRM driver.
It is trivial to relax the restriction and permit read/write access. This is safe because the flags are seldom touched by drm; mostly they are passed verbatim to dma_buf calls.
v3 (Tiago): removed unused flags variable from drm_prime_handle_to_fd_ioctl.
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 drivers/gpu/drm/drm_prime.c | 10 +++-------
 include/uapi/drm/drm.h      |  1 +
 2 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 27aa718..df6cdc7 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -329,7 +329,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
  * drm_gem_prime_export - helper library implementation of the export callback
  * @dev: drm_device to export from
  * @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
  *
  * This is the implementation of the gem_prime_export functions for GEM drivers
  * using the PRIME helpers.
@@ -628,7 +628,6 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 				 struct drm_file *file_priv)
 {
 	struct drm_prime_handle *args = data;
-	uint32_t flags;
 
 	if (!drm_core_check_feature(dev, DRIVER_PRIME))
 		return -EINVAL;
@@ -637,14 +636,11 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
 		return -ENOSYS;
 
 	/* check flags are valid */
-	if (args->flags & ~DRM_CLOEXEC)
+	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
 		return -EINVAL;
 
-	/* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
-	flags = args->flags & DRM_CLOEXEC;
-
 	return dev->driver->prime_handle_to_fd(dev, file_priv,
-			args->handle, flags, &args->fd);
+			args->handle, args->flags, &args->fd);
 }
 
 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 3801584..ad8223e 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -668,6 +668,7 @@ struct drm_set_client_cap {
 	__u64 value;
 };
 
+#define DRM_RDWR O_RDWR
 #define DRM_CLOEXEC O_CLOEXEC
 
 struct drm_prime_handle {
 	__u32 handle;
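For illustration, this is how an exporter would ask for a writable fd once the patch is in; on kernels without it, the DRM_RDWR bit makes the ioctl fail with -EINVAL. A sketch using the raw ioctl rather than any wrapper:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>

/* Returns a prime fd that importers may mmap() with PROT_WRITE. */
static int prime_export_rdwr(int drm_fd, uint32_t handle)
{
	struct drm_prime_handle args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.flags = DRM_CLOEXEC | DRM_RDWR;	/* DRM_RDWR is the new bit */
	args.fd = -1;

	if (drmIoctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;

	return args.fd;
}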
From: Daniel Vetter <daniel.vetter@ffwll.ch>

dma-buf: Add ioctls to allow userspace to flush
FIXME: Update kerneldoc for begin/end to make it clear that those are for mmap too.
Open: Do we need a special indication that the begin/end is from userspace mmap and not from kernel mmap?
There's also the question already about kernel internal users - vmap and kmap interfaces are much different ... We might need to add a mapping enum to the begin/end dma-buf functions.
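One possible shape for such a mapping enum, sketched purely as an illustration of the open question above; the names are hypothetical and not part of this series:

/* illustrative only -- a way to tell begin/end_cpu_access apart by origin */
enum dma_buf_access_origin {
	DMA_BUF_ACCESS_KERNEL_KMAP,	/* kernel kmap/kmap_atomic interface */
	DMA_BUF_ACCESS_KERNEL_VMAP,	/* kernel vmap interface */
	DMA_BUF_ACCESS_USER_MMAP,	/* userspace mmap of the dma-buf fd */
};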
v2 (Tiago): Fix header file type names (u64 -> __u64)
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 drivers/dma-buf/dma-buf.c    | 47 ++++++++++++++++++++++++++++++++++++++++++++
 include/uapi/linux/dma-buf.h | 39 ++++++++++++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)
 create mode 100644 include/uapi/linux/dma-buf.h
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 155c146..4820d61 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -34,6 +34,8 @@
 #include <linux/poll.h>
 #include <linux/reservation.h>
 
+#include <uapi/linux/dma-buf.h>
+
 static inline int is_dma_buf_file(struct file *);
 
 struct dma_buf_list {
@@ -251,11 +253,56 @@ out:
 	return events;
 }
 
+static long dma_buf_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	struct dma_buf *dmabuf;
+	struct dma_buf_sync sync;
+	enum dma_data_direction direction;
+
+	dmabuf = file->private_data;
+
+	if (!is_dma_buf_file(file))
+		return -EINVAL;
+
+	switch (cmd) {
+	case DMA_BUF_IOCTL_SYNC:
+		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
+			return -EFAULT;
+
+		if (sync.flags & DMA_BUF_SYNC_RW)
+			direction = DMA_BIDIRECTIONAL;
+		else if (sync.flags & DMA_BUF_SYNC_READ)
+			direction = DMA_FROM_DEVICE;
+		else if (sync.flags & DMA_BUF_SYNC_WRITE)
+			direction = DMA_TO_DEVICE;
+		else
+			return -EINVAL;
+
+		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+			return -EINVAL;
+
+		/* FIXME: Check for overflows in start/length. */
+
+		if (sync.flags & DMA_BUF_SYNC_END)
+			dma_buf_end_cpu_access(dmabuf, sync.start,
+					       sync.length, direction);
+		else
+			dma_buf_begin_cpu_access(dmabuf, sync.start,
+						 sync.length, direction);
+
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
+
 static const struct file_operations dma_buf_fops = {
 	.release	= dma_buf_release,
 	.mmap		= dma_buf_mmap_internal,
 	.llseek		= dma_buf_llseek,
 	.poll		= dma_buf_poll,
+	.unlocked_ioctl	= dma_buf_ioctl,
 };
 
 /*
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
new file mode 100644
index 0000000..e5327df
--- /dev/null
+++ b/include/uapi/linux/dma-buf.h
@@ -0,0 +1,39 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2015 Intel Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DMA_BUF_UAPI_H_
+#define _DMA_BUF_UAPI_H_
+
+struct dma_buf_sync {
+	__u64 flags;
+	__u64 start;
+	__u64 length;
+};
+
+#define DMA_BUF_SYNC_READ	(1 << 0)
+#define DMA_BUF_SYNC_WRITE	(2 << 0)
+#define DMA_BUF_SYNC_RW		(3 << 0)
+#define DMA_BUF_SYNC_START	(0 << 2)
+#define DMA_BUF_SYNC_END	(1 << 2)
+#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
+	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
+
+#define DMA_BUF_BASE		'b'
+#define DMA_BUF_IOCTL_SYNC	_IOWR(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+#endif
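From the importer's point of view the new ioctl brackets CPU access to the mapping. A sketch assuming the uapi above; whether a zero start/length should mean "whole buffer" is one of the open uapi questions:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int cpu_fill(int dma_buf_fd, size_t size)
{
	struct dma_buf_sync sync;
	char *ptr;

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   dma_buf_fd, 0);
	if (ptr == MAP_FAILED)
		return -1;

	memset(&sync, 0, sizeof(sync));
	sync.start = 0;
	sync.length = size;

	/* maps to dma_buf_begin_cpu_access() in the kernel */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
	ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(ptr, 0xff, size);	/* the actual CPU writes */

	/* maps to dma_buf_end_cpu_access() in the kernel */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(ptr, size);
	return 0;
}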
Hi Tiago,
Thanks for the patch!
On 12 August 2015 at 02:29, Tiago Vignatti <tiago.vignatti@intel.com> wrote:
> From: Daniel Vetter <daniel.vetter@ffwll.ch>
>
> FIXME: Update kerneldoc for begin/end to make it clear that those are for mmap too.
I think if we're going to add this patch upstream, at least the FIXMEs should be fixed.
> Open: Do we need a special indication that the begin/end is from userspace mmap and not from kernel mmap?
>
> There's also the question already about kernel internal users - vmap and kmap interfaces are much different ... We might need to add a mapping enum to the begin/end dma-buf functions.
Also, this looks like another thing that could be added right now while we're adding this change.
[snip]

> +		/* FIXME: Check for overflows in start/length. */
This too should be fixed in this patch itself.
drm/i915: Implement end_cpu_access

Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e9c2bfd..8447ba4 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -212,6 +212,15 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
 	return ret;
 }
 
+static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+{
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+
+	if (i915_gem_object_set_to_gtt_domain(obj, write))
+		DRM_ERROR("failed to set bo into the GTT\n");
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops  = {
 	.map_dma_buf = i915_gem_map_dma_buf,
 	.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -224,6 +233,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 	.vmap = i915_gem_dmabuf_vmap,
 	.vunmap = i915_gem_dmabuf_vunmap,
 	.begin_cpu_access = i915_gem_begin_cpu_access,
+	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
drm/i915: Use CPU mapping for userspace dma-buf mmap()

Userspace is the one in charge of flushing the CPU caches, by wrapping its mmap access with begin{,end}_cpu_access.

v2: Remove LLC check because we have dma-buf sync providers now. Also, fix return before transferring ownership when mmap fails.
v3: Fix return values.
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 8447ba4..8b87c86 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -193,7 +193,23 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 {
-	return -EINVAL;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	int ret;
+
+	if (obj->base.size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (WARN_ON(!obj->base.filp))
+		return -ENODEV;
+
+	ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
+	if (ret)
+		return ret;
+
+	fput(vma->vm_file);
+	vma->vm_file = get_file(obj->base.filp);
+
+	return 0;
 }
 
 static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
On Tue, Aug 11, 2015 at 05:59:23PM -0300, Tiago Vignatti wrote:
[snip]

> +	if (WARN_ON(!obj->base.filp))
> +		return -ENODEV;
That's user triggerable, mmap(dma_buf_export(userptr)), so remove the WARN.

-Chris
On Tue, Aug 11, 2015 at 11:20:46PM +0100, Chris Wilson wrote:
> On Tue, Aug 11, 2015 at 05:59:23PM -0300, Tiago Vignatti wrote:
> > [snip]
> >
> > +	if (WARN_ON(!obj->base.filp))
> > +		return -ENODEV;
>
> That's user triggerable, mmap(dma_buf_export(userptr)), so remove the
> WARN.
Sounds like we need another testcase here ;-)

-Daniel
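A rough sketch of what that subtest could look like in prime_mmap.c, assuming an igt gem_userptr() helper wrapping DRM_IOCTL_I915_GEM_USERPTR; the exact expected errno depends on how the kernel fix lands:

/* userptr bos have no shmem filp backing them, so mmap() on their
 * dma-buf fd must fail cleanly instead of hitting a kernel WARN */
static void
test_userptr(void)
{
	void *mem, *ptr;
	uint32_t handle;
	int dma_buf_fd;

	mem = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	igt_assert(mem != MAP_FAILED);
	/* hypothetical helper: create a userptr bo from anonymous memory */
	gem_userptr(fd, mem, BO_SIZE, 0, &handle);

	dma_buf_fd = prime_handle_to_fd(fd, handle);
	igt_assert(errno == 0);

	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
	igt_assert(ptr == MAP_FAILED && errno == ENODEV);

	close(dma_buf_fd);
	gem_close(fd, handle);
	munmap(mem, BO_SIZE);
}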
From: Rob Bradford <rob@linux.intel.com>
This test has the following subtests:
 - test_correct checks for correctness of the data
 - test_map_unmap checks for mapping idempotency
 - test_reprime checks for dma-buf creation idempotency
 - test_forked checks for multiprocess access
 - test_refcounting checks for buffer reference counting
 - test_dup checks that dup()ing the fd works
 - test_errors checks the error return values for failures
 - test_aperture_limit tests multiple buffer creation at the gtt aperture limit
Signed-off-by: Rob Bradford <rob@linux.intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 tests/Makefile.sources |   1 +
 tests/prime_mmap.c     | 383 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 384 insertions(+)
 create mode 100644 tests/prime_mmap.c
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index c94714b..5b2072e 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -90,6 +90,7 @@ TESTS_progs_M = \
 	pm_rps \
 	pm_rc6_residency \
 	pm_sseu \
+	prime_mmap \
 	prime_self_import \
 	template \
 	$(NULL)
diff --git a/tests/prime_mmap.c b/tests/prime_mmap.c
new file mode 100644
index 0000000..4dc2055
--- /dev/null
+++ b/tests/prime_mmap.c
@@ -0,0 +1,383 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rob Bradford <rob at linux.intel.com>
+ *
+ */
+
+/*
+ * Testcase: Check whether mmap()ing dma-buf works
+ */
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <pthread.h>
+
+#include "drm.h"
+#include "i915_drm.h"
+#include "drmtest.h"
+#include "igt_debugfs.h"
+#include "ioctl_wrappers.h"
+
+#define BO_SIZE (16*1024)
+
+static int fd;
+
+char pattern[] = {0xff, 0x00, 0x00, 0x00,
+	0x00, 0xff, 0x00, 0x00,
+	0x00, 0x00, 0xff, 0x00,
+	0x00, 0x00, 0x00, 0xff};
+
+static void
+fill_bo(uint32_t handle, size_t size)
+{
+	off_t i;
+	for (i = 0; i < size; i+=sizeof(pattern))
+	{
+		gem_write(fd, handle, i, pattern, sizeof(pattern));
+	}
+}
+
+static int
+pattern_check(char *ptr, size_t size)
+{
+	off_t i;
+	for (i = 0; i < size; i+=sizeof(pattern))
+	{
+		if (memcmp(ptr, pattern, sizeof(pattern)) != 0)
+			return 1;
+	}
+
+	return 0;
+}
+
+static void
+test_correct(void)
+{
+	int dma_buf_fd;
+	char *ptr1, *ptr2;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	/* Check correctness vs GEM_MMAP_GTT */
+	ptr1 = gem_mmap(fd, handle, BO_SIZE, PROT_READ | PROT_WRITE);
+	ptr2 = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr1 != MAP_FAILED);
+	igt_assert(ptr2 != MAP_FAILED);
+	igt_assert(memcmp(ptr1, ptr2, BO_SIZE) == 0);
+
+	/* Check pattern correctness */
+	igt_assert(pattern_check(ptr2, BO_SIZE) == 0);
+
+	munmap(ptr1, BO_SIZE);
+	munmap(ptr2, BO_SIZE);
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+static void
+test_map_unmap(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+
+	/* Unmap and remap */
+	munmap(ptr, BO_SIZE);
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+
+	munmap(ptr, BO_SIZE);
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+/* prime and then unprime and then prime again the same handle */
+static void
+test_reprime(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+
+	close(dma_buf_fd);
+	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	munmap(ptr, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+
+	munmap(ptr, BO_SIZE);
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+/* map from another process */
+static void
+test_forked(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	igt_fork(childno, 1) {
+		ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+		igt_assert(ptr != MAP_FAILED);
+		igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+		munmap(ptr, BO_SIZE);
+		close(dma_buf_fd);
+	}
+	close(dma_buf_fd);
+	igt_waitchildren();
+	gem_close(fd, handle);
+}
+
+static void
+test_refcounting(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+	/* Close gem object before mapping */
+	gem_close(fd, handle);
+
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	munmap(ptr, BO_SIZE);
+	close(dma_buf_fd);
+}
+
+/* dup before mmap */
+static void
+test_dup(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+
+	dma_buf_fd = dup(prime_handle_to_fd(fd, handle));
+	igt_assert(errno == 0);
+
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	munmap(ptr, BO_SIZE);
+	gem_close(fd, handle);
+	close(dma_buf_fd);
+}
+
+/* Used for error case testing to avoid wrapper */
+static int prime_handle_to_fd_no_assert(uint32_t handle, int *fd_out)
+{
+	struct drm_prime_handle args;
+	int ret;
+
+	args.handle = handle;
+	args.flags = DRM_CLOEXEC;
+	args.fd = -1;
+
+	ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+
+	*fd_out = args.fd;
+
+	return ret;
+}
+
+static void
+test_errors(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	/* Close gem object before priming */
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+	gem_close(fd, handle);
+	prime_handle_to_fd_no_assert(handle, &dma_buf_fd);
+	igt_assert(dma_buf_fd == -1 && errno == ENOENT);
+	errno = 0;
+
+	/* close fd before mapping */
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+	close(dma_buf_fd);
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr == MAP_FAILED && errno == EBADF);
+	errno = 0;
+	gem_close(fd, handle);
+
+	/* Map too big */
+	handle = gem_create(fd, BO_SIZE);
+	fill_bo(handle, BO_SIZE);
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+	ptr = mmap(NULL, BO_SIZE * 2, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr == MAP_FAILED && errno == EINVAL);
+	errno = 0;
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+static void
+test_aperture_limit(void)
+{
+	int dma_buf_fd1, dma_buf_fd2;
+	char *ptr1, *ptr2;
+	uint32_t handle1, handle2;
+	/* Two buffers the sum of which > mappable aperture */
+	uint64_t size1 = (gem_mappable_aperture_size() * 7) / 8;
+	uint64_t size2 = (gem_mappable_aperture_size() * 3) / 8;
+
+	handle1 = gem_create(fd, size1);
+	fill_bo(handle1, BO_SIZE);
+
+	dma_buf_fd1 = prime_handle_to_fd(fd, handle1);
+	igt_assert(errno == 0);
+	ptr1 = mmap(NULL, size1, PROT_READ, MAP_SHARED, dma_buf_fd1, 0);
+	igt_assert(ptr1 != MAP_FAILED);
+	igt_assert(pattern_check(ptr1, BO_SIZE) == 0);
+
+	handle2 = gem_create(fd, size1);
+	fill_bo(handle2, BO_SIZE);
+	dma_buf_fd2 = prime_handle_to_fd(fd, handle2);
+	igt_assert(errno == 0);
+	ptr2 = mmap(NULL, size2, PROT_READ, MAP_SHARED, dma_buf_fd2, 0);
+	igt_assert(ptr2 != MAP_FAILED);
+	igt_assert(pattern_check(ptr2, BO_SIZE) == 0);
+
+	igt_assert(memcmp(ptr1, ptr2, BO_SIZE) == 0);
+
+	munmap(ptr1, size1);
+	munmap(ptr2, size2);
+	close(dma_buf_fd1);
+	close(dma_buf_fd2);
+	gem_close(fd, handle1);
+	gem_close(fd, handle2);
+}
+
+static int
+check_for_dma_buf_mmap(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+	int ret = 1;
+
+	handle = gem_create(fd, BO_SIZE);
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
+	if (ptr != MAP_FAILED)
+		ret = 0;
+	munmap(ptr, BO_SIZE);
+	gem_close(fd, handle);
+	close(dma_buf_fd);
+	return ret;
+}
+
+igt_main
+{
+	struct {
+		const char *name;
+		void (*fn)(void);
+	} tests[] = {
+		{ "test_correct", test_correct },
+		{ "test_map_unmap", test_map_unmap },
+		{ "test_reprime", test_reprime },
+		{ "test_forked", test_forked },
+		{ "test_refcounting", test_refcounting },
+		{ "test_dup", test_dup },
+		{ "test_errors", test_errors },
+		{ "test_aperture_limit", test_aperture_limit },
+	};
+	int i;
+
+	igt_fixture {
+		fd = drm_open_any();
+		errno = 0;
+	}
+
+	igt_skip_on((check_for_dma_buf_mmap() != 0));
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		igt_subtest(tests[i].name)
+			tests[i].fn();
+	}
+
+	igt_fixture
+		close(fd);
+}
- Remove pattern_check(), which was walking through a useless iterator
- Remove superfluous PROT_WRITE from gem_mmap, in test_correct()
- Add binary file to .gitignore
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 tests/.gitignore   |  1 +
 tests/prime_mmap.c | 37 ++++++++++++-------------------------
 2 files changed, 13 insertions(+), 25 deletions(-)
diff --git a/tests/.gitignore b/tests/.gitignore
index 0af0899..5bc4a58 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -163,6 +163,7 @@ pm_sseu
 prime_nv_api
 prime_nv_pcopy
 prime_nv_test
+prime_mmap
 prime_self_import
 prime_udl
 template
diff --git a/tests/prime_mmap.c b/tests/prime_mmap.c
index 4dc2055..dc59e8f 100644
--- a/tests/prime_mmap.c
+++ b/tests/prime_mmap.c
@@ -65,19 +65,6 @@ fill_bo(uint32_t handle, size_t size)
 	}
 }
 
-static int
-pattern_check(char *ptr, size_t size)
-{
-	off_t i;
-	for (i = 0; i < size; i+=sizeof(pattern))
-	{
-		if (memcmp(ptr, pattern, sizeof(pattern)) != 0)
-			return 1;
-	}
-
-	return 0;
-}
-
 static void
 test_correct(void)
 {
@@ -92,14 +79,14 @@ test_correct(void)
 	igt_assert(errno == 0);
 
 	/* Check correctness vs GEM_MMAP_GTT */
-	ptr1 = gem_mmap(fd, handle, BO_SIZE, PROT_READ | PROT_WRITE);
+	ptr1 = gem_mmap(fd, handle, BO_SIZE, PROT_READ);
 	ptr2 = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 	igt_assert(ptr1 != MAP_FAILED);
 	igt_assert(ptr2 != MAP_FAILED);
 	igt_assert(memcmp(ptr1, ptr2, BO_SIZE) == 0);
 
 	/* Check pattern correctness */
-	igt_assert(pattern_check(ptr2, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr2, pattern, sizeof(pattern)) == 0);
 
 	munmap(ptr1, BO_SIZE);
 	munmap(ptr2, BO_SIZE);
@@ -122,13 +109,13 @@ test_map_unmap(void)
 
 	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 	igt_assert(ptr != MAP_FAILED);
-	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 
 	/* Unmap and remap */
 	munmap(ptr, BO_SIZE);
 	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 	igt_assert(ptr != MAP_FAILED);
-	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 
 	munmap(ptr, BO_SIZE);
 	close(dma_buf_fd);
@@ -151,16 +138,16 @@ test_reprime(void)
 
 	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 	igt_assert(ptr != MAP_FAILED);
-	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 
 	close(dma_buf_fd);
-	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 	munmap(ptr, BO_SIZE);
 
 	dma_buf_fd = prime_handle_to_fd(fd, handle);
 	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 	igt_assert(ptr != MAP_FAILED);
-	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 
 	munmap(ptr, BO_SIZE);
 	close(dma_buf_fd);
@@ -184,7 +171,7 @@ test_forked(void)
 	igt_fork(childno, 1) {
 		ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 		igt_assert(ptr != MAP_FAILED);
-		igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+		igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 		munmap(ptr, BO_SIZE);
 		close(dma_buf_fd);
 	}
@@ -210,7 +197,7 @@ test_refcounting(void)
 
 	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 	igt_assert(ptr != MAP_FAILED);
-	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 	munmap(ptr, BO_SIZE);
 	close(dma_buf_fd);
 }
@@ -231,7 +218,7 @@ test_dup(void)
 
 	ptr = mmap(NULL, BO_SIZE, PROT_READ, MAP_SHARED, dma_buf_fd, 0);
 	igt_assert(ptr != MAP_FAILED);
-	igt_assert(pattern_check(ptr, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
 	munmap(ptr, BO_SIZE);
 	gem_close(fd, handle);
 	close(dma_buf_fd);
@@ -310,7 +297,7 @@ test_aperture_limit(void)
 	igt_assert(errno == 0);
 	ptr1 = mmap(NULL, size1, PROT_READ, MAP_SHARED, dma_buf_fd1, 0);
 	igt_assert(ptr1 != MAP_FAILED);
-	igt_assert(pattern_check(ptr1, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr1, pattern, sizeof(pattern)) == 0);
 
 	handle2 = gem_create(fd, size1);
 	fill_bo(handle2, BO_SIZE);
@@ -318,7 +305,7 @@ test_aperture_limit(void)
 	igt_assert(errno == 0);
 	ptr2 = mmap(NULL, size2, PROT_READ, MAP_SHARED, dma_buf_fd2, 0);
 	igt_assert(ptr2 != MAP_FAILED);
-	igt_assert(pattern_check(ptr2, BO_SIZE) == 0);
+	igt_assert(memcmp(ptr2, pattern, sizeof(pattern)) == 0);
 
 	igt_assert(memcmp(ptr1, ptr2, BO_SIZE) == 0);
This patch adds test_correct_cpu_write, which maps the texture buffer through a prime fd and then writes directly to it using the CPU. It stresses the driver to guarantee cache synchronization among the different domains.
This patch also adds test_forked_cpu_write, which creates the GEM bo in one process and passes its prime handle to another process, which in turn uses the handle only to map and write. Grossly speaking, this test simulates the Chrome OS architecture, where the Web content ("unprivileged process") maps and CPU-draws a buffer that was previously allocated in the GPU process ("privileged process").
This requires kernel modifications (Daniel Thompson's "drm: prime: Honour O_RDWR during prime-handle-to-fd").
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 lib/ioctl_wrappers.c |  5 +++-
 tests/prime_mmap.c   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+), 1 deletion(-)
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 53bd635..941fa66 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -1125,6 +1125,9 @@ void gem_require_ring(int fd, int ring_id)
 
 /* prime */
 
+#ifndef DRM_RDWR
+#define DRM_RDWR O_RDWR
+#endif
 /**
  * prime_handle_to_fd:
  * @fd: open i915 drm file descriptor
@@ -1142,7 +1145,7 @@ int prime_handle_to_fd(int fd, uint32_t handle)
 
 	memset(&args, 0, sizeof(args));
 	args.handle = handle;
-	args.flags = DRM_CLOEXEC;
+	args.flags = DRM_CLOEXEC | DRM_RDWR;
 	args.fd = -1;
 
 	do_ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
diff --git a/tests/prime_mmap.c b/tests/prime_mmap.c
index dc59e8f..ad91371 100644
--- a/tests/prime_mmap.c
+++ b/tests/prime_mmap.c
@@ -22,6 +22,7 @@
  *
  * Authors:
  *    Rob Bradford <rob at linux.intel.com>
+ *    Tiago Vignatti <tiago.vignatti at intel.com>
  *
  */
 
@@ -66,6 +67,12 @@ fill_bo(uint32_t handle, size_t size)
 }
 
 static void
+fill_bo_cpu(char *ptr)
+{
+	memcpy(ptr, pattern, sizeof(pattern));
+}
+
+static void
 test_correct(void)
 {
 	int dma_buf_fd;
@@ -180,6 +187,62 @@ test_forked(void)
 	gem_close(fd, handle);
 }
 
+/* test CPU write. This has a rather big implication for the driver which must
+ * guarantee cache synchronization when writing the bo using CPU. */
+static void
+test_correct_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	/* Check correctness of map using write protection (PROT_WRITE) */
+	ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+
+	/* Fill bo using CPU */
+	fill_bo_cpu(ptr);
+
+	/* Check pattern correctness */
+	igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+
+	munmap(ptr, BO_SIZE);
+	close(dma_buf_fd);
+	gem_close(fd, handle);
+}
+
+/* map from another process and then write using CPU */
+static void
+test_forked_cpu_write(void)
+{
+	int dma_buf_fd;
+	char *ptr;
+	uint32_t handle;
+
+	handle = gem_create(fd, BO_SIZE);
+
+	dma_buf_fd = prime_handle_to_fd(fd, handle);
+	igt_assert(errno == 0);
+
+	igt_fork(childno, 1) {
+		ptr = mmap(NULL, BO_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+		igt_assert(ptr != MAP_FAILED);
+		fill_bo_cpu(ptr);
+
+		igt_assert(memcmp(ptr, pattern, sizeof(pattern)) == 0);
+		munmap(ptr, BO_SIZE);
+		close(dma_buf_fd);
+	}
+	close(dma_buf_fd);
+	igt_waitchildren();
+	gem_close(fd, handle);
+}
+
 static void
 test_refcounting(void)
 {
@@ -346,6 +409,8 @@ igt_main
 		{ "test_map_unmap", test_map_unmap },
 		{ "test_reprime", test_reprime },
 		{ "test_forked", test_forked },
+		{ "test_correct_cpu_write", test_correct_cpu_write },
+		{ "test_forked_cpu_write", test_forked_cpu_write },
 		{ "test_refcounting", test_refcounting },
 		{ "test_dup", test_dup },
 		{ "test_errors", test_errors },
This program can be used to detect when writes don't land in scanout due to cache incoherency. Although this seems to be a problem inherent to non-LLC machines ("Atom"), this particular test also catches cache dirt on scanout on LLC machines. It's inspired by Ville's kms_pwrite_crc.c and can also be used to test the correctness of the driver's begin_cpu_access (TODO: end_cpu_access).
To see the need for the flush, one has to run this same binary a few times because it's not 100% reproducible (on my Core machine it's always possible to catch the problem by running it at most 5 times).
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 tests/.gitignore           |   1 +
 tests/Makefile.sources     |   1 +
 tests/kms_mmap_write_crc.c | 236 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 238 insertions(+)
 create mode 100644 tests/kms_mmap_write_crc.c
diff --git a/tests/.gitignore b/tests/.gitignore
index 5bc4a58..9ba1e48 100644
--- a/tests/.gitignore
+++ b/tests/.gitignore
@@ -140,6 +140,7 @@ kms_force_connector
 kms_frontbuffer_tracking
 kms_legacy_colorkey
 kms_mmio_vs_cs_flip
+kms_mmap_write_crc
 kms_pipe_b_c_ivb
 kms_pipe_crc_basic
 kms_plane
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 5b2072e..31c5508 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -163,6 +163,7 @@ TESTS_progs = \
 	kms_3d \
 	kms_fence_pin_leak \
 	kms_force_connector \
+	kms_mmap_write_crc \
 	kms_pwrite_crc \
 	kms_sink_crc_basic \
 	prime_udl \
diff --git a/tests/kms_mmap_write_crc.c b/tests/kms_mmap_write_crc.c
new file mode 100644
index 0000000..e24d535
--- /dev/null
+++ b/tests/kms_mmap_write_crc.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Tiago Vignatti <tiago.vignatti at intel.com>
+ */
+
+#include <errno.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "drmtest.h"
+#include "igt_debugfs.h"
+#include "igt_kms.h"
+#include "intel_chipset.h"
+#include "ioctl_wrappers.h"
+#include "igt_aux.h"
+
+IGT_TEST_DESCRIPTION(
+   "Use the display CRC support to validate mmap write to an already uncached future scanout buffer.");
+
+typedef struct {
+	int drm_fd;
+	igt_display_t display;
+	struct igt_fb fb[2];
+	igt_output_t *output;
+	igt_plane_t *primary;
+	enum pipe pipe;
+	igt_crc_t ref_crc;
+	igt_pipe_crc_t *pipe_crc;
+	uint32_t devid;
+} data_t;
+
+int dma_buf_fd;
+
+static char *dmabuf_mmap_framebuffer(int drm_fd, struct igt_fb *fb)
+{
+	char *ptr = NULL;
+
+	dma_buf_fd = prime_handle_to_fd(drm_fd, fb->gem_handle);
+	igt_assert(errno == 0);
+
+	ptr = mmap(NULL, fb->size, PROT_READ | PROT_WRITE, MAP_SHARED, dma_buf_fd, 0);
+	igt_assert(ptr != MAP_FAILED);
+
+	return ptr;
+}
+
+static void test_begin_access(data_t *data)
+{
+	igt_display_t *display = &data->display;
+	igt_output_t *output = data->output;
+	struct igt_fb *fb = &data->fb[1];
+	drmModeModeInfo *mode;
+	cairo_t *cr;
+	char *ptr;
+	uint32_t caching;
+	void *buf;
+	igt_crc_t crc;
+
+	mode = igt_output_get_mode(output);
+
+	/* create a non-white fb where we can write later */
+	igt_create_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
+		      DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE, fb);
+
+	ptr = dmabuf_mmap_framebuffer(data->drm_fd, fb);
+
+	cr = igt_get_cairo_ctx(data->drm_fd, fb);
+	igt_paint_test_pattern(cr, fb->width, fb->height);
+	cairo_destroy(cr);
+
+	/* flip to it to make it UC/WC and fully flushed */
+	igt_plane_set_fb(data->primary, fb);
+	igt_display_commit(display);
+
+	/* flip back the original white buffer */
+	igt_plane_set_fb(data->primary, &data->fb[0]);
+	igt_display_commit(display);
+
+	/* make sure caching mode has become UC/WT */
+	caching = gem_get_caching(data->drm_fd, fb->gem_handle);
+	igt_assert(caching == I915_CACHING_NONE || caching == I915_CACHING_DISPLAY);
+
+	// Uncomment the following for flush and the crc check next passes. It
+	// requires the kernel counter-part of it implemented obviously.
+	// {
+	//	struct dma_buf_sync sync_start;
+	//	memset(&sync_start, 0, sizeof(sync_start));
+	//	sync_start.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
+	//	do_ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync_start);
+	// }
+
+	/* use dmabuf pointer to make the other fb all white too */
+	buf = malloc(fb->size);
+	igt_assert(buf != NULL);
+	memset(buf, 0xff, fb->size);
+	memcpy(ptr, buf, fb->size);
+	free(buf);
+
+	/* and flip to it */
+	igt_plane_set_fb(data->primary, fb);
+	igt_display_commit(display);
+
+	/* check that the crc is as expected, which requires that caches got flushed */
+	igt_pipe_crc_collect_crc(data->pipe_crc, &crc);
+	igt_assert_crc_equal(&crc, &data->ref_crc);
+}
+
+static bool prepare_crtc(data_t *data)
+{
+	igt_display_t *display = &data->display;
+	igt_output_t *output = data->output;
+	drmModeModeInfo *mode;
+
+	/* select the pipe we want to use */
+	igt_output_set_pipe(output, data->pipe);
+	igt_display_commit(display);
+
+	if (!output->valid) {
+		igt_output_set_pipe(output, PIPE_ANY);
+		igt_display_commit(display);
+		return false;
+	}
+
+	mode = igt_output_get_mode(output);
+
+	/* create a white reference fb and flip to it */
+	igt_create_color_fb(data->drm_fd, mode->hdisplay, mode->vdisplay,
+			    DRM_FORMAT_XRGB8888, LOCAL_DRM_FORMAT_MOD_NONE,
+			    1.0, 1.0, 1.0, &data->fb[0]);
+
+	data->primary = igt_output_get_plane(output, IGT_PLANE_PRIMARY);
+
+	igt_plane_set_fb(data->primary, &data->fb[0]);
+	igt_display_commit(display);
+
+	if (data->pipe_crc)
+		igt_pipe_crc_free(data->pipe_crc);
+
+	data->pipe_crc = igt_pipe_crc_new(data->pipe,
+					  INTEL_PIPE_CRC_SOURCE_AUTO);
+
+	/* get reference crc for the white fb */
+	igt_pipe_crc_collect_crc(data->pipe_crc, &data->ref_crc);
+
+	return true;
+}
+
+static void cleanup_crtc(data_t *data)
+{
+	igt_display_t *display = &data->display;
+	igt_output_t *output = data->output;
+
+	igt_pipe_crc_free(data->pipe_crc);
+	data->pipe_crc = NULL;
+
+	igt_plane_set_fb(data->primary, NULL);
+
+	igt_output_set_pipe(output, PIPE_ANY);
+	igt_display_commit(display);
+
+	igt_remove_fb(data->drm_fd, &data->fb[0]);
+	igt_remove_fb(data->drm_fd, &data->fb[1]);
+}
+
+static void run_test(data_t *data)
+{
+	igt_display_t *display = &data->display;
+	igt_output_t *output;
+	enum pipe pipe;
+
+	for_each_connected_output(display, output) {
+		data->output = output;
+		for_each_pipe(display, pipe) {
+			data->pipe = pipe;
+
+			if (!prepare_crtc(data))
+				continue;
+
+			test_begin_access(data);
+			cleanup_crtc(data);
+
+			/* once is enough */
+			return;
+		}
+	}
+
+	igt_skip("no valid crtc/connector combinations found\n");
+}
+
+static data_t data;
+
+igt_simple_main
+{
+	igt_skip_on_simulation();
+
+	igt_fixture {
+		data.drm_fd = drm_open_any_master();
+
+		data.devid = intel_get_drm_devid(data.drm_fd);
+
+		kmstest_set_vt_graphics_mode();
+
+		igt_require_pipe_crc();
+
+		igt_display_init(&data.display, data.drm_fd);
+	}
+
+	run_test(&data);
+
+	igt_fixture {
+		igt_display_fini(&data.display);
+	}
+}
It requires i915 changes to add end_cpu_access().
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
---
 tests/kms_mmap_write_crc.c | 63 ++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 55 insertions(+), 8 deletions(-)
diff --git a/tests/kms_mmap_write_crc.c b/tests/kms_mmap_write_crc.c
index e24d535..59ac9e7 100644
--- a/tests/kms_mmap_write_crc.c
+++ b/tests/kms_mmap_write_crc.c
@@ -67,6 +67,24 @@ static char *dmabuf_mmap_framebuffer(int drm_fd, struct igt_fb *fb)
 	return ptr;
 }
 
+static void dmabuf_sync_start(void)
+{
+	struct dma_buf_sync sync_start;
+
+	memset(&sync_start, 0, sizeof(sync_start));
+	sync_start.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
+	do_ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync_start);
+}
+
+static void dmabuf_sync_end(void)
+{
+	struct dma_buf_sync sync_end;
+
+	memset(&sync_end, 0, sizeof(sync_end));
+	sync_end.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
+	do_ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync_end);
+}
+
 static void test_begin_access(data_t *data)
 {
 	igt_display_t *display = &data->display;
@@ -103,14 +121,11 @@ static void test_begin_access(data_t *data)
 	caching = gem_get_caching(data->drm_fd, fb->gem_handle);
 	igt_assert(caching == I915_CACHING_NONE || caching == I915_CACHING_DISPLAY);
 
-	// Uncomment the following for flush and the crc check next passes. It
-	// requires the kernel counter-part of it implemented obviously.
-	// {
-	//	struct dma_buf_sync sync_start;
-	//	memset(&sync_start, 0, sizeof(sync_start));
-	//	sync_start.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
-	//	do_ioctl(dma_buf_fd, DMA_BUF_IOCTL_SYNC, &sync_start);
-	// }
+	/*
+	 * firstly demonstrate the need for DMA_BUF_SYNC_START ("begin_cpu_access")
+	 */
+
+	dmabuf_sync_start();
 
 	/* use dmabuf pointer to make the other fb all white too */
 	buf = malloc(fb->size);
@@ -126,6 +141,38 @@ static void test_begin_access(data_t *data)
 	/* check that the crc is as expected, which requires that caches got flushed */
 	igt_pipe_crc_collect_crc(data->pipe_crc, &crc);
 	igt_assert_crc_equal(&crc, &data->ref_crc);
+
+	/*
+	 * now demonstrate the need for DMA_BUF_SYNC_END ("end_cpu_access")
+	 */
+
+	/* start over, writing non-white to the fb again and flip to it to make it
+	 * fully flushed */
+	cr = igt_get_cairo_ctx(data->drm_fd, fb);
+	igt_paint_test_pattern(cr, fb->width, fb->height);
+	cairo_destroy(cr);
+
+	igt_plane_set_fb(data->primary, fb);
+	igt_display_commit(display);
+
+	/* sync start, to move to CPU domain */
+	dmabuf_sync_start();
+
+	/* use dmabuf pointer in the same fb to make it all white */
+	buf = malloc(fb->size);
+	igt_assert(buf != NULL);
+	memset(buf, 0xff, fb->size);
+	memcpy(ptr, buf, fb->size);
+	free(buf);
+
+	/* there's an implicit flush in set_fb() as well (to set to the GTT domain),
+	 * so if we don't do it and instead write directly into the fb as it is the
+	 * scanout, that should demonstrate the need for end_cpu_access */
+	dmabuf_sync_end();
+
+	/* check that the crc is as expected, which requires that caches got flushed */
+	igt_pipe_crc_collect_crc(data->pipe_crc, &crc);
+	igt_assert_crc_equal(&crc, &data->ref_crc);
 }
 
 static bool prepare_crtc(data_t *data)