This patchset implements the CRC debugfs API and adds the infrastructure required to compute and add CRC entries.
1. Add functions to map/unmap buffers to the kernel address space.
2. Map/unmap buffers in the prepare/cleanup_fb hooks.
3. Compute the CRC using crc32 on the visible portion of the buffer, with appropriate synchronization.
Haneen Mohammed (5):
  drm/vkms: Add functions to map/unmap GEM backing storage
  drm/vkms: map/unmap buffers in [prepare/cleanup]_fb hooks
  drm/vkms: Add atomic_helper_check_plane_state
  drm/vkms: subclass CRTC state
  drm/vkms: Implement CRC debugfs API
 drivers/gpu/drm/vkms/Makefile     |   2 +-
 drivers/gpu/drm/vkms/vkms_crc.c   |  66 ++++++++++++++++++
 drivers/gpu/drm/vkms/vkms_crtc.c  | 109 +++++++++++++++++++++++++++---
 drivers/gpu/drm/vkms/vkms_drv.c   |   1 +
 drivers/gpu/drm/vkms/vkms_drv.h   |  41 +++++++++++
 drivers/gpu/drm/vkms/vkms_gem.c   |  80 +++++++++++++++++++++-
 drivers/gpu/drm/vkms/vkms_plane.c |  74 ++++++++++++++++++++
 7 files changed, 360 insertions(+), 13 deletions(-)
 create mode 100644 drivers/gpu/drm/vkms/vkms_crc.c
This patch adds the necessary functions to map/unmap GEM backing memory to/from the kernel's virtual address space.
Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
---
Changes in v2:
- add vkms_gem_vunmap
- use vmap_count to guard against multiple prepare_fb calls on the same fb

Changes in v3:
- change vkms_gem_vmap to return int
- clean up if vmap failed
- increment vmap_count after successful vmap
 drivers/gpu/drm/vkms/vkms_drv.h |  9 ++++
 drivers/gpu/drm/vkms/vkms_gem.c | 80 ++++++++++++++++++++++++++++++++-
 2 files changed, 88 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 07be29f2dc44..47048f707566 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -39,6 +39,8 @@ struct vkms_gem_object { struct drm_gem_object gem; struct mutex pages_lock; /* Page lock used in page fault handler */ struct page **pages; + unsigned int vmap_count; + void *vaddr; };
#define drm_crtc_to_vkms_output(target) \ @@ -47,6 +49,9 @@ struct vkms_gem_object { #define drm_device_to_vkms_device(target) \ container_of(target, struct vkms_device, drm)
+#define drm_gem_to_vkms_gem(target)\ + container_of(target, struct vkms_gem_object, gem) + /* CRTC */ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor); @@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
void vkms_gem_free_object(struct drm_gem_object *obj);
+int vkms_gem_vmap(struct drm_gem_object *obj); + +void vkms_gem_vunmap(struct drm_gem_object *obj); + #endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index c7e38368602b..2e55c906d9b2 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj) struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object, gem);
- kvfree(gem->pages); + WARN_ON(gem->pages); + WARN_ON(gem->vaddr); + mutex_destroy(&gem->pages_lock); drm_gem_object_release(obj); kfree(gem); @@ -177,3 +179,79 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
return ret; } + +static struct page **_get_pages(struct vkms_gem_object *vkms_obj) +{ + struct drm_gem_object *gem_obj = &vkms_obj->gem; + + if (!vkms_obj->pages) { + struct page **pages = drm_gem_get_pages(gem_obj); + + if (IS_ERR(pages)) + return pages; + + if (cmpxchg(&vkms_obj->pages, NULL, pages)) + drm_gem_put_pages(gem_obj, pages, false, true); + } + + return vkms_obj->pages; +} + +void vkms_gem_vunmap(struct drm_gem_object *obj) +{ + struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj); + + mutex_lock(&vkms_obj->pages_lock); + if (vkms_obj->vmap_count < 1) { + WARN_ON(vkms_obj->vaddr); + WARN_ON(vkms_obj->pages); + mutex_unlock(&vkms_obj->pages_lock); + return; + } + + vkms_obj->vmap_count--; + + if (vkms_obj->vmap_count == 0) { + vunmap(vkms_obj->vaddr); + vkms_obj->vaddr = NULL; + drm_gem_put_pages(obj, vkms_obj->pages, false, true); + vkms_obj->pages = NULL; + } + + mutex_unlock(&vkms_obj->pages_lock); +} + +int vkms_gem_vmap(struct drm_gem_object *obj) +{ + struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj); + int ret = 0; + + mutex_lock(&vkms_obj->pages_lock); + + if (!vkms_obj->vaddr) { + unsigned int n_pages = obj->size >> PAGE_SHIFT; + struct page **pages = _get_pages(vkms_obj); + + if (IS_ERR(pages)) { + ret = PTR_ERR(pages); + goto fail; + } + + vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL); + if (!vkms_obj->vaddr) { + ret = -ENOMEM; + drm_gem_put_pages(obj, vkms_obj->pages, false, true); + vkms_obj->pages = NULL; + goto fail; + } + + vkms_obj->vmap_count++; + } + + mutex_unlock(&vkms_obj->pages_lock); + return 0; + +fail: + mutex_unlock(&vkms_obj->pages_lock); + return ret; +}
On Thu, Jul 19, 2018 at 03:18:03AM +0300, Haneen Mohammed wrote:
This patch adds the necessary functions to map/unmap GEM backing memory to/from the kernel's virtual address space.
Signed-off-by: Haneen Mohammed hamohammed.sa@gmail.com
Changes in v2:
- add vkms_gem_vunmap
- use vmap_count to guard against multiple prepare_fb calls on the same fb
Changes in v3:
- change vkms_gem_vmap to return int
- cleanup if vmap failed
- increment vmap_count after successful vmap
drivers/gpu/drm/vkms/vkms_drv.h | 9 ++++ drivers/gpu/drm/vkms/vkms_gem.c | 80 ++++++++++++++++++++++++++++++++- 2 files changed, 88 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 07be29f2dc44..47048f707566 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -39,6 +39,8 @@ struct vkms_gem_object { struct drm_gem_object gem; struct mutex pages_lock; /* Page lock used in page fault handler */ struct page **pages;
+	unsigned int vmap_count;
+	void *vaddr;
};
#define drm_crtc_to_vkms_output(target) \ @@ -47,6 +49,9 @@ struct vkms_gem_object { #define drm_device_to_vkms_device(target) \ container_of(target, struct vkms_device, drm)
+#define drm_gem_to_vkms_gem(target)\
+	container_of(target, struct vkms_gem_object, gem)
/* CRTC */ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor); @@ -75,4 +80,8 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
void vkms_gem_free_object(struct drm_gem_object *obj);
+int vkms_gem_vmap(struct drm_gem_object *obj);
+void vkms_gem_vunmap(struct drm_gem_object *obj);
#endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index c7e38368602b..2e55c906d9b2 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -37,7 +37,9 @@ void vkms_gem_free_object(struct drm_gem_object *obj) struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object, gem);
- kvfree(gem->pages);
+	WARN_ON(gem->pages);
+	WARN_ON(gem->vaddr);
+
 	mutex_destroy(&gem->pages_lock);
 	drm_gem_object_release(obj);
 	kfree(gem);
@@ -177,3 +179,79 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
return ret; }
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+	struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+	if (!vkms_obj->pages) {
+		struct page **pages = drm_gem_get_pages(gem_obj);
+
+		if (IS_ERR(pages))
+			return pages;
+
+		if (cmpxchg(&vkms_obj->pages, NULL, pages))
+			drm_gem_put_pages(gem_obj, pages, false, true);
+	}
+
+	return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+	mutex_lock(&vkms_obj->pages_lock);
+	if (vkms_obj->vmap_count < 1) {
+		WARN_ON(vkms_obj->vaddr);
+		WARN_ON(vkms_obj->pages);
+		mutex_unlock(&vkms_obj->pages_lock);
+		return;
+	}
+
+	vkms_obj->vmap_count--;
+
+	if (vkms_obj->vmap_count == 0) {
+		vunmap(vkms_obj->vaddr);
+		vkms_obj->vaddr = NULL;
+		drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+		vkms_obj->pages = NULL;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+	int ret = 0;
+
+	mutex_lock(&vkms_obj->pages_lock);
+
+	if (!vkms_obj->vaddr) {
+		unsigned int n_pages = obj->size >> PAGE_SHIFT;
+		struct page **pages = _get_pages(vkms_obj);
+
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+			goto fail;
+		}
+
+		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+		if (!vkms_obj->vaddr) {
+			ret = -ENOMEM;
+			drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+			vkms_obj->pages = NULL;
+			goto fail;
+		}
+
+		vkms_obj->vmap_count++;
+	}
+
+	mutex_unlock(&vkms_obj->pages_lock);
+	return 0;
+
+fail:
Since fail does the same as the success path, there are 2 ways of handling this that would be (IMO) better:
1- Rename fail to out and remove the redundant 2 lines above
2- Add a new label above fail called err_vmap and do the ret/put_pages/pages=NULL cleanup there, dropping into the unlock and return after.
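For illustration, option 2 might end up looking something like this (just a sketch of the control flow, not an actual respin):

int vkms_gem_vmap(struct drm_gem_object *obj)
{
	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
	int ret = 0;

	mutex_lock(&vkms_obj->pages_lock);

	if (!vkms_obj->vaddr) {
		unsigned int n_pages = obj->size >> PAGE_SHIFT;
		struct page **pages = _get_pages(vkms_obj);

		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
		if (!vkms_obj->vaddr)
			goto err_vmap;

		vkms_obj->vmap_count++;
	}
	goto out;

err_vmap:
	/* vmap failed, so drop the pages grabbed above */
	ret = -ENOMEM;
	drm_gem_put_pages(obj, vkms_obj->pages, false, true);
	vkms_obj->pages = NULL;
out:
	mutex_unlock(&vkms_obj->pages_lock);
	return ret;
}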
With either of these changes,
Reviewed-by: Sean Paul seanpaul@chromium.org
Sean
+	mutex_unlock(&vkms_obj->pages_lock);
+	return ret;
+}
This patch maps/unmaps GEM backing memory to/from the kernel address space in prepare_fb/cleanup_fb respectively and caches the virtual address for later use.
Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
---
Changes in v2:
- use vkms_gem_vunmap

Changes in v3:
- return error number instead of vkms_obj->vaddr
drivers/gpu/drm/vkms/vkms_plane.c | 35 +++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+)
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 9f75b1e2c1c4..aa6cde34710c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -9,6 +9,7 @@ #include "vkms_drv.h" #include <drm/drm_plane_helper.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h>
static const struct drm_plane_funcs vkms_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, @@ -24,8 +25,42 @@ static void vkms_primary_plane_update(struct drm_plane *plane, { }
+static int vkms_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_gem_object *gem_obj; + struct vkms_gem_object *vkms_obj; + int ret; + + if (!state->fb) + return 0; + + gem_obj = drm_gem_fb_get_obj(state->fb, 0); + vkms_obj = drm_gem_to_vkms_gem(gem_obj); + ret = vkms_gem_vmap(gem_obj); + + if (ret) + DRM_ERROR("vmap failed\n"); + + return drm_gem_fb_prepare_fb(plane, state); +} + +static void vkms_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_gem_object *gem_obj; + + if (!old_state->fb) + return; + + gem_obj = drm_gem_fb_get_obj(old_state->fb, 0); + vkms_gem_vunmap(gem_obj); +} + static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { .atomic_update = vkms_primary_plane_update, + .prepare_fb = vkms_prepare_fb, + .cleanup_fb = vkms_cleanup_fb, };
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
On Thu, Jul 19, 2018 at 03:18:55AM +0300, Haneen Mohammed wrote:
This patch maps/unmaps GEM backing memory to/from the kernel address space in prepare_fb/cleanup_fb respectively and caches the virtual address for later use.
Signed-off-by: Haneen Mohammed hamohammed.sa@gmail.com
Changes in v2:
- use vkms_gem_vunmap
Changes in v3:
- return error number instead of vkms_obj->vaddr
drivers/gpu/drm/vkms/vkms_plane.c | 35 +++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+)
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 9f75b1e2c1c4..aa6cde34710c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -9,6 +9,7 @@ #include "vkms_drv.h" #include <drm/drm_plane_helper.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_gem_framebuffer_helper.h>
static const struct drm_plane_funcs vkms_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, @@ -24,8 +25,42 @@ static void vkms_primary_plane_update(struct drm_plane *plane, { }
+static int vkms_prepare_fb(struct drm_plane *plane,
+			   struct drm_plane_state *state)
+{
+	struct drm_gem_object *gem_obj;
+	struct vkms_gem_object *vkms_obj;
+	int ret;
+
+	if (!state->fb)
+		return 0;
+
+	gem_obj = drm_gem_fb_get_obj(state->fb, 0);
+	vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+	ret = vkms_gem_vmap(gem_obj);
+
nit: extra line
+	if (ret)
+		DRM_ERROR("vmap failed\n");
Can we actually do anything useful if the vmap fails? Asked another way, what's the value in not returning the error here? (We should also print the value of ret).
Sean
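If the error were propagated, the tail of vkms_prepare_fb() could look roughly like this (a sketch of what the question implies, not the actual patch):

	ret = vkms_gem_vmap(gem_obj);
	if (ret) {
		DRM_ERROR("vmap failed: %d\n", ret);
		return ret;
	}

	return drm_gem_fb_prepare_fb(plane, state);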
+
+	return drm_gem_fb_prepare_fb(plane, state);
+}
+
+static void vkms_cleanup_fb(struct drm_plane *plane,
+			    struct drm_plane_state *old_state)
+{
+	struct drm_gem_object *gem_obj;
+
+	if (!old_state->fb)
+		return;
+
+	gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
+	vkms_gem_vunmap(gem_obj);
+}
+
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { .atomic_update = vkms_primary_plane_update,
+	.prepare_fb = vkms_prepare_fb,
+	.cleanup_fb = vkms_cleanup_fb,
};
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
Call drm_atomic_helper_check_plane_state() to clip the plane coordinates.
Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
---
Changes in v2:
- check for plane_state->visible since we can't handle a disabled primary plane yet.
drivers/gpu/drm/vkms/vkms_plane.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+)
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index aa6cde34710c..5b7c5d59f103 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -8,6 +8,7 @@
#include "vkms_drv.h" #include <drm/drm_plane_helper.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h>
@@ -25,6 +26,33 @@ static void vkms_primary_plane_update(struct drm_plane *plane, { }
+static int vkms_plane_atomic_check(struct drm_plane *plane,
+				   struct drm_plane_state *state)
+{
+	struct drm_crtc_state *crtc_state;
+	int ret;
+
+	if (!state->fb || !state->crtc)
+		return 0;
+
+	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+	if (IS_ERR(crtc_state))
+		return PTR_ERR(crtc_state);
+
+	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  DRM_PLANE_HELPER_NO_SCALING,
+						  false, true);
+	if (ret != 0)
+		return ret;
+
+	/* for now primary plane must be visible and full screen */
+	if (!state->visible)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int vkms_prepare_fb(struct drm_plane *plane,
 			   struct drm_plane_state *state)
 {
@@ -59,6 +87,7 @@ static void vkms_cleanup_fb(struct drm_plane *plane,
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = { .atomic_update = vkms_primary_plane_update, + .atomic_check = vkms_plane_atomic_check, .prepare_fb = vkms_prepare_fb, .cleanup_fb = vkms_cleanup_fb, };
Subclass the CRTC state struct to allow storing driver-private state. This patch only adds the base drm_crtc_state member and the atomic functions that handle it.
Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/vkms/vkms_crtc.c | 53 ++++++++++++++++++++++++++++++--
 drivers/gpu/drm/vkms/vkms_drv.h  | 11 +++++++
 2 files changed, 61 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 875fca662ac0..26babb85ca77 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -64,13 +64,60 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, return true; }
+static void vkms_atomic_crtc_reset(struct drm_crtc *crtc) +{ + struct vkms_crtc_state *vkms_state = NULL; + + if (crtc->state) { + vkms_state = to_vkms_crtc_state(crtc->state); + __drm_atomic_helper_crtc_destroy_state(crtc->state); + kfree(vkms_state); + crtc->state = NULL; + } + + vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); + if (!vkms_state) + return; + + crtc->state = &vkms_state->base; + crtc->state->crtc = crtc; +} + +static struct drm_crtc_state * +vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc) +{ + struct vkms_crtc_state *vkms_state; + + if (WARN_ON(!crtc->state)) + return NULL; + + vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL); + if (!vkms_state) + return NULL; + + __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base); + + return &vkms_state->base; +} + +static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + struct vkms_crtc_state *vkms_state; + + vkms_state = to_vkms_crtc_state(state); + + __drm_atomic_helper_crtc_destroy_state(state); + kfree(vkms_state); +} + static const struct drm_crtc_funcs vkms_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = drm_crtc_cleanup, .page_flip = drm_atomic_helper_page_flip, - .reset = drm_atomic_helper_crtc_reset, - .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, + .reset = vkms_atomic_crtc_reset, + .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state, + .atomic_destroy_state = vkms_atomic_crtc_destroy_state, .enable_vblank = vkms_enable_vblank, .disable_vblank = vkms_disable_vblank, }; diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 47048f707566..75e52d61e65d 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -20,6 +20,14 @@ static const u32 vkms_formats[] = { DRM_FORMAT_XRGB8888, };
+/** + * vkms_crtc_state - Driver specific CRTC state + * @base: base CRTC state + */ +struct vkms_crtc_state { + struct drm_crtc_state base; +}; + struct vkms_output { struct drm_crtc crtc; struct drm_encoder encoder; @@ -52,6 +60,9 @@ struct vkms_gem_object { #define drm_gem_to_vkms_gem(target)\ container_of(target, struct vkms_gem_object, gem)
+#define to_vkms_crtc_state(target)\ + container_of(target, struct vkms_crtc_state, base) + /* CRTC */ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor);
Implement the set_crc_source() callback. Compute the CRC using crc32 on the visible part of the framebuffer. Use an ordered workqueue to compute and add the CRC at the end of a vblank.
The CRC computation must be atomic with respect to the vblank event generated for a given atomic update, so use a spinlock across atomic_begin and atomic_flush to wrap the event-handling code completely and match the flip event with the CRC.
Signed-off-by: Haneen Mohammed <hamohammed.sa@gmail.com>
---
Changes in v3:
- flush work item instead of workqueue
- include missing vkms_crc.c file
drivers/gpu/drm/vkms/Makefile | 2 +- drivers/gpu/drm/vkms/vkms_crc.c | 66 +++++++++++++++++++++++++++++++ drivers/gpu/drm/vkms/vkms_crtc.c | 58 ++++++++++++++++++++++----- drivers/gpu/drm/vkms/vkms_drv.c | 1 + drivers/gpu/drm/vkms/vkms_drv.h | 21 ++++++++++ drivers/gpu/drm/vkms/vkms_plane.c | 10 +++++ 6 files changed, 148 insertions(+), 10 deletions(-) create mode 100644 drivers/gpu/drm/vkms/vkms_crc.c
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 986297da51bf..37966914f70b 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -1,3 +1,3 @@ -vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o +vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_crc.o
obj-$(CONFIG_DRM_VKMS) += vkms.o diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c new file mode 100644 index 000000000000..9e8bb5c7fc80 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_crc.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vkms_drv.h" +#include <linux/crc32.h> +#include <drm/drm_gem_framebuffer_helper.h> + +static uint32_t _vkms_get_crc(struct vkms_crc_data *crc_data) +{ + struct drm_framebuffer *fb = &crc_data->fb; + struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0); + struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj); + u32 crc = 0; + int i = 0; + unsigned int x = crc_data->src.x1 >> 16; + unsigned int y = crc_data->src.y1 >> 16; + unsigned int height = drm_rect_height(&crc_data->src) >> 16; + unsigned int width = drm_rect_width(&crc_data->src) >> 16; + unsigned int cpp = fb->format->cpp[0]; + unsigned int src_offset; + unsigned int size_byte = width * cpp; + void *vaddr = vkms_obj->vaddr; + + if (WARN_ON(!vaddr)) + return crc; + + for (i = y; i < y + height; i++) { + src_offset = fb->offsets[0] + (i * fb->pitches[0]) + (x * cpp); + crc = crc32_le(crc, vaddr + src_offset, size_byte); + } + + return crc; +} + +void vkms_crc_work_handle(struct work_struct *work) +{ + struct vkms_crtc_state *crtc_state = container_of(work, + struct vkms_crtc_state, + crc_work); + struct drm_crtc *crtc = crtc_state->base.crtc; + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + + if (crtc_state && out->crc_enabled) { + u32 crc32 = _vkms_get_crc(&crtc_state->data); + unsigned int n_frame = crtc_state->n_frame; + + drm_crtc_add_crc_entry(crtc, true, n_frame, &crc32); + } +} + +int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name, + size_t *values_cnt) +{ + struct vkms_output *out = drm_crtc_to_vkms_output(crtc); + + if (!src_name) { + out->crc_enabled = false; + } else if (strcmp(src_name, "auto") == 0) { + out->crc_enabled = true; + } else { + out->crc_enabled = false; + return -EINVAL; + } + + *values_cnt = 1; + + return 0; +} diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 26babb85ca77..a7b39627b581 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -10,17 +10,32 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h>
-static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) +static void _vblank_handle(struct vkms_output *output) { - struct vkms_output *output = container_of(timer, struct vkms_output, - vblank_hrtimer); struct drm_crtc *crtc = &output->crtc; - int ret_overrun; + struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state); bool ret;
+ spin_lock(&output->lock); ret = drm_crtc_handle_vblank(crtc); if (!ret) - DRM_ERROR("vkms failure on handling vblank"); + DRM_ERROR("vkms failure on handling vblank\n"); + + if (state && output->crc_enabled) { + state->n_frame = drm_crtc_accurate_vblank_count(crtc); + queue_work(output->crc_workq, &state->crc_work); + } + + spin_unlock(&output->lock); +} + +static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) +{ + struct vkms_output *output = container_of(timer, struct vkms_output, + vblank_hrtimer); + int ret_overrun; + + _vblank_handle(output);
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, output->period_ns); @@ -97,6 +112,8 @@ vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
+ INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle); + return &vkms_state->base; }
@@ -108,7 +125,13 @@ static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc, vkms_state = to_vkms_crtc_state(state);
__drm_atomic_helper_crtc_destroy_state(state); - kfree(vkms_state); + + if (vkms_state) { + flush_work(&vkms_state->crc_work); + drm_framebuffer_put(&vkms_state->data.fb); + memset(&vkms_state->data, 0, sizeof(struct vkms_crc_data)); + kfree(vkms_state); + } }
static const struct drm_crtc_funcs vkms_crtc_funcs = { @@ -120,6 +143,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = { .atomic_destroy_state = vkms_atomic_crtc_destroy_state, .enable_vblank = vkms_enable_vblank, .disable_vblank = vkms_disable_vblank, + .set_crc_source = vkms_set_crc_source, };
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, @@ -134,26 +158,37 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); }
+static void vkms_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc); + + spin_lock_irq(&vkms_output->lock); +} + static void vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { - unsigned long flags; + struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
if (crtc->state->event) { - spin_lock_irqsave(&crtc->dev->event_lock, flags); + spin_lock(&crtc->dev->event_lock);
if (drm_crtc_vblank_get(crtc) != 0) drm_crtc_send_vblank_event(crtc, crtc->state->event); else drm_crtc_arm_vblank_event(crtc, crtc->state->event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + spin_unlock(&crtc->dev->event_lock);
crtc->state->event = NULL; } + + spin_unlock_irq(&vkms_output->lock); }
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = { + .atomic_begin = vkms_crtc_atomic_begin, .atomic_flush = vkms_crtc_atomic_flush, .atomic_enable = vkms_crtc_atomic_enable, .atomic_disable = vkms_crtc_atomic_disable, @@ -162,6 +197,7 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = { int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor) { + struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc); int ret;
ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, @@ -173,5 +209,9 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+ spin_lock_init(&vkms_out->lock); + + vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0); + return ret; } diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 37aa2ef33b21..5d78bd97e69c 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -46,6 +46,7 @@ static void vkms_release(struct drm_device *dev) platform_device_unregister(vkms->platform); drm_mode_config_cleanup(&vkms->drm); drm_dev_fini(&vkms->drm); + destroy_workqueue(vkms->output.crc_workq); }
static struct drm_driver vkms_driver = { diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 75e52d61e65d..a80281a12010 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -20,12 +20,23 @@ static const u32 vkms_formats[] = { DRM_FORMAT_XRGB8888, };
+struct vkms_crc_data { + struct drm_rect src; + struct drm_framebuffer fb; +}; + /** * vkms_crtc_state - Driver specific CRTC state * @base: base CRTC state + * @crc_work: work struct to compute and add CRC entries + * @data: data required for CRC computation + * @n_frame: frame number for computed CRC */ struct vkms_crtc_state { struct drm_crtc_state base; + struct work_struct crc_work; + struct vkms_crc_data data; + unsigned int n_frame; };
struct vkms_output { @@ -35,6 +46,11 @@ struct vkms_output { struct hrtimer vblank_hrtimer; ktime_t period_ns; struct drm_pending_vblank_event *event; + bool crc_enabled; + /* ordered wq for crc_work */ + struct workqueue_struct *crc_workq; + /* protects concurrent access to crc_data */ + spinlock_t lock; };
struct vkms_device { @@ -95,4 +111,9 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
+/* CRC Support */ +int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name, + size_t *values_cnt); +void vkms_crc_work_handle(struct work_struct *work); + #endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 5b7c5d59f103..b316b2af88ee 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -24,6 +24,16 @@ static const struct drm_plane_funcs vkms_plane_funcs = { static void vkms_primary_plane_update(struct drm_plane *plane, struct drm_plane_state *old_state) { + struct vkms_crtc_state *state; + + if (!plane->state->crtc || !plane->state->fb) + return; + + state = to_vkms_crtc_state(plane->state->crtc->state); + memcpy(&state->data.src, &plane->state->src, sizeof(struct drm_rect)); + memcpy(&state->data.fb, plane->state->fb, + sizeof(struct drm_framebuffer)); + drm_framebuffer_get(&state->data.fb); }
static int vkms_plane_atomic_check(struct drm_plane *plane,
On Thu, Jul 19, 2018 at 03:22:16AM +0300, Haneen Mohammed wrote:
Implement the set_crc_source() callback. Compute the CRC using crc32 on the visible part of the framebuffer. Use an ordered workqueue to compute and add the CRC at the end of a vblank.
The CRC computation must be atomic with respect to the vblank event generated for a given atomic update, so use a spinlock across atomic_begin and atomic_flush to wrap the event-handling code completely and match the flip event with the CRC.
Signed-off-by: Haneen Mohammed hamohammed.sa@gmail.com
Changes in v3:
- flush work item instead of workqueue
- include missing vkms_crc.c file
drivers/gpu/drm/vkms/Makefile | 2 +- drivers/gpu/drm/vkms/vkms_crc.c | 66 +++++++++++++++++++++++++++++++ drivers/gpu/drm/vkms/vkms_crtc.c | 58 ++++++++++++++++++++++----- drivers/gpu/drm/vkms/vkms_drv.c | 1 + drivers/gpu/drm/vkms/vkms_drv.h | 21 ++++++++++ drivers/gpu/drm/vkms/vkms_plane.c | 10 +++++ 6 files changed, 148 insertions(+), 10 deletions(-) create mode 100644 drivers/gpu/drm/vkms/vkms_crc.c
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 986297da51bf..37966914f70b 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -1,3 +1,3 @@ -vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o +vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_crc.o
obj-$(CONFIG_DRM_VKMS) += vkms.o diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c new file mode 100644 index 000000000000..9e8bb5c7fc80 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_crc.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vkms_drv.h" +#include <linux/crc32.h> +#include <drm/drm_gem_framebuffer_helper.h>
+static uint32_t _vkms_get_crc(struct vkms_crc_data *crc_data)
+{
+	struct drm_framebuffer *fb = &crc_data->fb;
+	struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
+	struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+	u32 crc = 0;
+	int i = 0;
+	unsigned int x = crc_data->src.x1 >> 16;
+	unsigned int y = crc_data->src.y1 >> 16;
+	unsigned int height = drm_rect_height(&crc_data->src) >> 16;
+	unsigned int width = drm_rect_width(&crc_data->src) >> 16;
+	unsigned int cpp = fb->format->cpp[0];
+	unsigned int src_offset;
+	unsigned int size_byte = width * cpp;
+	void *vaddr = vkms_obj->vaddr;
You should hold the lock when copying the data from crc_data.
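One way to do that (a sketch, assuming the snapshot happens at the top of vkms_crc_work_handle() under vkms_output->lock) would be:

	struct vkms_crc_data crc_data;
	u32 crc32;

	/* snapshot crc_data so a concurrent plane update can't change it
	 * while the CRC is being computed
	 */
	spin_lock_irq(&out->lock);
	memcpy(&crc_data, &crtc_state->data, sizeof(crc_data));
	spin_unlock_irq(&out->lock);

	crc32 = _vkms_get_crc(&crc_data);
	drm_crtc_add_crc_entry(crtc, true, crtc_state->n_frame, &crc32);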
+	if (WARN_ON(!vaddr))
+		return crc;
+
+	for (i = y; i < y + height; i++) {
+		src_offset = fb->offsets[0] + (i * fb->pitches[0]) + (x * cpp);
+		crc = crc32_le(crc, vaddr + src_offset, size_byte);
+	}
+
+	return crc;
+}
+
+void vkms_crc_work_handle(struct work_struct *work)
+{
+	struct vkms_crtc_state *crtc_state = container_of(work,
+						struct vkms_crtc_state,
+						crc_work);
+	struct drm_crtc *crtc = crtc_state->base.crtc;
+	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
+	if (crtc_state && out->crc_enabled) {
+		u32 crc32 = _vkms_get_crc(&crtc_state->data);
+		unsigned int n_frame = crtc_state->n_frame;
+
+		drm_crtc_add_crc_entry(crtc, true, n_frame, &crc32);
+	}
+}
+
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name,
+			size_t *values_cnt)
+{
+	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+
You should hold the lock when changing this value.
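Taking the lock around the update could look like this (a sketch, assuming a local ret that is returned after *values_cnt is set rather than the early return -EINVAL):

	spin_lock_irq(&out->lock);
	if (!src_name) {
		out->crc_enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		out->crc_enabled = true;
	} else {
		out->crc_enabled = false;
		ret = -EINVAL;
	}
	spin_unlock_irq(&out->lock);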
+	if (!src_name) {
+		out->crc_enabled = false;
+	} else if (strcmp(src_name, "auto") == 0) {
+		out->crc_enabled = true;
+	} else {
+		out->crc_enabled = false;
+		return -EINVAL;
+	}
+
+	*values_cnt = 1;
+
+	return 0;
+} diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 26babb85ca77..a7b39627b581 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -10,17 +10,32 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h>
-static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+static void _vblank_handle(struct vkms_output *output)
 {
-	struct vkms_output *output = container_of(timer, struct vkms_output,
-						   vblank_hrtimer);
 	struct drm_crtc *crtc = &output->crtc;
-	int ret_overrun;
+	struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
 	bool ret;
 
+	spin_lock(&output->lock);
 	ret = drm_crtc_handle_vblank(crtc);
 	if (!ret)
-		DRM_ERROR("vkms failure on handling vblank");
+		DRM_ERROR("vkms failure on handling vblank\n");
+
+	if (state && output->crc_enabled) {
+		state->n_frame = drm_crtc_accurate_vblank_count(crtc);
+		queue_work(output->crc_workq, &state->crc_work);
+	}
+
+	spin_unlock(&output->lock);
+}
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+	struct vkms_output *output = container_of(timer, struct vkms_output,
+						   vblank_hrtimer);
+	int ret_overrun;
+
+	_vblank_handle(output);
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, output->period_ns);
@@ -97,6 +112,8 @@ vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
+	INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+
 	return &vkms_state->base;
}
@@ -108,7 +125,13 @@ static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc, vkms_state = to_vkms_crtc_state(state);
__drm_atomic_helper_crtc_destroy_state(state);
- kfree(vkms_state);
+
+	if (vkms_state) {
+		flush_work(&vkms_state->crc_work);
+		drm_framebuffer_put(&vkms_state->data.fb);
It'd be nice to leave a breadcrumb explaining where this reference is acquired. ie: /* dropping the reference we acquired in vkms_primary_plane_update() */
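i.e. something along these lines (a sketch, the exact wording doesn't matter):

	if (vkms_state) {
		flush_work(&vkms_state->crc_work);
		/* drop the reference taken in vkms_primary_plane_update() */
		drm_framebuffer_put(&vkms_state->data.fb);
		memset(&vkms_state->data, 0, sizeof(struct vkms_crc_data));
		kfree(vkms_state);
	}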
+		memset(&vkms_state->data, 0, sizeof(struct vkms_crc_data));
+		kfree(vkms_state);
+	}
}
static const struct drm_crtc_funcs vkms_crtc_funcs = { @@ -120,6 +143,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = { .atomic_destroy_state = vkms_atomic_crtc_destroy_state, .enable_vblank = vkms_enable_vblank, .disable_vblank = vkms_disable_vblank,
+	.set_crc_source = vkms_set_crc_source,
};
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc, @@ -134,26 +158,37 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); }
+static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+				   struct drm_crtc_state *old_crtc_state)
+{
+	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
+
+	spin_lock_irq(&vkms_output->lock);
Can you please add a comment explaining that we're holding this across the commit to block the vblank timer, and why.
+}
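Such a comment might read roughly like this (wording is only a suggestion):

	/* This lock is held across the atomic commit to block the vblank
	 * timer from queuing CRC work against a CRTC state that is still
	 * being committed; it is dropped in vkms_crtc_atomic_flush() once
	 * the flip event has been armed.
	 */
	spin_lock_irq(&vkms_output->lock);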
 static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
 				   struct drm_crtc_state *old_crtc_state)
 {
-	unsigned long flags;
+	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
 
 	if (crtc->state->event) {
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+		spin_lock(&crtc->dev->event_lock);
Either don't change this (since it's always safe to call _irqsave), or add a comment explaining why you don't need to.
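If the irqsave variant is dropped, a comment on the nesting could look like this (a sketch):

	if (crtc->state->event) {
		/* Plain spin_lock is enough here: vkms_output->lock was
		 * taken with spin_lock_irq() in atomic_begin, so interrupts
		 * are already disabled at this point.
		 */
		spin_lock(&crtc->dev->event_lock);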
if (drm_crtc_vblank_get(crtc) != 0) drm_crtc_send_vblank_event(crtc, crtc->state->event); else drm_crtc_arm_vblank_event(crtc, crtc->state->event);
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+		spin_unlock(&crtc->dev->event_lock);
crtc->state->event = NULL; }
+
+	spin_unlock_irq(&vkms_output->lock);
}
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+	.atomic_begin = vkms_crtc_atomic_begin,
 	.atomic_flush = vkms_crtc_atomic_flush,
 	.atomic_enable = vkms_crtc_atomic_enable,
 	.atomic_disable = vkms_crtc_atomic_disable,
@@ -162,6 +197,7 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = { int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor) {
+	struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
 	int ret;
ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
@@ -173,5 +209,9 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+	spin_lock_init(&vkms_out->lock);
+
+	vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
+
 	return ret;
} diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 37aa2ef33b21..5d78bd97e69c 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -46,6 +46,7 @@ static void vkms_release(struct drm_device *dev) platform_device_unregister(vkms->platform); drm_mode_config_cleanup(&vkms->drm); drm_dev_fini(&vkms->drm);
+	destroy_workqueue(vkms->output.crc_workq);
}
static struct drm_driver vkms_driver = { diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 75e52d61e65d..a80281a12010 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -20,12 +20,23 @@ static const u32 vkms_formats[] = { DRM_FORMAT_XRGB8888, };
+struct vkms_crc_data {
+	struct drm_rect src;
+	struct drm_framebuffer fb;
+};
 /**
  * vkms_crtc_state - Driver specific CRTC state
  * @base: base CRTC state
+ * @crc_work: work struct to compute and add CRC entries
+ * @data: data required for CRC computation
+ * @n_frame: frame number for computed CRC
  */
 struct vkms_crtc_state {
 	struct drm_crtc_state base;
+	struct work_struct crc_work;
+	struct vkms_crc_data data;
+	unsigned int n_frame;
 };
struct vkms_output { @@ -35,6 +46,11 @@ struct vkms_output { struct hrtimer vblank_hrtimer; ktime_t period_ns; struct drm_pending_vblank_event *event;
+	bool crc_enabled;
+	/* ordered wq for crc_work */
+	struct workqueue_struct *crc_workq;
+	/* protects concurrent access to crc_data */
+	spinlock_t lock;
};
struct vkms_device { @@ -95,4 +111,9 @@ int vkms_gem_vmap(struct drm_gem_object *obj);
void vkms_gem_vunmap(struct drm_gem_object *obj);
+/* CRC Support */ +int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name,
+			size_t *values_cnt);
+void vkms_crc_work_handle(struct work_struct *work);
#endif /* _VKMS_DRV_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 5b7c5d59f103..b316b2af88ee 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -24,6 +24,16 @@ static const struct drm_plane_funcs vkms_plane_funcs = { static void vkms_primary_plane_update(struct drm_plane *plane, struct drm_plane_state *old_state) {
+	struct vkms_crtc_state *state;
+
+	if (!plane->state->crtc || !plane->state->fb)
+		return;
+
+	state = to_vkms_crtc_state(plane->state->crtc->state);
+	memcpy(&state->data.src, &plane->state->src, sizeof(struct drm_rect));
+	memcpy(&state->data.fb, plane->state->fb,
+	       sizeof(struct drm_framebuffer));
+	drm_framebuffer_get(&state->data.fb);
Similarly to above, a comment explaining this is released in state_destroy would be helpful.
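e.g. (sketch):

	memcpy(&state->data.fb, plane->state->fb,
	       sizeof(struct drm_framebuffer));
	/* this reference is dropped in vkms_atomic_crtc_destroy_state() */
	drm_framebuffer_get(&state->data.fb);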
}
static int vkms_plane_atomic_check(struct drm_plane *plane,