I spoke with Rob on IRC about the set and he preferred the private object be isolated in mdp5. So here's the atomic helper set rebased on Archit's series.
One notable difference is that I've dropped the first patch from that series, which added a new private_obj lock to the core. Instead, the lock lives in mdp5_kms alongside the global state (the same place as the existing state_lock).
Thank you for your feedback thus far!
Sean
Archit Taneja (3):
  drm/msm/mdp5: Add global state as a private atomic object
  drm/msm/mdp5: Use the new private_obj state
  drm/msm: Don't subclass drm_atomic_state anymore
Sean Paul (5):
  drm/msm: Refactor complete_commit() to look more like the helpers
  drm/msm: Mark the crtc->state->event consumed
  drm/msm: Issue queued events when disabling crtc
  drm/msm: Remove msm_commit/worker, use atomic helper commit
  drm/msm: Switch to atomic_helper_commit()
 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c  |   1 +
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c  |  10 +
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c   | 105 ++++++---
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h   |  27 +--
 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c |  12 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c  |  20 +-
 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c   |  17 +-
 drivers/gpu/drm/msm/msm_atomic.c           | 236 +--------------------
 drivers/gpu/drm/msm/msm_drv.c              |  11 +-
 drivers/gpu/drm/msm/msm_drv.h              |   7 +-
 drivers/gpu/drm/msm/msm_kms.h              |  14 --
 11 files changed, 141 insertions(+), 319 deletions(-)
From: Archit Taneja <architt@codeaurora.org>
Global shared resources (hwpipes, hwmixers and SMP) for MDP5 are implemented as a part of atomic state by subclassing drm_atomic_state.
The preferred approach is to use the drm_private_obj infrastructure available in the atomic core.
mdp5_global_state is introduced as a drm atomic private object. The funcs mdp5_get_global_state() and mdp5_get_existing_global_state() are the two variants that will be used to access mdp5_global_state.
This will replace the existing mdp5_state struct (which subclasses drm_atomic_state) and the funcs around it. These will be removed later once mdp5_global_state is put to use everywhere.
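For orientation, the intended calling pattern for the two accessors is sketched below. mdp5_assign_some_resource() is a hypothetical placeholder (the real call sites are only converted in the next patch), but the accessors and cast helpers are the ones added here:

/* Sketch of an atomic_check-path helper using the new accessors. */
static int mdp5_assign_some_resource(struct drm_atomic_state *s)
{
	struct msm_drm_private *priv = s->dev->dev_private;
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
	struct mdp5_global_state *old_state, *new_state;

	/* Takes glob_state_lock and duplicates the private object state. */
	new_state = mdp5_get_global_state(s);
	if (IS_ERR(new_state))
		return PTR_ERR(new_state);

	/* Safe to read the existing state now that the lock is held. */
	old_state = mdp5_get_existing_global_state(mdp5_kms);

	/* ... compare old_state/new_state, pick a free hwpipe/mixer/SMP ... */

	return 0;
}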
Changes in v3:
- Added glob_state_lock instead of pushing it into the core
- Added to the msm atomic helper patch set
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 87 ++++++++++++++++++++++++
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 25 +++++++
 2 files changed, 112 insertions(+)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6d8e3a9a6fc0..fcbdef385a8a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -106,6 +106,86 @@ static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
 	swap(to_kms_state(state)->state, mdp5_kms->state);
 }
 
+/* Global/shared object state funcs */
+
+/*
+ * This is a helper that returns the private state currently in operation.
+ * Note that this would return the "old_state" if called in the atomic check
+ * path, and the "new_state" after the atomic swap has been done.
+ */
+struct mdp5_global_state *
+mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms)
+{
+	return to_mdp5_global_state(mdp5_kms->glob_state.state);
+}
+
+/*
+ * This acquires the modeset lock set aside for global state, creates
+ * a new duplicated private object state.
+ */
+struct mdp5_global_state *mdp5_get_global_state(struct drm_atomic_state *s)
+{
+	struct msm_drm_private *priv = s->dev->dev_private;
+	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+	struct drm_private_state *priv_state;
+	int ret;
+
+	ret = drm_modeset_lock(&mdp5_kms->glob_state_lock, s->acquire_ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	priv_state = drm_atomic_get_private_obj_state(s, &mdp5_kms->glob_state);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
+
+	return to_mdp5_global_state(priv_state);
+}
+
+static struct drm_private_state *
+mdp5_global_duplicate_state(struct drm_private_obj *obj)
+{
+	struct mdp5_global_state *state;
+
+	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+	return &state->base;
+}
+
+static void mdp5_global_destroy_state(struct drm_private_obj *obj,
+				      struct drm_private_state *state)
+{
+	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);
+
+	kfree(mdp5_state);
+}
+
+static const struct drm_private_state_funcs mdp5_global_state_funcs = {
+	.atomic_duplicate_state = mdp5_global_duplicate_state,
+	.atomic_destroy_state = mdp5_global_destroy_state,
+};
+
+static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)
+{
+	struct mdp5_global_state *state;
+
+	drm_modeset_lock_init(&mdp5_kms->glob_state_lock);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->mdp5_kms = mdp5_kms;
+
+	drm_atomic_private_obj_init(&mdp5_kms->glob_state,
+				    &state->base,
+				    &mdp5_global_state_funcs);
+	return 0;
+}
+
 static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -727,6 +807,9 @@ static void mdp5_destroy(struct platform_device *pdev)
 	if (mdp5_kms->rpm_enabled)
 		pm_runtime_disable(&pdev->dev);
 
+	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
+	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
+
 	kfree(mdp5_kms->state);
 }
 
@@ -887,6 +970,10 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 		goto fail;
 	}
 
+	ret = mdp5_global_obj_init(mdp5_kms);
+	if (ret)
+		goto fail;
+
 	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
 	if (IS_ERR(mdp5_kms->mmio)) {
 		ret = PTR_ERR(mdp5_kms->mmio);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 425a03d213e5..76f0ddfca322 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -55,6 +55,13 @@ struct mdp5_kms {
 	struct mdp5_state *state;
 	struct drm_modeset_lock state_lock;
 
+	/*
+	 * Global private object state, Do not access directly, use
+	 * mdp5_global_get_state()
+	 */
+	struct drm_modeset_lock glob_state_lock;
+	struct drm_private_obj glob_state;
+
 	struct mdp5_smp *smp;
 	struct mdp5_ctl_manager *ctlm;
 
@@ -95,6 +102,24 @@ struct mdp5_state {
 struct mdp5_state *__must_check
 mdp5_get_state(struct drm_atomic_state *s);
 
+/* Global private object state for tracking resources that are shared across
+ * multiple kms objects (planes/crtcs/etc).
+ */
+#define to_mdp5_global_state(x) container_of(x, struct mdp5_global_state, base)
+struct mdp5_global_state {
+	struct drm_private_state base;
+
+	struct drm_atomic_state *state;
+	struct mdp5_kms *mdp5_kms;
+
+	struct mdp5_hw_pipe_state hwpipe;
+	struct mdp5_hw_mixer_state hwmixer;
+	struct mdp5_smp_state smp;
+};
+
+struct mdp5_global_state * mdp5_get_existing_global_state(struct mdp5_kms *mdp5_kms);
+struct mdp5_global_state *__must_check mdp5_get_global_state(struct drm_atomic_state *s);
+
 /* Atomic plane state. Subclasses the base drm_plane_state in order to
  * track assigned hwpipe and hw specific state.
  */
From: Archit Taneja <architt@codeaurora.org>
This replaces the usage of the subclassed atomic state (mdp5_state) with a private_obj state embedded within drm_atomic_state. The latter method is the preferred approach, since it's simpler to implement and less prone to errors.
The new API replaces the older and equivalent mdp5_state usage in the following pattern:

- References to "mdp5_kms->state" (i.e., the old/existing state) are replaced
  with mdp5_get_existing_global_state(). In the atomic_check path, this should
  be called with the glob_state_lock drm_modeset_lock already taken.
- References to "mdp5_get_state()" are replaced with mdp5_get_global_state().
  This acquires glob_state_lock and uses drm_atomic_get_private_obj_state()
  to create a new duplicated state.
Changes in v3:
- Acquire glob_state_lock in mdp5_smp.c
- Added to the msm atomic helper patch set
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c   | 10 ++++++++--
 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c | 12 ++++++------
 drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c  | 20 +++++++++++---------
 drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c   | 17 ++++++++++++-----
 4 files changed, 37 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index fcbdef385a8a..6ada098dba0b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -190,20 +190,26 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
 	struct device *dev = &mdp5_kms->pdev->dev;
+	struct mdp5_global_state *global_state;
+
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
 
 	pm_runtime_get_sync(dev);
 
 	if (mdp5_kms->smp)
-		mdp5_smp_prepare_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+		mdp5_smp_prepare_commit(mdp5_kms->smp, &global_state->smp);
 }
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
 	struct device *dev = &mdp5_kms->pdev->dev;
+	struct mdp5_global_state *global_state;
+
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
 
 	if (mdp5_kms->smp)
-		mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
+		mdp5_smp_complete_commit(mdp5_kms->smp, &global_state->smp);
 
 	pm_runtime_put_sync(dev);
 }
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 8a00991f03c7..113e6b569562 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
@@ -52,14 +52,14 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
 {
 	struct msm_drm_private *priv = s->dev->dev_private;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct mdp5_state *state = mdp5_get_state(s);
+	struct mdp5_global_state *global_state = mdp5_get_global_state(s);
 	struct mdp5_hw_mixer_state *new_state;
 	int i;
 
-	if (IS_ERR(state))
-		return PTR_ERR(state);
+	if (IS_ERR(global_state))
+		return PTR_ERR(global_state);
 
-	new_state = &state->hwmixer;
+	new_state = &global_state->hwmixer;
 
 	for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
 		struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
@@ -129,8 +129,8 @@ int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
 
 void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
 {
-	struct mdp5_state *state = mdp5_get_state(s);
-	struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
+	struct mdp5_global_state *global_state = mdp5_get_global_state(s);
+	struct mdp5_hw_mixer_state *new_state = &global_state->hwmixer;
 
 	if (!mixer)
 		return;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ff52c49095f9..1ef26bc63163 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
@@ -24,17 +24,19 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
 {
 	struct msm_drm_private *priv = s->dev->dev_private;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct mdp5_state *state;
+	struct mdp5_global_state *new_global_state, *old_global_state;
 	struct mdp5_hw_pipe_state *old_state, *new_state;
 	int i, j;
 
-	state = mdp5_get_state(s);
-	if (IS_ERR(state))
-		return PTR_ERR(state);
+	new_global_state = mdp5_get_global_state(s);
+	if (IS_ERR(new_global_state))
+		return PTR_ERR(new_global_state);
 
-	/* grab old_state after mdp5_get_state(), since now we hold lock: */
-	old_state = &mdp5_kms->state->hwpipe;
-	new_state = &state->hwpipe;
+	/* grab old_state after mdp5_get_global_state(), since now we hold lock: */
+	old_global_state = mdp5_get_existing_global_state(mdp5_kms);
+
+	old_state = &old_global_state->hwpipe;
+	new_state = &new_global_state->hwpipe;
 
 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
 		struct mdp5_hw_pipe *cur = mdp5_kms->hwpipes[i];
@@ -107,7 +109,7 @@ int mdp5_pipe_assign(struct drm_atomic_state *s, struct drm_plane *plane,
 		WARN_ON(r_hwpipe);
 
 		DBG("%s: alloc SMP blocks", (*hwpipe)->name);
-		ret = mdp5_smp_assign(mdp5_kms->smp, &state->smp,
+		ret = mdp5_smp_assign(mdp5_kms->smp, &new_global_state->smp,
 				(*hwpipe)->pipe, blkcfg);
 		if (ret)
 			return -ENOMEM;
@@ -132,7 +134,7 @@ void mdp5_pipe_release(struct drm_atomic_state *s, struct mdp5_hw_pipe *hwpipe)
 {
 	struct msm_drm_private *priv = s->dev->dev_private;
 	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct mdp5_state *state = mdp5_get_state(s);
+	struct mdp5_global_state *state = mdp5_get_global_state(s);
 	struct mdp5_hw_pipe_state *new_state = &state->hwpipe;
 
 	if (!hwpipe)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index ae4983d9d0a5..96c2b828dba4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -340,17 +340,20 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
 	struct mdp5_kms *mdp5_kms = get_kms(smp);
 	struct mdp5_hw_pipe_state *hwpstate;
 	struct mdp5_smp_state *state;
+	struct mdp5_global_state *global_state;
 	int total = 0, i, j;
 
 	drm_printf(p, "name\tinuse\tplane\n");
 	drm_printf(p, "----\t-----\t-----\n");
 
 	if (drm_can_sleep())
-		drm_modeset_lock(&mdp5_kms->state_lock, NULL);
+		drm_modeset_lock(&mdp5_kms->glob_state_lock, NULL);
+
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
 
 	/* grab these *after* we hold the state_lock */
-	hwpstate = &mdp5_kms->state->hwpipe;
-	state = &mdp5_kms->state->smp;
+	hwpstate = &global_state->hwpipe;
+	state = &global_state->smp;
 
 	for (i = 0; i < mdp5_kms->num_hwpipes; i++) {
 		struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i];
@@ -374,7 +377,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p)
 			bitmap_weight(state->state, smp->blk_cnt));
 
 	if (drm_can_sleep())
-		drm_modeset_unlock(&mdp5_kms->state_lock);
+		drm_modeset_unlock(&mdp5_kms->glob_state_lock);
 }
 
 void mdp5_smp_destroy(struct mdp5_smp *smp)
@@ -384,7 +387,8 @@ void mdp5_smp_destroy(struct mdp5_smp *smp)
 
 struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_block *cfg)
 {
-	struct mdp5_smp_state *state = &mdp5_kms->state->smp;
+	struct mdp5_smp_state *state;
+	struct mdp5_global_state *global_state;
 	struct mdp5_smp *smp = NULL;
 	int ret;
 
@@ -398,6 +402,9 @@ struct mdp5_smp *mdp5_smp_init(struct mdp5_kms *mdp5_kms, const struct mdp5_smp_
 	smp->blk_cnt = cfg->mmb_count;
 	smp->blk_size = cfg->mmb_size;
 
+	global_state = mdp5_get_existing_global_state(mdp5_kms);
+	state = &global_state->smp;
+
 	/* statically tied MMBs cannot be re-allocated: */
 	bitmap_copy(state->state, cfg->reserved_state, smp->blk_cnt);
 	memcpy(smp->reserved, cfg->reserved, sizeof(smp->reserved));
From: Archit Taneja <architt@codeaurora.org>
With the addition of "private_objs" in drm_atomic_state, we no longer need to subclass drm_atomic_state to store the state of shared resources that don't perfectly fit within plane/crtc/connector state information. We can now save this state within drm_atomic_state itself using the private objects.
Remove the infrastructure that allowed subclassing of drm_atomic_state in the driver.
Changes in v3:
- Added to the msm atomic helper patch set
Signed-off-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 46 ------------------------
 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h | 22 ------------
 drivers/gpu/drm/msm/msm_atomic.c         | 31 ----------------
 drivers/gpu/drm/msm/msm_drv.c            |  3 --
 drivers/gpu/drm/msm/msm_kms.h            | 14 --------
 5 files changed, 116 deletions(-)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6ada098dba0b..6e12e275deba 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -70,42 +70,6 @@ static int mdp5_hw_init(struct msm_kms *kms)
 	return 0;
 }
 
-struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
-{
-	struct msm_drm_private *priv = s->dev->dev_private;
-	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
-	struct msm_kms_state *state = to_kms_state(s);
-	struct mdp5_state *new_state;
-	int ret;
-
-	if (state->state)
-		return state->state;
-
-	ret = drm_modeset_lock(&mdp5_kms->state_lock, s->acquire_ctx);
-	if (ret)
-		return ERR_PTR(ret);
-
-	new_state = kmalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
-	if (!new_state)
-		return ERR_PTR(-ENOMEM);
-
-	/* Copy state: */
-	new_state->hwpipe = mdp5_kms->state->hwpipe;
-	new_state->hwmixer = mdp5_kms->state->hwmixer;
-	if (mdp5_kms->smp)
-		new_state->smp = mdp5_kms->state->smp;
-
-	state->state = new_state;
-
-	return new_state;
-}
-
-static void mdp5_swap_state(struct msm_kms *kms, struct drm_atomic_state *state)
-{
-	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-	swap(to_kms_state(state)->state, mdp5_kms->state);
-}
-
 /* Global/shared object state funcs */
 
 /*
@@ -315,7 +279,6 @@ static const struct mdp_kms_funcs kms_funcs = {
 		.irq = mdp5_irq,
 		.enable_vblank = mdp5_enable_vblank,
 		.disable_vblank = mdp5_disable_vblank,
-		.swap_state = mdp5_swap_state,
 		.prepare_commit = mdp5_prepare_commit,
 		.complete_commit = mdp5_complete_commit,
 		.wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
@@ -815,8 +778,6 @@ static void mdp5_destroy(struct platform_device *pdev)
 
 	drm_atomic_private_obj_fini(&mdp5_kms->glob_state);
 	drm_modeset_lock_fini(&mdp5_kms->glob_state_lock);
-
-	kfree(mdp5_kms->state);
 }
 
 static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
@@ -969,13 +930,6 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 	mdp5_kms->dev = dev;
 	mdp5_kms->pdev = pdev;
 
-	drm_modeset_lock_init(&mdp5_kms->state_lock);
-	mdp5_kms->state = kzalloc(sizeof(*mdp5_kms->state), GFP_KERNEL);
-	if (!mdp5_kms->state) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
 	ret = mdp5_global_obj_init(mdp5_kms);
 	if (ret)
 		goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 76f0ddfca322..854dfd30e829 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -28,8 +28,6 @@
 #include "mdp5_ctl.h"
 #include "mdp5_smp.h"
 
-struct mdp5_state;
-
 struct mdp5_kms {
 	struct mdp_kms base;
 
@@ -49,12 +47,6 @@ struct mdp5_kms {
 	struct mdp5_cfg_handler *cfg;
 	uint32_t caps;	/* MDP capabilities (MDP_CAP_XXX bits) */
 
-	/**
-	 * Global atomic state. Do not access directly, use mdp5_get_state()
-	 */
-	struct mdp5_state *state;
-	struct drm_modeset_lock state_lock;
-
 	/*
 	 * Global private object state, Do not access directly, use
 	 * mdp5_global_get_state()
@@ -88,20 +80,6 @@ struct mdp5_kms {
 };
 #define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
 
-/* Global atomic state for tracking resources that are shared across
- * multiple kms objects (planes/crtcs/etc).
- *
- * For atomic updates which require modifying global state,
- */
-struct mdp5_state {
-	struct mdp5_hw_pipe_state hwpipe;
-	struct mdp5_hw_mixer_state hwmixer;
-	struct mdp5_smp_state smp;
-};
-
-struct mdp5_state *__must_check
-mdp5_get_state(struct drm_atomic_state *s);
-
 /* Global private object state for tracking resources that are shared across
  * multiple kms objects (planes/crtcs/etc).
  */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index bf5f8c39f34d..9d0a0ca1f0cb 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -224,11 +224,7 @@ int msm_atomic_commit(struct drm_device *dev,
 	 * This is the point of no return - everything below never fails except
 	 * when the hw goes bonghits. Which means we can commit the new state on
 	 * the software side now.
-	 *
-	 * swap driver private state while still holding state_lock
 	 */
-	if (to_kms_state(state)->state)
-		priv->kms->funcs->swap_state(priv->kms, state);
 
 	/*
 	 * Everything below can be run asynchronously without the need to grab
@@ -262,30 +258,3 @@ int msm_atomic_commit(struct drm_device *dev,
 	drm_atomic_helper_cleanup_planes(dev, state);
 	return ret;
 }
-
-struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
-{
-	struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
-	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
-		kfree(state);
-		return NULL;
-	}
-
-	return &state->base;
-}
-
-void msm_atomic_state_clear(struct drm_atomic_state *s)
-{
-	struct msm_kms_state *state = to_kms_state(s);
-	drm_atomic_state_default_clear(&state->base);
-	kfree(state->state);
-	state->state = NULL;
-}
-
-void msm_atomic_state_free(struct drm_atomic_state *state)
-{
-	kfree(to_kms_state(state)->state);
-	drm_atomic_state_default_release(state);
-	kfree(state);
-}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 30cd514d8f7c..1c89195da4ff 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -42,9 +42,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = msm_atomic_commit,
-	.atomic_state_alloc = msm_atomic_state_alloc,
-	.atomic_state_clear = msm_atomic_state_clear,
-	.atomic_state_free = msm_atomic_state_free,
 };
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 17d5824417ad..f0842b963df1 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -40,8 +40,6 @@ struct msm_kms_funcs {
 	irqreturn_t (*irq)(struct msm_kms *kms);
 	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
-	/* swap global atomic state: */
-	void (*swap_state)(struct msm_kms *kms, struct drm_atomic_state *state);
 	/* modeset, bracketing atomic_commit(): */
 	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
 	void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
@@ -77,18 +75,6 @@ struct msm_kms {
 	struct msm_gem_address_space *aspace;
 };
 
-/**
- * Subclass of drm_atomic_state, to allow kms backend to have driver
- * private global state. The kms backend can do whatever it wants
- * with the ->state ptr. On ->atomic_state_clear() the ->state ptr
- * is kfree'd and set back to NULL.
- */
-struct msm_kms_state {
-	struct drm_atomic_state base;
-	void *state;
-};
-#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
-
 static inline void msm_kms_init(struct msm_kms *kms,
 		const struct msm_kms_funcs *funcs)
 {
Factor out the commit_tail() portions of complete_commit() into a separate function to facilitate moving to the atomic helpers in future patches.
Changes in v2:
- None
Changes in v3:
- Rebased on Archit's private_obj set
Cc: Jeykumar Sankaran <jsanka@codeaurora.org>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/msm_atomic.c | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 9d0a0ca1f0cb..c18f0bee20d4 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -97,18 +97,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 	}
 }
 
-/* The (potentially) asynchronous part of the commit. At this point
- * nothing can fail short of armageddon.
- */
-static void complete_commit(struct msm_commit *c, bool async)
+static void msm_atomic_commit_tail(struct drm_atomic_state *state)
 {
-	struct drm_atomic_state *state = c->state;
 	struct drm_device *dev = state->dev;
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_kms *kms = priv->kms;
 
-	drm_atomic_helper_wait_for_fences(dev, state, false);
-
 	kms->funcs->prepare_commit(kms, state);
 
 	drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -135,6 +129,19 @@ static void complete_commit(struct msm_commit *c, bool async)
 	drm_atomic_helper_cleanup_planes(dev, state);
 
 	kms->funcs->complete_commit(kms, state);
+}
+
+/* The (potentially) asynchronous part of the commit. At this point
+ * nothing can fail short of armageddon.
+ */
+static void complete_commit(struct msm_commit *c)
+{
+	struct drm_atomic_state *state = c->state;
+	struct drm_device *dev = state->dev;
+
+	drm_atomic_helper_wait_for_fences(dev, state, false);
+
+	msm_atomic_commit_tail(state);
 
 	drm_atomic_state_put(state);
 
@@ -143,7 +150,7 @@ static void complete_commit(struct msm_commit *c, bool async)
 
 static void commit_worker(struct work_struct *work)
 {
-	complete_commit(container_of(work, struct msm_commit, work), true);
+	complete_commit(container_of(work, struct msm_commit, work));
 }
 
 /**
@@ -248,7 +255,7 @@ int msm_atomic_commit(struct drm_device *dev,
 		return 0;
 	}
 
-	complete_commit(c, false);
+	complete_commit(c);
 
 	return 0;
Don't leave the event != NULL once it's consumed; a non-NULL event is used as a signal to the atomic helpers that the event will be handled by the driver.
Changes in v2:
- None
Changes in v3:
- Rebased on Archit's private_obj set
Cc: Jeykumar Sankaran <jsanka@codeaurora.org>
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c | 1 +
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 1 +
 2 files changed, 2 insertions(+)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 6e5e1aa54ce1..b001699297c4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	mdp4_crtc->event = crtc->state->event;
+	crtc->state->event = NULL;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	blend_setup(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 9893e43ba6c5..76b96081916f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	mdp5_crtc->event = crtc->state->event;
+	crtc->state->event = NULL;
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/*
Ensure that any queued events are issued when disabling the crtc. This avoids timeouts when we come back and wait for dependencies (like the previous frame's flip_done).
Changes in v2:
- None
Changes in v3:
- Rebased on Archit's private_obj set
Reviewed-by: Archit Taneja <architt@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 76b96081916f..10271359789e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -430,6 +430,7 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct device *dev = &mdp5_kms->pdev->dev;
+	unsigned long flags;
 
 	DBG("%s", crtc->name);
 
@@ -445,6 +446,14 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
 	mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
 	pm_runtime_put_sync(dev);
 
+	if (crtc->state->event && !crtc->state->active) {
+		WARN_ON(mdp5_crtc->event);
+		spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
+	}
+
 	mdp5_crtc->enabled = false;
 }
Moving further towards switching fully to the atomic helpers, this patch removes the hand-rolled worker nonblock commit code and uses the atomic helpers' commit_work model.
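For background, the commit_work model relies on the work_struct embedded in drm_atomic_state: nonblocking commits queue that work item and a worker runs the commit tail once fences and dependencies have resolved. A minimal sketch of that flow (the two small helpers from the diff below merged into one, not the exact code) looks like:

static void commit_work(struct work_struct *work)
{
	/* drm_atomic_state embeds commit_work for exactly this purpose */
	struct drm_atomic_state *state = container_of(work,
			struct drm_atomic_state, commit_work);

	/* wait for fences and earlier commits before touching the hw */
	drm_atomic_helper_wait_for_fences(state->dev, state, false);
	drm_atomic_helper_wait_for_dependencies(state);

	msm_atomic_commit_tail(state);

	drm_atomic_helper_commit_cleanup_done(state);
	drm_atomic_state_put(state);
}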
Changes in v2:
- Remove commit_destroy()
- Shuffle order of commit_tail calls to further serialize commits
- Use stall in swap_state to avoid abandoned events on disable
Changes in v3:
- Rebased on Archit's private_obj set
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/msm_atomic.c | 153 +++++++++----------------------
 drivers/gpu/drm/msm/msm_drv.c    |   1 -
 drivers/gpu/drm/msm/msm_drv.h    |   4 -
 3 files changed, 42 insertions(+), 116 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index c18f0bee20d4..a31aa417b80d 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -20,66 +20,6 @@
 #include "msm_gem.h"
 #include "msm_fence.h"
 
-struct msm_commit {
-	struct drm_device *dev;
-	struct drm_atomic_state *state;
-	struct work_struct work;
-	uint32_t crtc_mask;
-};
-
-static void commit_worker(struct work_struct *work);
-
-/* block until specified crtcs are no longer pending update, and
- * atomically mark them as pending update
- */
-static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
-{
-	int ret;
-
-	spin_lock(&priv->pending_crtcs_event.lock);
-	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
-			!(priv->pending_crtcs & crtc_mask));
-	if (ret == 0) {
-		DBG("start: %08x", crtc_mask);
-		priv->pending_crtcs |= crtc_mask;
-	}
-	spin_unlock(&priv->pending_crtcs_event.lock);
-
-	return ret;
-}
-
-/* clear specified crtcs (no longer pending update)
- */
-static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
-{
-	spin_lock(&priv->pending_crtcs_event.lock);
-	DBG("end: %08x", crtc_mask);
-	priv->pending_crtcs &= ~crtc_mask;
-	wake_up_all_locked(&priv->pending_crtcs_event);
-	spin_unlock(&priv->pending_crtcs_event.lock);
-}
-
-static struct msm_commit *commit_init(struct drm_atomic_state *state)
-{
-	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
-	if (!c)
-		return NULL;
-
-	c->dev = state->dev;
-	c->state = state;
-
-	INIT_WORK(&c->work, commit_worker);
-
-	return c;
-}
-
-static void commit_destroy(struct msm_commit *c)
-{
-	end_atomic(c->dev->dev_private, c->crtc_mask);
-	kfree(c);
-}
-
 static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
 {
@@ -126,31 +66,37 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	msm_atomic_wait_for_commit_done(dev, state);
 
-	drm_atomic_helper_cleanup_planes(dev, state);
-
 	kms->funcs->complete_commit(kms, state);
+
+	drm_atomic_helper_wait_for_vblanks(dev, state);
+
+	drm_atomic_helper_commit_hw_done(state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
 }
 
 /* The (potentially) asynchronous part of the commit. At this point
  * nothing can fail short of armageddon.
  */
-static void complete_commit(struct msm_commit *c)
+static void commit_tail(struct drm_atomic_state *state)
 {
-	struct drm_atomic_state *state = c->state;
-	struct drm_device *dev = state->dev;
+	drm_atomic_helper_wait_for_fences(state->dev, state, false);
 
-	drm_atomic_helper_wait_for_fences(dev, state, false);
+	drm_atomic_helper_wait_for_dependencies(state);
 
 	msm_atomic_commit_tail(state);
 
-	drm_atomic_state_put(state);
+	drm_atomic_helper_commit_cleanup_done(state);
 
-	commit_destroy(c);
+	drm_atomic_state_put(state);
 }
 
-static void commit_worker(struct work_struct *work)
+static void commit_work(struct work_struct *work)
 {
-	complete_commit(container_of(work, struct msm_commit, work));
+	struct drm_atomic_state *state = container_of(work,
+						      struct drm_atomic_state,
+						      commit_work);
+	commit_tail(state);
 }
 
 /**
@@ -169,17 +115,12 @@ int msm_atomic_commit(struct drm_device *dev,
 		struct drm_atomic_state *state, bool nonblock)
 {
 	struct msm_drm_private *priv = dev->dev_private;
-	struct msm_commit *c;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	int i, ret;
 
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
 	/*
 	 * Note that plane->atomic_async_check() should fail if we need
 	 * to re-assign hwpipe or anything that touches global atomic
 	 * state, so we'll never go down the async update path in those
 	 * cases.
 	 */
 	if (state->async_update) {
+		ret = drm_atomic_helper_prepare_planes(dev, state);
+		if (ret)
+			return ret;
+
 		drm_atomic_helper_async_commit(dev, state);
 		drm_atomic_helper_cleanup_planes(dev, state);
 		return 0;
 	}
 
-	c = commit_init(state);
-	if (!c) {
-		ret = -ENOMEM;
-		goto error;
-	}
+	ret = drm_atomic_helper_setup_commit(state, nonblock);
+	if (ret)
+		return ret;
 
-	/*
-	 * Figure out what crtcs we have:
-	 */
-	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
-		c->crtc_mask |= drm_crtc_mask(crtc);
+	INIT_WORK(&state->commit_work, commit_work);
 
-	/*
-	 * Figure out what fence to wait for:
-	 */
-	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
-		if ((new_plane_state->fb != old_plane_state->fb) && new_plane_state->fb) {
-			struct drm_gem_object *obj = msm_framebuffer_bo(new_plane_state->fb, 0);
-			struct msm_gem_object *msm_obj = to_msm_bo(obj);
-			struct dma_fence *fence = reservation_object_get_excl_rcu(msm_obj->resv);
-
-			drm_atomic_set_fence_for_plane(new_plane_state, fence);
-		}
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret)
+		return ret;
+
+	if (!nonblock) {
+		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
+		if (ret)
+			goto error;
 	}
 
 	/*
-	 * Wait for pending updates on any of the same crtc's and then
-	 * mark our set of crtc's as busy:
+	 * This is the point of no return - everything below never fails except
+	 * when the hw goes bonghits. Which means we can commit the new state on
+	 * the software side now.
+	 *
+	 * swap driver private state while still holding state_lock
 	 */
-	ret = start_atomic(dev->dev_private, c->crtc_mask);
-	if (ret)
-		goto err_free;
-
-	BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
+	BUG_ON(drm_atomic_helper_swap_state(state, true) < 0);
 
 	/*
 	 * This is the point of no return - everything below never fails except
@@ -250,17 +185,13 @@ int msm_atomic_commit(struct drm_device *dev,
 	 */
 
 	drm_atomic_state_get(state);
-	if (nonblock) {
-		queue_work(priv->atomic_wq, &c->work);
-		return 0;
-	}
-
-	complete_commit(c);
+	if (nonblock)
+		queue_work(system_unbound_wq, &state->commit_work);
+	else
+		commit_tail(state);
 
 	return 0;
 
-err_free:
-	kfree(c);
 error:
 	drm_atomic_helper_cleanup_planes(dev, state);
 	return ret;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 1c89195da4ff..9cec74c79aa2 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -381,7 +381,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
 	priv->wq = alloc_ordered_workqueue("msm", 0);
 	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
-	init_waitqueue_head(&priv->pending_crtcs_event);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
 	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 48ed5b9a8580..f9237758ade3 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -117,10 +117,6 @@ struct msm_drm_private {
 	struct workqueue_struct *wq;
 	struct workqueue_struct *atomic_wq;
 
-	/* crtcs pending async atomic updates: */
-	uint32_t pending_crtcs;
-	wait_queue_head_t pending_crtcs_event;
-
 	unsigned int num_planes;
 	struct drm_plane *planes[16];
Now that all of the msm-specific goo is tucked safely away we can switch over to using the atomic helper commit directly. \o/
Changes in v2:
- None
Changes in v3:
- Rebased on Archit's private_obj set
Cc: Abhinav Kumar <abhinavk@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/msm/msm_atomic.c | 139 +------------------------------
 drivers/gpu/drm/msm/msm_drv.c    |   7 +-
 drivers/gpu/drm/msm/msm_drv.h    |   3 +-
 3 files changed, 8 insertions(+), 141 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index a31aa417b80d..2da5e2150d15 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -17,8 +17,6 @@
 
 #include "msm_drv.h"
 #include "msm_kms.h"
-#include "msm_gem.h"
-#include "msm_fence.h"
 
 static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 		struct drm_atomic_state *old_state)
@@ -37,7 +35,7 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
 	}
 }
 
-static void msm_atomic_commit_tail(struct drm_atomic_state *state)
+void msm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
 	struct msm_drm_private *priv = dev->dev_private;
@@ -51,19 +49,6 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_commit_modeset_enables(dev, state);
 
-	/* NOTE: _wait_for_vblanks() only waits for vblank on
-	 * enabled CRTCs. So we end up faulting when disabling
-	 * due to (potentially) unref'ing the outgoing fb's
-	 * before the vblank when the disable has latched.
-	 *
-	 * But if it did wait on disabled (or newly disabled)
-	 * CRTCs, that would be racy (ie. we could have missed
-	 * the irq. We need some way to poll for pipe shut
-	 * down. Or just live with occasionally hitting the
-	 * timeout in the CRTC disable path (which really should
-	 * not be critical path)
-	 */
-
 	msm_atomic_wait_for_commit_done(dev, state);
 
 	kms->funcs->complete_commit(kms, state);
@@ -74,125 +59,3 @@ static void msm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_cleanup_planes(dev, state);
 }
-
-/* The (potentially) asynchronous part of the commit. At this point
- * nothing can fail short of armageddon.
- */
-static void commit_tail(struct drm_atomic_state *state)
-{
-	drm_atomic_helper_wait_for_fences(state->dev, state, false);
-
-	drm_atomic_helper_wait_for_dependencies(state);
-
-	msm_atomic_commit_tail(state);
-
-	drm_atomic_helper_commit_cleanup_done(state);
-
-	drm_atomic_state_put(state);
-}
-
-static void commit_work(struct work_struct *work)
-{
-	struct drm_atomic_state *state = container_of(work,
-						      struct drm_atomic_state,
-						      commit_work);
-	commit_tail(state);
-}
-
-/**
- * drm_atomic_helper_commit - commit validated state object
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: nonblocking commit
- *
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails.
- *
- * RETURNS
- * Zero for success or -errno.
- */
-int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool nonblock)
-{
-	struct msm_drm_private *priv = dev->dev_private;
-	struct drm_crtc *crtc;
-	struct drm_crtc_state *crtc_state;
-	struct drm_plane *plane;
-	struct drm_plane_state *old_plane_state, *new_plane_state;
-	int i, ret;
-
-	/*
-	 * Note that plane->atomic_async_check() should fail if we need
-	 * to re-assign hwpipe or anything that touches global atomic
-	 * state, so we'll never go down the async update path in those
-	 * cases.
-	 */
-	if (state->async_update) {
-		ret = drm_atomic_helper_prepare_planes(dev, state);
-		if (ret)
-			return ret;
-
-		drm_atomic_helper_async_commit(dev, state);
-		drm_atomic_helper_cleanup_planes(dev, state);
-		return 0;
-	}
-
-	ret = drm_atomic_helper_setup_commit(state, nonblock);
-	if (ret)
-		return ret;
-
-	INIT_WORK(&state->commit_work, commit_work);
-
-	ret = drm_atomic_helper_prepare_planes(dev, state);
-	if (ret)
-		return ret;
-
-	if (!nonblock) {
-		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
-		if (ret)
-			goto error;
-	}
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 *
-	 * swap driver private state while still holding state_lock
-	 */
-	BUG_ON(drm_atomic_helper_swap_state(state, true) < 0);
-
-	/*
-	 * This is the point of no return - everything below never fails except
-	 * when the hw goes bonghits. Which means we can commit the new state on
-	 * the software side now.
-	 */
-
-	/*
-	 * Everything below can be run asynchronously without the need to grab
-	 * any modeset locks at all under one conditions: It must be guaranteed
-	 * that the asynchronous work has either been cancelled (if the driver
-	 * supports it, which at least requires that the framebuffers get
-	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
-	 * before the new state gets committed on the software side with
-	 * drm_atomic_helper_swap_state().
-	 *
-	 * This scheme allows new atomic state updates to be prepared and
-	 * checked in parallel to the asynchronous completion of the previous
-	 * update. Which is important since compositors need to figure out the
-	 * composition of the next frame right after having submitted the
-	 * current layout.
-	 */
-
-	drm_atomic_state_get(state);
-	if (nonblock)
-		queue_work(system_unbound_wq, &state->commit_work);
-	else
-		commit_tail(state);
-
-	return 0;
-
-error:
-	drm_atomic_helper_cleanup_planes(dev, state);
-	return ret;
-}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9cec74c79aa2..021a0b6f9a59 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -41,7 +41,11 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = msm_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+	.atomic_commit_tail = msm_atomic_commit_tail,
 };
 
 #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -438,6 +442,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	}
 
 	ddev->mode_config.funcs = &mode_config_funcs;
+	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index f9237758ade3..1cdb9d491a8d 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -156,8 +156,7 @@ struct msm_format {
 	uint32_t pixel_format;
 };
 
-int msm_atomic_commit(struct drm_device *dev,
-		struct drm_atomic_state *state, bool nonblock);
+void msm_atomic_commit_tail(struct drm_atomic_state *state);
 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);