Hi,
I've spent some time in the fbdev emulation code and discovered a recurring pattern around suspend/resume. Should we add some more helpers :-)
struct drm_device {
        /**
         * @suspend_state:
         *
         * Atomic state when suspended.
         * Set by drm_dev_suspend(), cleared by drm_dev_resume().
         */
        struct drm_atomic_state *suspend_state;
};

int drm_dev_suspend(struct drm_device *dev)
{
        struct drm_atomic_state *state;

        if (!dev)
                return 0;

        drm_kms_helper_poll_disable(dev);
        drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
        state = drm_atomic_helper_suspend(dev);
        if (IS_ERR(state)) {
                drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
                drm_kms_helper_poll_enable(dev);
                return PTR_ERR(state);
        }

        dev->suspend_state = state;

        return 0;
}

int drm_dev_resume(struct drm_device *dev)
{
        int ret;

        if (!dev || WARN_ON(!dev->suspend_state))
                return 0;

        ret = drm_atomic_helper_resume(dev, dev->suspend_state);
        if (ret)
                DRM_ERROR("Failed to resume (%d)\n", ret);
        dev->suspend_state = NULL;

        drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
        drm_kms_helper_poll_enable(dev);

        return 0;
}
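To illustrate what this buys us, a driver's system sleep hooks could then boil down to something like this (just a sketch with a made-up "foo" driver; it assumes the drm_device pointer is stored as driver data, like in the examples further down):

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
        /* hypothetical driver: the drm_device is stored as drvdata */
        return drm_dev_suspend(dev_get_drvdata(dev));
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
        return drm_dev_resume(dev_get_drvdata(dev));
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);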
The drm_kms_helper_poll_enable() docs state that it's not allowed to call this function if polling support has not been set up, but the code allows it. The drm_kms_helper_poll_disable() docs state that it's OK to call it even if polling is not enabled.
Here is the suspend/resume code from the drivers that use drm_atomic_helper_suspend/resume():
static int __maybe_unused hdlcd_pm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
if (!hdlcd) return 0;
drm_kms_helper_poll_disable(drm);
hdlcd->state = drm_atomic_helper_suspend(drm); if (IS_ERR(hdlcd->state)) { drm_kms_helper_poll_enable(drm); return PTR_ERR(hdlcd->state); }
return 0; }
static int __maybe_unused hdlcd_pm_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
if (!hdlcd) return 0;
drm_atomic_helper_resume(drm, hdlcd->state); drm_kms_helper_poll_enable(drm); pm_runtime_set_active(dev);
return 0; }
static int __maybe_unused malidp_pm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct malidp_drm *malidp = drm->dev_private;
drm_kms_helper_poll_disable(drm); console_lock(); drm_fbdev_cma_set_suspend(malidp->fbdev, 1); console_unlock(); malidp->pm_state = drm_atomic_helper_suspend(drm); if (IS_ERR(malidp->pm_state)) { console_lock(); drm_fbdev_cma_set_suspend(malidp->fbdev, 0); console_unlock(); drm_kms_helper_poll_enable(drm); return PTR_ERR(malidp->pm_state); }
return 0; }
static int __maybe_unused malidp_pm_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct malidp_drm *malidp = drm->dev_private;
drm_atomic_helper_resume(drm, malidp->pm_state); console_lock(); drm_fbdev_cma_set_suspend(malidp->fbdev, 0); console_unlock(); drm_kms_helper_poll_enable(drm);
return 0; }
static int atmel_hlcdc_dc_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct atmel_hlcdc_dc *dc = drm_dev->dev_private; struct regmap *regmap = dc->hlcdc->regmap; struct drm_atomic_state *state;
state = drm_atomic_helper_suspend(drm_dev); if (IS_ERR(state)) return PTR_ERR(state);
dc->suspend.state = state;
regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr); regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr); clk_disable_unprepare(dc->hlcdc->periph_clk);
return 0; }
static int atmel_hlcdc_dc_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
clk_prepare_enable(dc->hlcdc->periph_clk); regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);
return drm_atomic_helper_resume(drm_dev, dc->suspend.state); }
static int exynos_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct exynos_drm_private *private = drm_dev->dev_private;
if (pm_runtime_suspended(dev) || !drm_dev) return 0;
drm_kms_helper_poll_disable(drm_dev); exynos_drm_fbdev_suspend(drm_dev); private->suspend_state = drm_atomic_helper_suspend(drm_dev); if (IS_ERR(private->suspend_state)) { exynos_drm_fbdev_resume(drm_dev); drm_kms_helper_poll_enable(drm_dev); return PTR_ERR(private->suspend_state); }
return 0; }
static int exynos_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct exynos_drm_private *private = drm_dev->dev_private;
if (pm_runtime_suspended(dev) || !drm_dev) return 0;
drm_atomic_helper_resume(drm_dev, private->suspend_state); exynos_drm_fbdev_resume(drm_dev); drm_kms_helper_poll_enable(drm_dev);
return 0; }
static int fsl_dcu_drm_pm_suspend(struct device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
if (!fsl_dev) return 0;
disable_irq(fsl_dev->irq); drm_kms_helper_poll_disable(fsl_dev->drm);
console_lock(); drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1); console_unlock();
fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm); if (IS_ERR(fsl_dev->state)) { console_lock(); drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0); console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm); enable_irq(fsl_dev->irq); return PTR_ERR(fsl_dev->state); }
clk_disable_unprepare(fsl_dev->pix_clk); clk_disable_unprepare(fsl_dev->clk);
return 0; }
static int fsl_dcu_drm_pm_resume(struct device *dev) { struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev); int ret;
if (!fsl_dev) return 0;
ret = clk_prepare_enable(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to enable dcu clk\n"); return ret; }
if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
console_lock(); drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0); console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm); enable_irq(fsl_dev->irq);
return 0; }
static int __maybe_unused hibmc_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct hibmc_drm_private *priv = drm_dev->dev_private;
drm_kms_helper_poll_disable(drm_dev); priv->suspend_state = drm_atomic_helper_suspend(drm_dev); if (IS_ERR(priv->suspend_state)) { DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(priv->suspend_state)); drm_kms_helper_poll_enable(drm_dev); return PTR_ERR(priv->suspend_state); }
return 0; }
static int __maybe_unused hibmc_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); struct hibmc_drm_private *priv = drm_dev->dev_private;
drm_atomic_helper_resume(drm_dev, priv->suspend_state); drm_kms_helper_poll_enable(drm_dev);
return 0; }
i915 is excluded since it's rather complicated.
static int imx_drm_suspend(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct imx_drm_device *imxdrm;
/* The drm_dev is NULL before .load hook is called */ if (drm_dev == NULL) return 0;
drm_kms_helper_poll_disable(drm_dev);
imxdrm = drm_dev->dev_private; imxdrm->state = drm_atomic_helper_suspend(drm_dev); if (IS_ERR(imxdrm->state)) { drm_kms_helper_poll_enable(drm_dev); return PTR_ERR(imxdrm->state); }
return 0; }
static int imx_drm_resume(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); struct imx_drm_device *imx_drm;
if (drm_dev == NULL) return 0;
imx_drm = drm_dev->dev_private; drm_atomic_helper_resume(drm_dev, imx_drm->state); drm_kms_helper_poll_enable(drm_dev);
return 0; }
static int mtk_drm_sys_suspend(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); struct drm_device *drm = private->drm;
drm_kms_helper_poll_disable(drm);
private->suspend_state = drm_atomic_helper_suspend(drm); if (IS_ERR(private->suspend_state)) { drm_kms_helper_poll_enable(drm); return PTR_ERR(private->suspend_state); }
DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n"); return 0; }
static int mtk_drm_sys_resume(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); struct drm_device *drm = private->drm;
drm_atomic_helper_resume(drm, private->suspend_state); drm_kms_helper_poll_enable(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n"); return 0; }
nouveau is also rather complicated.
static int rockchip_drm_sys_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct rockchip_drm_private *priv;
if (!drm) return 0;
drm_kms_helper_poll_disable(drm); rockchip_drm_fb_suspend(drm);
priv = drm->dev_private; priv->state = drm_atomic_helper_suspend(drm); if (IS_ERR(priv->state)) { rockchip_drm_fb_resume(drm); drm_kms_helper_poll_enable(drm); return PTR_ERR(priv->state); }
return 0; }
static int rockchip_drm_sys_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct rockchip_drm_private *priv;
if (!drm) return 0;
priv = drm->dev_private; drm_atomic_helper_resume(drm, priv->state); rockchip_drm_fb_resume(drm); drm_kms_helper_poll_enable(drm);
return 0; }
static int host1x_drm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct tegra_drm *tegra = drm->dev_private;
drm_kms_helper_poll_disable(drm); tegra_drm_fb_suspend(drm);
tegra->state = drm_atomic_helper_suspend(drm); if (IS_ERR(tegra->state)) { tegra_drm_fb_resume(drm); drm_kms_helper_poll_enable(drm); return PTR_ERR(tegra->state); }
return 0; }
static int host1x_drm_resume(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); struct tegra_drm *tegra = drm->dev_private;
drm_atomic_helper_resume(drm, tegra->state); tegra_drm_fb_resume(drm); drm_kms_helper_poll_enable(drm);
return 0; }
static int tilcdc_pm_suspend(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); struct tilcdc_drm_private *priv = ddev->dev_private;
priv->saved_state = drm_atomic_helper_suspend(ddev);
/* Select sleep pin state */ pinctrl_pm_select_sleep_state(dev);
return 0; }
static int tilcdc_pm_resume(struct device *dev) { struct drm_device *ddev = dev_get_drvdata(dev); struct tilcdc_drm_private *priv = ddev->dev_private; int ret = 0;
/* Select default pin state */ pinctrl_pm_select_default_state(dev);
if (priv->saved_state) ret = drm_atomic_helper_resume(ddev, priv->saved_state);
return ret; }
int tinydrm_suspend(struct tinydrm_device *tdev) { struct drm_atomic_state *state;
if (tdev->suspend_state) { DRM_ERROR("Failed to suspend: state already set\n"); return -EINVAL; }
drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 1); state = drm_atomic_helper_suspend(tdev->drm); if (IS_ERR(state)) { drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0); return PTR_ERR(state); }
tdev->suspend_state = state;
return 0; }
int tinydrm_resume(struct tinydrm_device *tdev) { struct drm_atomic_state *state = tdev->suspend_state; int ret;
if (!state) { DRM_ERROR("Failed to resume: state is not set\n"); return -EINVAL; }
tdev->suspend_state = NULL;
ret = drm_atomic_helper_resume(tdev->drm, state); if (ret) { DRM_ERROR("Error resuming state: %d\n", ret); return ret; }
drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);
return 0; }
Noralf.
Hi Noralf,
On Sun, Oct 22, 2017 at 06:52:41PM +0200, Noralf Trønnes wrote:
Hi,
I've spent some time in the fbdev emulation code and discovered a recurring pattern around suspend/resume. Should we add some more helpers :-)
You're maybe a bit too good at spotting these for your own good :-)
But yeah, a "suspend for dummies" is one of the things which would be nice I think ... Especially since we now have the atomic suspend/resume helpers.
struct drm_device { /** * @suspend_state: * * Atomic state when suspended. * Set by drm_dev_suspend(), cleared by drm_dev_resume(). */ struct drm_atomic_state *suspend_state; };
Imo fits better when we put it into drm_mode_config.
int drm_dev_suspend(struct drm_device *dev) { struct drm_atomic_state *state;
if (!dev) return 0;
drm_kms_helper_poll_disable(dev); drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); drm_kms_helper_poll_enable(dev); return PTR_ERR(state); }
dev->suspend_state = state;
return 0; }
This is all about suspending the modeset side ... I'd give it a drm_mode_config prefix instead of the drm_dev_ (that's maybe a bit too generic), but then maybe type a general suspend/resume kernel-doc text in the drm-internals.rst (maybe pulled in from drm_dev.c) which references these 2 functions as the recommended way to suspend/resume the modeset side of a driver. These won't suspend/resume a render part (if present), so drm_dev_ seems a bit too much.
int drm_dev_resume(struct drm_device *dev) { int ret;
if (!dev || WARN_ON(!dev->suspend_state)) return 0;
ret = drm_atomic_helper_resume(dev, dev->suspend_state); if (ret) DRM_ERROR("Failed to resume (%d)\n", ret); dev->suspend_state = NULL;
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); drm_kms_helper_poll_enable(dev);
return 0; }
The drm_kms_helper_poll_enable() docs state that it's not allowed to call this function if polling support has not been set up, but the code allows it. The drm_kms_helper_poll_disable() docs state that it's OK to call it even if polling is not enabled.
Yeah, we need to fix that, but we should keep a note that if you want polling, you need to follow that order or it goes wrong. Perhaps also explain that this is meant to avoid special cases in suspend/resume code?
Here is the suspend/resume code from the drivers that use drm_atomic_helper_suspend/resume():
Yeah, looks like a really good idea.
Thanks, Daniel
On 30.10.2017 10.34, Daniel Vetter wrote:
Hi Noralf,
On Sun, Oct 22, 2017 at 06:52:41PM +0200, Noralf Trønnes wrote:
Hi,
I've spent some time in the fbdev emulation code and discovered a recurring pattern around suspend/resume. Should we add some more helpers :-)
You're maybe a bit too good at spotting these for your own good :-)
But yeah, a "suspend for dummies" is one of the things which would be nice I think ... Especially since we now have the atomic suspend/resume helpers.
struct drm_device { /** * @suspend_state: * * Atomic state when suspended. * Set by drm_dev_suspend(), cleared by drm_dev_resume(). */ struct drm_atomic_state *suspend_state; };
Imo fits better when we put it into drm_mode_config.
int drm_dev_suspend(struct drm_device *dev) { struct drm_atomic_state *state;
if (!dev) return 0;
drm_kms_helper_poll_disable(dev); drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); drm_kms_helper_poll_enable(dev); return PTR_ERR(state); }
dev->suspend_state = state;
return 0; }
This is all about suspending the modeset side ... I'd give it a drm_mode_config prefix instead of the drm_dev_ (that's maybe a bit too generic), but then maybe type a general suspend/resume kernel-doc text in the drm-internals.rst (maybe pulled in from drm_dev.c) which references these 2 functions as the recommended way to suspend/resume the modeset side of a driver. These won't suspend/resume a render part (if present), so drm_dev_ seems a bit too much.
I just realised that this is pulling helpers (atomic, crtc, fbdev) into the core which IIRC is something that you didn't want?
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 74f6ff5df656..72f8fe1e83cb 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,7 +20,10 @@
  * OF THIS SOFTWARE.
  */

+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_mode_config.h>
 #include <drm/drmP.h>

@@ -473,3 +476,49 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_mode_config_suspend
+ * @dev: DRM device
+ *
+ */
+int drm_mode_config_suspend(struct drm_device *dev)
+{
+	struct drm_atomic_state *state;
+
+	if (!dev)
+		return 0;
+
+	drm_kms_helper_poll_disable(dev);
+	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+	state = drm_atomic_helper_suspend(dev);
+	if (IS_ERR(state)) {
+		drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+		drm_kms_helper_poll_enable(dev);
+		return PTR_ERR(state);
+	}
+
+	dev->mode_config.suspend_state = state;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_config_suspend);
+
+int drm_mode_config_resume(struct drm_device *dev)
+{
+	int ret;
+
+	if (!dev || WARN_ON(!dev->mode_config.suspend_state))
+		return 0;
+
+	ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state);
+	if (ret)
+		DRM_ERROR("Failed to resume (%d)\n", ret);
+	dev->mode_config.suspend_state = NULL;
+
+	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+	drm_kms_helper_poll_enable(dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_config_resume);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 1b37368416c8..86ef327a996d 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -766,11 +766,21 @@ struct drm_mode_config {
 	/* cursor size */
 	uint32_t cursor_width, cursor_height;

+	/**
+	 * @suspend_state:
+	 *
+	 * Atomic state when suspended. Set by drm_mode_config_suspend(),
+	 * cleared by drm_mode_config_resume().
+	 */
+	struct drm_atomic_state *suspend_state;
+
 	const struct drm_mode_config_helper_funcs *helper_private;
 };

 void drm_mode_config_init(struct drm_device *dev);
 void drm_mode_config_reset(struct drm_device *dev);
 void drm_mode_config_cleanup(struct drm_device *dev);
+int drm_mode_config_suspend(struct drm_device *dev);
+int drm_mode_config_resume(struct drm_device *dev);

 #endif
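For illustration, a driver that also has device-specific work to do around the modeset suspend (clocks, interrupts) would still just wrap these helpers; a rough sketch with a made-up "foo" driver and clock (not part of the patch):

/* hypothetical clock handle, only for the sketch */
static struct clk *foo_clk;

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        int ret;

        ret = drm_mode_config_suspend(drm);
        if (ret)
                return ret;

        clk_disable_unprepare(foo_clk);

        return 0;
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(foo_clk);
        if (ret)
                return ret;

        return drm_mode_config_resume(drm);
}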
Noralf.
On Tue, Oct 31, 2017 at 05:37:23PM +0100, Noralf Trønnes wrote:
On 30.10.2017 10.34, Daniel Vetter wrote:
Hi Noralf,
On Sun, Oct 22, 2017 at 06:52:41PM +0200, Noralf Trønnes wrote:
Hi,
I've spent some time in the fbdev emulation code and discovered a recurring pattern around suspend/resume. Should we add some more helpers :-)
You're maybe a bit too good at spotting these for your own good :-)
But yeah, a "suspend for dummies" is one of the things which would be nice I think ... Especially since we now have the atomic suspend/resume helpers.
struct drm_device { /** * @suspend_state: * * Atomic state when suspended. * Set by drm_dev_suspend(), cleared by drm_dev_resume(). */ struct drm_atomic_state *suspend_state; };
Imo fits better when we put it into drm_mode_config.
int drm_dev_suspend(struct drm_device *dev) { struct drm_atomic_state *state;
if (!dev) return 0;
drm_kms_helper_poll_disable(dev); drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); drm_kms_helper_poll_enable(dev); return PTR_ERR(state); }
dev->suspend_state = state;
return 0; }
This is all about suspending the modeset side ... I'd give it a drm_mode_config prefix instead of the drm_dev_ (that's maybe a bit too generic), but then maybe type a general suspend/resume kernel-doc text in the drm-internals.rst (maybe pulled in from drm_dev.c) which references these 2 functions as the recommended way to suspend/resume the modeset side of a driver. These won't suspend/resume a render part (if present), so drm_dev_ seems a bit too much.
I just realised that this is pulling helpers (atomic, crtc, fbdev) into the core which IIRC is something that you didn't want?
Ugh right. I think starting a new drm_mode_config_helper.c for top-level helper stuff would be a reasonable solution. Or some other name if you have a better one. -Daniel
On 01.11.2017 09.47, Daniel Vetter wrote:
On Tue, Oct 31, 2017 at 05:37:23PM +0100, Noralf Trønnes wrote:
On 30.10.2017 10.34, Daniel Vetter wrote:
Hi Noralf,
On Sun, Oct 22, 2017 at 06:52:41PM +0200, Noralf Trønnes wrote:
Hi,
I've spent some time in the fbdev emulation code and discovered a recurring pattern around suspend/resume. Should we add some more helpers :-)
You're maybe a bit too good at spotting these for your own good :-)
But yeah, a "suspend for dummies" is one of the things which would be nice I think ... Especially since we now have the atomic suspend/resume helpers.
struct drm_device { /** * @suspend_state: * * Atomic state when suspended. * Set by drm_dev_suspend(), cleared by drm_dev_resume(). */ struct drm_atomic_state *suspend_state; };
Imo fits better when we put it into drm_mode_config.
int drm_dev_suspend(struct drm_device *dev) { struct drm_atomic_state *state;
if (!dev) return 0;
drm_kms_helper_poll_disable(dev); drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1); state = drm_atomic_helper_suspend(dev); if (IS_ERR(state)) { drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); drm_kms_helper_poll_enable(dev); return PTR_ERR(state); }
dev->suspend_state = state;
return 0; }
This is all about suspending the modeset side ... I'd give it a drm_mode_config prefix instead of the drm_dev_ (that's maybe a bit too generic), but then maybe type a general suspend/resume kernel-doc text in the drm-internals.rst (maybe pulled in from drm_dev.c) which references these 2 functions as the recommended way to suspend/resume the modeset side of a driver. These won't suspend/resume a render part (if present), so drm_dev_ seems a bit too much.
I just realised that this is pulling helpers (atomic, crtc, fbdev) into the core which IIRC is something that you didn't want?
Ugh right. I think starting a new drm_mode_config_helper.c for top-level helper stuff would be a reasonable solution. Or some other name if you have a better one.
Does it fit in drm_modeset_helper.c?
/**
 * DOC: aux kms helpers
 *
 * This helper library contains various one-off functions which don't really fit
 * anywhere else in the DRM modeset helper library.
 */
Noralf.
-Daniel
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 74f6ff5df656..72f8fe1e83cb 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,7 +20,10 @@
  * OF THIS SOFTWARE.
  */
 
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
 #include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_mode_config.h>
 #include <drm/drmP.h>
 
@@ -473,3 +476,49 @@ void drm_mode_config_cleanup(struct drm_device *dev)
         drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+/**
+ * drm_mode_config_suspend
+ * @dev: DRM device
+ */
+int drm_mode_config_suspend(struct drm_device *dev)
+{
+        struct drm_atomic_state *state;
+
+        if (!dev)
+                return 0;
+
+        drm_kms_helper_poll_disable(dev);
+        drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+        state = drm_atomic_helper_suspend(dev);
+        if (IS_ERR(state)) {
+                drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+                drm_kms_helper_poll_enable(dev);
+                return PTR_ERR(state);
+        }
+
+        dev->mode_config.suspend_state = state;
+
+        return 0;
+}
+EXPORT_SYMBOL(drm_mode_config_suspend);
+
+int drm_mode_config_resume(struct drm_device *dev)
+{
+        int ret;
+
+        if (!dev || WARN_ON(!dev->mode_config.suspend_state))
+                return 0;
+
+        ret = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state);
+        if (ret)
+                DRM_ERROR("Failed to resume (%d)\n", ret);
+        dev->mode_config.suspend_state = NULL;
+
+        drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+        drm_kms_helper_poll_enable(dev);
+
+        return 0;
+}
+EXPORT_SYMBOL(drm_mode_config_resume);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 1b37368416c8..86ef327a996d 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -766,11 +766,21 @@ struct drm_mode_config {
         /* cursor size */
         uint32_t cursor_width, cursor_height;
 
+        /**
+         * @suspend_state:
+         *
+         * Atomic state when suspended. Set by drm_mode_config_suspend(),
+         * cleared by drm_mode_config_resume().
+         */
+        struct drm_atomic_state *suspend_state;
+
         const struct drm_mode_config_helper_funcs *helper_private;
 };
 
 void drm_mode_config_init(struct drm_device *dev);
 void drm_mode_config_reset(struct drm_device *dev);
 void drm_mode_config_cleanup(struct drm_device *dev);
+int drm_mode_config_suspend(struct drm_device *dev);
+int drm_mode_config_resume(struct drm_device *dev);
 
 #endif
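For illustration, with these two helpers most of the driver boilerplate surveyed in this thread would collapse to something like the following. This is only a sketch: foo_ is a made-up driver, and a real driver would keep its clock/irq/pinctrl handling around the calls:

static int __maybe_unused foo_pm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_suspend(drm);
}

static int __maybe_unused foo_pm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        return drm_mode_config_resume(drm);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_pm_suspend, foo_pm_resume);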
Noralf.
int drm_dev_resume(struct drm_device *dev)
{
        int ret;

        if (!dev || WARN_ON(!dev->suspend_state))
                return 0;

        ret = drm_atomic_helper_resume(dev, dev->suspend_state);
        if (ret)
                DRM_ERROR("Failed to resume (%d)\n", ret);
        dev->suspend_state = NULL;

        drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
        drm_kms_helper_poll_enable(dev);

        return 0;
}
The drm_kms_helper_poll_enable() docs state that it's not allowed to call this function if polling support has not been set up, but the code allows it. The drm_kms_helper_poll_disable() docs state that it's ok to call it even if polling is not enabled.
Yeah, we need to fix that, but we should keep a note that if you want polling you have to follow this order, or it goes wrong. Perhaps also explain that this is meant to avoid special cases in suspend/resume code?
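E.g. drm_kms_helper_poll_enable() could keep accepting a device where polling was never set up and just spell out the ordering in its kernel-doc; something along these lines (wording is only a suggestion):

/**
 * drm_kms_helper_poll_enable - re-enable output polling
 * @dev: drm_device
 *
 * [...existing description...]
 *
 * Note that calls to enable and disable polling must be strictly ordered,
 * which is automatically the case when they are only called from
 * suspend/resume callbacks. Tolerating devices where polling was never set
 * up is what avoids special cases in driver suspend/resume code.
 */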
Here is the suspend/resume code from the drivers that use drm_atomic_helper_suspend/resume():
Yeah, looks like a really good idea.
Thanks, Daniel
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;

        if (!hdlcd)
                return 0;

        drm_kms_helper_poll_disable(drm);

        hdlcd->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(hdlcd->state)) {
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(hdlcd->state);
        }

        return 0;
}

static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;

        if (!hdlcd)
                return 0;

        drm_atomic_helper_resume(drm, hdlcd->state);
        drm_kms_helper_poll_enable(drm);
        pm_runtime_set_active(dev);

        return 0;
}
static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct malidp_drm *malidp = drm->dev_private;

        drm_kms_helper_poll_disable(drm);
        console_lock();
        drm_fbdev_cma_set_suspend(malidp->fbdev, 1);
        console_unlock();
        malidp->pm_state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(malidp->pm_state)) {
                console_lock();
                drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
                console_unlock();
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(malidp->pm_state);
        }

        return 0;
}

static int __maybe_unused malidp_pm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct malidp_drm *malidp = drm->dev_private;

        drm_atomic_helper_resume(drm, malidp->pm_state);
        console_lock();
        drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
        console_unlock();
        drm_kms_helper_poll_enable(drm);

        return 0;
}
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct atmel_hlcdc_dc *dc = drm_dev->dev_private;
        struct regmap *regmap = dc->hlcdc->regmap;
        struct drm_atomic_state *state;

        state = drm_atomic_helper_suspend(drm_dev);
        if (IS_ERR(state))
                return PTR_ERR(state);

        dc->suspend.state = state;

        regmap_read(regmap, ATMEL_HLCDC_IMR, &dc->suspend.imr);
        regmap_write(regmap, ATMEL_HLCDC_IDR, dc->suspend.imr);
        clk_disable_unprepare(dc->hlcdc->periph_clk);

        return 0;
}

static int atmel_hlcdc_dc_drm_resume(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct atmel_hlcdc_dc *dc = drm_dev->dev_private;

        clk_prepare_enable(dc->hlcdc->periph_clk);
        regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, dc->suspend.imr);

        return drm_atomic_helper_resume(drm_dev, dc->suspend.state);
}
static int exynos_drm_suspend(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct exynos_drm_private *private = drm_dev->dev_private;

        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;

        drm_kms_helper_poll_disable(drm_dev);
        exynos_drm_fbdev_suspend(drm_dev);
        private->suspend_state = drm_atomic_helper_suspend(drm_dev);
        if (IS_ERR(private->suspend_state)) {
                exynos_drm_fbdev_resume(drm_dev);
                drm_kms_helper_poll_enable(drm_dev);
                return PTR_ERR(private->suspend_state);
        }

        return 0;
}

static int exynos_drm_resume(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct exynos_drm_private *private = drm_dev->dev_private;

        if (pm_runtime_suspended(dev) || !drm_dev)
                return 0;

        drm_atomic_helper_resume(drm_dev, private->suspend_state);
        exynos_drm_fbdev_resume(drm_dev);
        drm_kms_helper_poll_enable(drm_dev);

        return 0;
}
static int fsl_dcu_drm_pm_suspend(struct device *dev)
{
        struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);

        if (!fsl_dev)
                return 0;

        disable_irq(fsl_dev->irq);
        drm_kms_helper_poll_disable(fsl_dev->drm);

        console_lock();
        drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
        console_unlock();

        fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
        if (IS_ERR(fsl_dev->state)) {
                console_lock();
                drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
                console_unlock();

                drm_kms_helper_poll_enable(fsl_dev->drm);
                enable_irq(fsl_dev->irq);
                return PTR_ERR(fsl_dev->state);
        }

        clk_disable_unprepare(fsl_dev->pix_clk);
        clk_disable_unprepare(fsl_dev->clk);

        return 0;
}

static int fsl_dcu_drm_pm_resume(struct device *dev)
{
        struct fsl_dcu_drm_device *fsl_dev = dev_get_drvdata(dev);
        int ret;

        if (!fsl_dev)
                return 0;

        ret = clk_prepare_enable(fsl_dev->clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable dcu clk\n");
                return ret;
        }

        if (fsl_dev->tcon)
                fsl_tcon_bypass_enable(fsl_dev->tcon);
        fsl_dcu_drm_init_planes(fsl_dev->drm);
        drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);

        console_lock();
        drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
        console_unlock();

        drm_kms_helper_poll_enable(fsl_dev->drm);
        enable_irq(fsl_dev->irq);

        return 0;
}
static int __maybe_unused hibmc_pm_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        struct hibmc_drm_private *priv = drm_dev->dev_private;

        drm_kms_helper_poll_disable(drm_dev);
        priv->suspend_state = drm_atomic_helper_suspend(drm_dev);
        if (IS_ERR(priv->suspend_state)) {
                DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n",
                          PTR_ERR(priv->suspend_state));
                drm_kms_helper_poll_enable(drm_dev);
                return PTR_ERR(priv->suspend_state);
        }

        return 0;
}

static int __maybe_unused hibmc_pm_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        struct hibmc_drm_private *priv = drm_dev->dev_private;

        drm_atomic_helper_resume(drm_dev, priv->suspend_state);
        drm_kms_helper_poll_enable(drm_dev);

        return 0;
}
i915 is excluded since it's rather complicated.
static int imx_drm_suspend(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct imx_drm_device *imxdrm;

        /* The drm_dev is NULL before .load hook is called */
        if (drm_dev == NULL)
                return 0;

        drm_kms_helper_poll_disable(drm_dev);

        imxdrm = drm_dev->dev_private;
        imxdrm->state = drm_atomic_helper_suspend(drm_dev);
        if (IS_ERR(imxdrm->state)) {
                drm_kms_helper_poll_enable(drm_dev);
                return PTR_ERR(imxdrm->state);
        }

        return 0;
}

static int imx_drm_resume(struct device *dev)
{
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct imx_drm_device *imx_drm;

        if (drm_dev == NULL)
                return 0;

        imx_drm = drm_dev->dev_private;
        drm_atomic_helper_resume(drm_dev, imx_drm->state);
        drm_kms_helper_poll_enable(drm_dev);

        return 0;
}
static int mtk_drm_sys_suspend(struct device *dev)
{
        struct mtk_drm_private *private = dev_get_drvdata(dev);
        struct drm_device *drm = private->drm;

        drm_kms_helper_poll_disable(drm);

        private->suspend_state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(private->suspend_state)) {
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(private->suspend_state);
        }

        DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
        return 0;
}

static int mtk_drm_sys_resume(struct device *dev)
{
        struct mtk_drm_private *private = dev_get_drvdata(dev);
        struct drm_device *drm = private->drm;

        drm_atomic_helper_resume(drm, private->suspend_state);
        drm_kms_helper_poll_enable(drm);

        DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
        return 0;
}
nouveau is also rather complicated.
static int rockchip_drm_sys_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct rockchip_drm_private *priv;

        if (!drm)
                return 0;

        drm_kms_helper_poll_disable(drm);
        rockchip_drm_fb_suspend(drm);

        priv = drm->dev_private;
        priv->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(priv->state)) {
                rockchip_drm_fb_resume(drm);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(priv->state);
        }

        return 0;
}

static int rockchip_drm_sys_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct rockchip_drm_private *priv;

        if (!drm)
                return 0;

        priv = drm->dev_private;
        drm_atomic_helper_resume(drm, priv->state);
        rockchip_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
static int host1x_drm_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_kms_helper_poll_disable(drm);
        tegra_drm_fb_suspend(drm);

        tegra->state = drm_atomic_helper_suspend(drm);
        if (IS_ERR(tegra->state)) {
                tegra_drm_fb_resume(drm);
                drm_kms_helper_poll_enable(drm);
                return PTR_ERR(tegra->state);
        }

        return 0;
}

static int host1x_drm_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct tegra_drm *tegra = drm->dev_private;

        drm_atomic_helper_resume(drm, tegra->state);
        tegra_drm_fb_resume(drm);
        drm_kms_helper_poll_enable(drm);

        return 0;
}
static int tilcdc_pm_suspend(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct tilcdc_drm_private *priv = ddev->dev_private;

        priv->saved_state = drm_atomic_helper_suspend(ddev);

        /* Select sleep pin state */
        pinctrl_pm_select_sleep_state(dev);

        return 0;
}

static int tilcdc_pm_resume(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct tilcdc_drm_private *priv = ddev->dev_private;
        int ret = 0;

        /* Select default pin state */
        pinctrl_pm_select_default_state(dev);

        if (priv->saved_state)
                ret = drm_atomic_helper_resume(ddev, priv->saved_state);

        return ret;
}
int tinydrm_suspend(struct tinydrm_device *tdev)
{
        struct drm_atomic_state *state;

        if (tdev->suspend_state) {
                DRM_ERROR("Failed to suspend: state already set\n");
                return -EINVAL;
        }

        drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 1);
        state = drm_atomic_helper_suspend(tdev->drm);
        if (IS_ERR(state)) {
                drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);
                return PTR_ERR(state);
        }

        tdev->suspend_state = state;

        return 0;
}

int tinydrm_resume(struct tinydrm_device *tdev)
{
        struct drm_atomic_state *state = tdev->suspend_state;
        int ret;

        if (!state) {
                DRM_ERROR("Failed to resume: state is not set\n");
                return -EINVAL;
        }

        tdev->suspend_state = NULL;

        ret = drm_atomic_helper_resume(tdev->drm, state);
        if (ret) {
                DRM_ERROR("Error resuming state: %d\n", ret);
                return ret;
        }

        drm_fbdev_cma_set_suspend_unlocked(tdev->fbdev_cma, 0);

        return 0;
}
Noralf.
On Wed, Nov 01, 2017 at 01:59:00PM +0100, Noralf Trønnes wrote:
Does it fit in drm_modeset_helper.c ?
/**
 * DOC: aux kms helpers
 *
 * This helper library contains various one-off functions which don't really fit
 * anywhere else in the DRM modeset helper library.
 */
Even better I think, avoids another file no one remembers exists :-) -Daniel
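A rough sketch of how that could end up looking, assuming the functions keep the names from the diff above and the declarations land next to the existing modeset helper prototypes (placement is only an assumption):

/* include/drm/drm_modeset_helper.h (hypothetical placement) */
int drm_mode_config_suspend(struct drm_device *dev);
int drm_mode_config_resume(struct drm_device *dev);

/* The implementations would then move from drm_mode_config.c into
 * drm_modeset_helper.c, next to the "aux kms helpers" DOC section quoted
 * above.
 */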