These refinements include using the standard mailbox callback interface, detecting CMDQ execution timeouts, and using a single preallocated cmdq_handle per crtc.
Changes in v4:
1. Set the cmdq_vblank_cnt initial value to 3.
2. Move mtk_drm_cmdq_pkt_create() into the same #if scope as mtk_drm_cmdq_pkt_destroy().

Changes in v3:
1. Revert "drm/mediatek: clear pending flag when cmdq packet is done" and add it back after the CMDQ refinement patches.
2. Change the removal of struct cmdq_client to the removal of the pointer to struct cmdq_client.
3. Fix the pkt buffer being allocated once but freed multiple times.

Changes in v2:
1. Define mtk_drm_cmdq_pkt_create() and mtk_drm_cmdq_pkt_destroy() when CONFIG_MTK_CMDQ is reachable.
Chun-Kuang Hu (4):
  drm/mediatek: Use mailbox rx_callback instead of cmdq_task_cb
  drm/mediatek: Remove the pointer of struct cmdq_client
  drm/mediatek: Detect CMDQ execution timeout
  drm/mediatek: Add cmdq_handle in mtk_crtc

Yongqiang Niu (1):
  drm/mediatek: clear pending flag when cmdq packet is done

 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 174 ++++++++++++++++++++----
 1 file changed, 150 insertions(+), 24 deletions(-)
From: Chun-Kuang Hu chunkuang.hu@kernel.org
rx_callback is the standard mailbox callback mechanism and can cover the function of the proprietary cmdq_task_cb, so use the standard callback instead of the proprietary one.
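In short, the handler moves from the CMDQ-specific callback signature to the generic mailbox one, and the cmdq_pkt_flush_async() call is replaced by a direct mailbox send plus a txdone report (rough sketch; the full change is in the diff below):

/* old: proprietary CMDQ callback */
static void ddp_cmdq_cb(struct cmdq_cb_data data);

/* new: generic mailbox client callback */
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg);

/* flushing now goes through the mailbox API directly */
mbox_send_message(mtk_crtc->cmdq_client->chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client->chan, 0);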
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
---
 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index a4e80e499674..369d3e68c0b6 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -4,6 +4,8 @@
  */

 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_controller.h>
 #include <linux/pm_runtime.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
 #include <linux/soc/mediatek/mtk-mmsys.h>
@@ -222,9 +224,11 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 }

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static void ddp_cmdq_cb(struct cmdq_cb_data data)
+static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 {
-	cmdq_pkt_destroy(data.data);
+	struct cmdq_cb_data *data = mssg;
+
+	cmdq_pkt_destroy(data->pkt);
 }
 #endif

@@ -475,7 +479,12 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
 		mtk_crtc_ddp_config(crtc, cmdq_handle);
 		cmdq_pkt_finalize(cmdq_handle);
-		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+		dma_sync_single_for_device(mtk_crtc->cmdq_client->chan->mbox->dev,
+					   cmdq_handle->pa_base,
+					   cmdq_handle->cmd_buf_size,
+					   DMA_TO_DEVICE);
+		mbox_send_message(mtk_crtc->cmdq_client->chan, cmdq_handle);
+		mbox_client_txdone(mtk_crtc->cmdq_client->chan, 0);
 	}
 #endif
 	mtk_crtc->config_updating = false;
@@ -839,6 +848,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	}

 	if (mtk_crtc->cmdq_client) {
+		mtk_crtc->cmdq_client->client.rx_callback = ddp_cmdq_cb;
 		ret = of_property_read_u32_index(priv->mutex_node,
 						 "mediatek,gce-events",
 						 drm_crtc_index(&mtk_crtc->base),
Hi, Jason:
On Tue, Oct 26, 2021 at 1:29 PM, jason-jh.lin jason-jh.lin@mediatek.com wrote:
From: Chun-Kuang Hu chunkuang.hu@kernel.org
rx_callback is the standard mailbox callback mechanism and can cover the function of the proprietary cmdq_task_cb, so use the standard callback instead of the proprietary one.
Reviewed-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
From: Chun-Kuang Hu chunkuang.hu@kernel.org
The mailbox rx_callback passes a struct mbox_client pointer to the callback function, but this pointer cannot be mapped back to the mtk_drm_crtc instance because mtk_drm_crtc only holds a pointer to struct cmdq_client:
struct cmdq_client {
	struct mbox_client client;
	struct mbox_chan *chan;
};

struct mtk_drm_crtc {
	/* client instance data */
	struct cmdq_client *cmdq_client;
};
so remove the pointer and embed struct cmdq_client directly in the mtk_drm_crtc instance:
struct mtk_drm_crtc {
	/* client instance data */
	struct cmdq_client cmdq_client;
};
Then the rx_callback function can use the struct mbox_client pointer to get back to the struct mtk_drm_crtc instance.
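With the embedded member, the callback can recover the crtc from its struct mbox_client argument with two container_of() steps, roughly like this (as done in a later patch of this series):

static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);

	/* mtk_crtc is now usable from the callback */
}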
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
---
 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 37 +++++++++++++------------
 1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 369d3e68c0b6..e23e3224ac67 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -52,7 +52,7 @@ struct mtk_drm_crtc {
 	bool				pending_async_planes;

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct cmdq_client		*cmdq_client;
+	struct cmdq_client		cmdq_client;
 	u32				cmdq_event;
 #endif

@@ -472,19 +472,19 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 		mtk_mutex_release(mtk_crtc->mutex);
 	}
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (mtk_crtc->cmdq_client) {
-		mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
-		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+	if (mtk_crtc->cmdq_client.chan) {
+		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
+		cmdq_handle = cmdq_pkt_create(&mtk_crtc->cmdq_client, PAGE_SIZE);
 		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
 		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
 		mtk_crtc_ddp_config(crtc, cmdq_handle);
 		cmdq_pkt_finalize(cmdq_handle);
-		dma_sync_single_for_device(mtk_crtc->cmdq_client->chan->mbox->dev,
+		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
-		mbox_send_message(mtk_crtc->cmdq_client->chan, cmdq_handle);
-		mbox_client_txdone(mtk_crtc->cmdq_client->chan, 0);
+		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
+		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
 	}
 #endif
 	mtk_crtc->config_updating = false;
@@ -498,7 +498,7 @@ static void mtk_crtc_ddp_irq(void *data)
 	struct mtk_drm_private *priv = crtc->dev->dev_private;

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
 #else
 	if (!priv->data->shadow_register)
 #endif
@@ -838,17 +838,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 	mutex_init(&mtk_crtc->hw_lock);

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	mtk_crtc->cmdq_client =
-			cmdq_mbox_create(mtk_crtc->mmsys_dev,
-					 drm_crtc_index(&mtk_crtc->base));
-	if (IS_ERR(mtk_crtc->cmdq_client)) {
+	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
+	mtk_crtc->cmdq_client.client.tx_block = false;
+	mtk_crtc->cmdq_client.client.knows_txdone = true;
+	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
+	mtk_crtc->cmdq_client.chan =
+			mbox_request_channel(&mtk_crtc->cmdq_client.client,
+					     drm_crtc_index(&mtk_crtc->base));
+	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
 		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
 			drm_crtc_index(&mtk_crtc->base));
-		mtk_crtc->cmdq_client = NULL;
+		mtk_crtc->cmdq_client.chan = NULL;
 	}

-	if (mtk_crtc->cmdq_client) {
-		mtk_crtc->cmdq_client->client.rx_callback = ddp_cmdq_cb;
+	if (mtk_crtc->cmdq_client.chan) {
 		ret = of_property_read_u32_index(priv->mutex_node,
 						 "mediatek,gce-events",
 						 drm_crtc_index(&mtk_crtc->base),
@@ -856,8 +859,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		if (ret) {
 			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
 				drm_crtc_index(&mtk_crtc->base));
-			cmdq_mbox_destroy(mtk_crtc->cmdq_client);
-			mtk_crtc->cmdq_client = NULL;
+			mbox_free_channel(mtk_crtc->cmdq_client.chan);
+			mtk_crtc->cmdq_client.chan = NULL;
 		}
 	}
 #endif
Hi, Jason:
On Tue, Oct 26, 2021 at 1:29 PM, jason-jh.lin jason-jh.lin@mediatek.com wrote:
From: Chun-Kuang Hu chunkuang.hu@kernel.org
The mailbox rx_callback passes a struct mbox_client pointer to the callback function, but this pointer cannot be mapped back to the mtk_drm_crtc instance because mtk_drm_crtc only holds a pointer to struct cmdq_client:

struct cmdq_client {
	struct mbox_client client;
	struct mbox_chan *chan;
};

struct mtk_drm_crtc {
	/* client instance data */
	struct cmdq_client *cmdq_client;
};

so remove the pointer and embed struct cmdq_client directly in the mtk_drm_crtc instance:

struct mtk_drm_crtc {
	/* client instance data */
	struct cmdq_client cmdq_client;
};

Then the rx_callback function can use the struct mbox_client pointer to get back to the struct mtk_drm_crtc instance.
Reviewed-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
From: Chun-Kuang Hu chunkuang.hu@kernel.org
CMDQ is used to update display registers in the vblank period, so a command should execute within the next 2 vblanks. Occasionally one vblank interrupt fires before the message is sent, and one more fires after the cmdq work is done, so if the command still has not executed after 3 vblank interrupts, a timeout has happened.
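The detection scheme is a simple counter: arm it when the packet is sent, clear it in the mailbox callback, and decrement it on each vblank interrupt; reaching zero means the command never executed. Rough sketch of the logic in the diff below:

/* when sending the packet */
mtk_crtc->cmdq_vblank_cnt = 3;

/* in ddp_cmdq_cb(): the packet completed in time */
mtk_crtc->cmdq_vblank_cnt = 0;

/* in the vblank irq handler */
if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
	DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
		  drm_crtc_index(&mtk_crtc->base));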
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
---
 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index e23e3224ac67..dad1f85ee315 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -54,6 +54,7 @@ struct mtk_drm_crtc {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	struct cmdq_client		cmdq_client;
 	u32				cmdq_event;
+	u32				cmdq_vblank_cnt;
 #endif

 	struct device			*mmsys_dev;
@@ -227,7 +228,10 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 {
 	struct cmdq_cb_data *data = mssg;
+	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
+	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);

+	mtk_crtc->cmdq_vblank_cnt = 0;
 	cmdq_pkt_destroy(data->pkt);
 }
 #endif
@@ -483,6 +487,15 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
+		/*
+		 * CMDQ command should execute in next 3 vblank.
+		 * One vblank interrupt before send message (occasionally)
+		 * and one vblank interrupt after cmdq done,
+		 * so it's timeout after 3 vblank interrupt.
+		 * If it fail to execute in next 3 vblank, timeout happen.
+		 */
+		mtk_crtc->cmdq_vblank_cnt = 3;
+
 		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
 		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
 	}
@@ -499,11 +512,14 @@ static void mtk_crtc_ddp_irq(void *data)

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
+		mtk_crtc_ddp_config(crtc, NULL);
+	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
+		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
+			  drm_crtc_index(&mtk_crtc->base));
 #else
 	if (!priv->data->shadow_register)
-#endif
 		mtk_crtc_ddp_config(crtc, NULL);
-
+#endif
 	mtk_drm_finish_page_flip(mtk_crtc);
 }
Hi, Jason:
On Tue, Oct 26, 2021 at 1:29 PM, jason-jh.lin jason-jh.lin@mediatek.com wrote:
From: Chun-Kuang Hu chunkuang.hu@kernel.org
CMDQ is used to update display registers in the vblank period, so a command should execute within the next 2 vblanks. Occasionally one vblank interrupt fires before the message is sent, and one more fires after the cmdq work is done, so if the command still has not executed after 3 vblank interrupts, a timeout has happened.
Reviewed-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
From: Chun-Kuang Hu chunkuang.hu@kernel.org
One mtk_crtc needs just one cmdq_handle, so embed a cmdq_handle in mtk_crtc to avoid frequent allocation and freeing of cmdq_handle.
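The packet is now allocated and DMA-mapped once at crtc creation and simply rewound before each flush; a rough sketch of the reuse pattern (see the diff below for the full helpers):

/* at crtc creation: allocate and map the command buffer once */
ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle, PAGE_SIZE);

/* at each flush: reuse the same packet instead of cmdq_pkt_create()/cmdq_pkt_destroy() */
cmdq_handle->cmd_buf_size = 0;	/* rewind the command buffer */
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);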
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
---
 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 64 +++++++++++++++++++++++--
 1 file changed, 60 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index dad1f85ee315..31f05efc1bc0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -53,6 +53,7 @@ struct mtk_drm_crtc {

 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	struct cmdq_client		cmdq_client;
+	struct cmdq_pkt			cmdq_handle;
 	u32				cmdq_event;
 	u32				cmdq_vblank_cnt;
 #endif
@@ -107,12 +108,59 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
 	}
 }

+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+				   size_t size)
+{
+	struct device *dev;
+	dma_addr_t dma_addr;
+
+	pkt->va_base = kzalloc(size, GFP_KERNEL);
+	if (!pkt->va_base) {
+		kfree(pkt);
+		return -ENOMEM;
+	}
+	pkt->buf_size = size;
+	pkt->cl = (void *)client;
+
+	dev = client->chan->mbox->dev;
+	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+				  DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, dma_addr)) {
+		dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+		kfree(pkt->va_base);
+		kfree(pkt);
+		return -ENOMEM;
+	}
+
+	pkt->pa_base = dma_addr;
+
+	return 0;
+}
+
+static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+			 DMA_TO_DEVICE);
+	kfree(pkt->va_base);
+	kfree(pkt);
+}
+#endif
+
 static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

 	mtk_mutex_put(mtk_crtc->mutex);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);

+	if (mtk_crtc->cmdq_client.chan)
+		mbox_free_channel(mtk_crtc->cmdq_client.chan);
+	mtk_crtc->cmdq_client.chan = NULL;
+#endif
 	drm_crtc_cleanup(crtc);
 }

@@ -227,12 +275,10 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 {
-	struct cmdq_cb_data *data = mssg;
 	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
 	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);

 	mtk_crtc->cmdq_vblank_cnt = 0;
-	cmdq_pkt_destroy(data->pkt);
 }
 #endif

@@ -438,7 +484,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct cmdq_pkt *cmdq_handle;
+	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
 #endif
 	struct drm_crtc *crtc = &mtk_crtc->base;
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -478,7 +524,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	if (mtk_crtc->cmdq_client.chan) {
 		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
-		cmdq_handle = cmdq_pkt_create(&mtk_crtc->cmdq_client, PAGE_SIZE);
+		cmdq_handle->cmd_buf_size = 0;
 		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
 		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
 		mtk_crtc_ddp_config(crtc, cmdq_handle);
@@ -877,6 +923,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
+		} else {
+			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
+						      &mtk_crtc->cmdq_handle,
+						      PAGE_SIZE);
+			if (ret) {
+				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
+					drm_crtc_index(&mtk_crtc->base));
+				mbox_free_channel(mtk_crtc->cmdq_client.chan);
+				mtk_crtc->cmdq_client.chan = NULL;
+			}
 		}
 	}
 #endif
Hi, Jason:
On Tue, Oct 26, 2021 at 1:29 PM, jason-jh.lin jason-jh.lin@mediatek.com wrote:
From: Chun-Kuang Hu chunkuang.hu@kernel.org
One mtk_crtc needs just one cmdq_handle, so embed a cmdq_handle in mtk_crtc to avoid frequent allocation and freeing of cmdq_handle.
Signed-off-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
 static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

 	mtk_mutex_put(mtk_crtc->mutex);
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);

+	if (mtk_crtc->cmdq_client.chan)
This is not related to this patch, so move to an independent patch.
Regards, Chun-Kuang.
From: Yongqiang Niu yongqiang.niu@mediatek.com
In cmdq mode, a packet may be flushed before it is executed, so the pending flags should only be cleared after the cmdq packet is done.
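So with CMDQ the flags stay set while the packet is being built and are cleared only from the mailbox callback once the packet has executed successfully; roughly:

/* in mtk_crtc_ddp_config(): keep the flag set when writing through CMDQ */
if (!cmdq_handle)
	state->pending_config = false;

/* in ddp_cmdq_cb(): the packet has executed, clear the pending flags now */
if (data->sta < 0)
	return;
state->pending_config = false;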
Signed-off-by: Yongqiang Niu yongqiang.niu@mediatek.com
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com
---
 drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 51 ++++++++++++++++++++++---
 1 file changed, 46 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 31f05efc1bc0..ea285795776f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -275,8 +275,42 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 {
+	struct cmdq_cb_data *data = mssg;
 	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
 	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+	struct mtk_crtc_state *state;
+	unsigned int i;
+
+	if (data->sta < 0)
+		return;
+
+	state = to_mtk_crtc_state(mtk_crtc->base.state);
+
+	state->pending_config = false;
+
+	if (mtk_crtc->pending_planes) {
+		for (i = 0; i < mtk_crtc->layer_nr; i++) {
+			struct drm_plane *plane = &mtk_crtc->planes[i];
+			struct mtk_plane_state *plane_state;
+
+			plane_state = to_mtk_plane_state(plane->state);
+
+			plane_state->pending.config = false;
+		}
+		mtk_crtc->pending_planes = false;
+	}
+
+	if (mtk_crtc->pending_async_planes) {
+		for (i = 0; i < mtk_crtc->layer_nr; i++) {
+			struct drm_plane *plane = &mtk_crtc->planes[i];
+			struct mtk_plane_state *plane_state;
+
+			plane_state = to_mtk_plane_state(plane->state);
+
+			plane_state->pending.async_config = false;
+		}
+		mtk_crtc->pending_async_planes = false;
+	}

 	mtk_crtc->cmdq_vblank_cnt = 0;
 }
@@ -432,7 +466,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
					    state->pending_vrefresh, 0,
					    cmdq_handle);

-		state->pending_config = false;
+		if (!cmdq_handle)
+			state->pending_config = false;
 	}

 	if (mtk_crtc->pending_planes) {
@@ -452,9 +487,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state,
						  cmdq_handle);
-			plane_state->pending.config = false;
+			if (!cmdq_handle)
+				plane_state->pending.config = false;
 		}
-		mtk_crtc->pending_planes = false;
+
+		if (!cmdq_handle)
+			mtk_crtc->pending_planes = false;
 	}

 	if (mtk_crtc->pending_async_planes) {
@@ -474,9 +512,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
 			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state,
						  cmdq_handle);
-			plane_state->pending.async_config = false;
+			if (!cmdq_handle)
+				plane_state->pending.async_config = false;
 		}
-		mtk_crtc->pending_async_planes = false;
+
+		if (!cmdq_handle)
+			mtk_crtc->pending_async_planes = false;
 	}
 }
Hi, Jason:
On Tue, Oct 26, 2021 at 1:29 PM, jason-jh.lin jason-jh.lin@mediatek.com wrote:
From: Yongqiang Niu yongqiang.niu@mediatek.com
In cmdq mode, a packet may be flushed before it is executed, so the pending flags should only be cleared after the cmdq packet is done.
Reviewed-by: Chun-Kuang Hu chunkuang.hu@kernel.org
Signed-off-by: Yongqiang Niu yongqiang.niu@mediatek.com
Signed-off-by: jason-jh.lin jason-jh.lin@mediatek.com