On 11/9/2021 11:41 PM, Rob Clark wrote:
From: Rob Clark <robdclark@chromium.org>
cur_ctx_seqno already does the same thing, but handles the edge cases where a refcnt'd context can live after lastclose. So let's not have two ways to do the same thing.
Signed-off-by: Rob Clark <robdclark@chromium.org>
---
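As a side note on the rationale above: tracking the last context by pointer is vulnerable to address reuse (a freed context and a newly created one can alias the same address), whereas a monotonically increasing seqno never repeats. The standalone sketch below illustrates only that comparison; struct example_ctx, struct example_gpu and the two helper functions are simplified, hypothetical stand-ins, not the actual driver structs from this patch.

/*
 * Illustrative sketch only (not part of the patch): pointer-based vs
 * seqno-based "is this the current context?" checks.  The types and
 * helpers here are hypothetical stand-ins for the driver structures.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_ctx {
	uint32_t seqno;          /* assigned from a monotonic counter at create time */
};

struct example_gpu {
	uint32_t cur_ctx_seqno;  /* 0 means "no context has submitted yet" */
};

/* Pointer compare: can falsely match if a new ctx reuses a freed ctx's address. */
static bool ctx_is_current_by_ptr(const void *lastctx, const void *ctx)
{
	return lastctx == ctx;
}

/* Seqno compare: immune to address reuse, since seqnos are never recycled. */
static bool ctx_is_current_by_seqno(const struct example_gpu *gpu,
				    const struct example_ctx *ctx)
{
	return gpu->cur_ctx_seqno == ctx->seqno;
}

With the field moved into struct msm_gpu, the per-generation submit paths and a6xx_set_pagetable() all share one seqno instead of each tracking their own.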
 drivers/gpu/drm/msm/adreno/a2xx_gpu.c |  3 +--
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c |  3 +--
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c |  3 +--
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c |  8 +++-----
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c |  9 +++------
 drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 10 ----------
 drivers/gpu/drm/msm/msm_drv.c         |  6 ------
 drivers/gpu/drm/msm/msm_drv.h         |  2 +-
 drivers/gpu/drm/msm/msm_gpu.c         |  2 +-
 drivers/gpu/drm/msm/msm_gpu.h         | 11 +++++++++++
 10 files changed, 22 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index bdc989183c64..22e8295a5e2b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu);
 
 static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i;
 
@@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 8fb847c174ff..2e481e2692ba 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu);
 
 static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i;
 
@@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a96ee79cc5e0..c5524d6e8705 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu);
 
 static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i;
 
@@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 			/* ignore if there has not been a ctx switch: */
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 5e2750eb3810..6163990a4d09 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,7 +65,6 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 
 static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	struct msm_gem_object *obj;
 	uint32_t *ptr, dwords;
@@ -76,7 +75,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
@@ -126,12 +125,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;
 
 	if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
-		priv->lastctx = NULL;
+		gpu->cur_ctx_seqno = 0;
 		a5xx_submit_in_rb(gpu, submit);
 		return;
 	}
@@ -166,7 +164,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 33da25b81615..3d2da81cb2c9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	u32 asid;
 	u64 memptr = rbmemptr(ring, ttbr0);
 
-	if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
+	if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
 		return;
 
 	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -138,14 +138,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	 */
 	OUT_PKT7(ring, CP_EVENT_WRITE, 1);
 	OUT_RING(ring, 0x31);
-
-	a6xx_gpu->cur_ctx_seqno = ctx->seqno;
 }
 
 static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
 	unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
-	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 	struct msm_ringbuffer *ring = submit->ring;
@@ -177,7 +174,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 			break;
 		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-			if (priv->lastctx == submit->queue->ctx)
+			if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
 				break;
 			fallthrough;
 		case MSM_SUBMIT_CMD_BUF:
@@ -1081,7 +1078,7 @@ static int hw_init(struct msm_gpu *gpu)
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
 
-	a6xx_gpu->cur_ctx_seqno = 0;
+	gpu->cur_ctx_seqno = 0;
 
 	/* Enable the SQE to start the CP engine */
 	gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 8e5527c881b1..86e0a7c3fe6d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -20,16 +20,6 @@ struct a6xx_gpu {
 
 	struct msm_ringbuffer *cur_ring;
 
-	/**
-	 * cur_ctx_seqno:
-	 *
-	 * The ctx->seqno value of the context with current pgtables
-	 * installed.  Tracked by seqno rather than pointer value to
-	 * avoid dangling pointers, and cases where a ctx can be freed
-	 * and a new one created with the same address.
-	 */
-	int cur_ctx_seqno;
-
 	struct a6xx_gmu gmu;
 
 	struct drm_gem_object *shadow_bo;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 7936e8d498dd..73e827641024 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -752,14 +752,8 @@ static void context_close(struct msm_file_private *ctx)
 
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
 
-	mutex_lock(&dev->struct_mutex);
-	if (ctx == priv->lastctx)
-		priv->lastctx = NULL;
-	mutex_unlock(&dev->struct_mutex);
-
 	context_close(ctx);
 }
 
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 69952b239384..2943c21d9aac 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,7 +164,7 @@ struct msm_drm_private {
 
 	/* when we have more than one 'msm_gpu' these need to be an array: */
 	struct msm_gpu *gpu;
-	struct msm_file_private *lastctx;
+
 	/* gpu is only set on open(), but we need this info earlier */
 	bool is_a2xx;
 	bool has_cached_coherent;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 2c46cd968ac4..3dfc58e6498f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -763,7 +763,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	mutex_unlock(&gpu->active_lock);
 
 	gpu->funcs->submit(gpu, submit);
-	priv->lastctx = submit->queue->ctx;
+	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
 
 	hangcheck_timer_reset(gpu);
 }
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 59870095ea41..623ee416c568 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -144,6 +144,17 @@ struct msm_gpu {
 	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
 	int nr_rings;
 
+	/**
+	 * cur_ctx_seqno:
+	 *
+	 * The ctx->seqno value of the last context to submit rendering,
+	 * and the one with current pgtables installed (for generations
+	 * that support per-context pgtables).  Tracked by seqno rather
+	 * than pointer value to avoid dangling pointers, and cases where
+	 * a ctx can be freed and a new one created with the same address.
+	 */
+	int cur_ctx_seqno;
+
 	/*
 	 * List of GEM active objects on this gpu.  Protected by
 	 * msm_drm_private::mm_lock
Reviewed-by: Akhil P Oommen <akhilpo@codeaurora.org>
-Akhil.