Here is my stack of Adreno GPU goodness for 4.14. Starting off with a few fixes that could be appropriate for 4.13 if you deem them so and then moving into some new features:
- a5xx hardware fault detection - can detect a legitimate fault within microseconds and fire an interrupt, so you should be able to recover and move on much faster than relying on the hangcheck timer.
- submitqueues - submitqueues are the kernel-side equivalent of a context. The user can set submitqueue-specific flags and features (such as ringbuffer priority), and in the following patch we move to per-submitqueue fences.
- multiple ringbuffer preemption - provide for multiple ringbuffers, allow the user to select a ringbuffer to submit on and preempt between the different priorities. You've seen some of this code before but some of our newer features have cleaned things up significantly.
Jordan Crouse (17):
  drm/msm: Remove some potentially blocked register ranges
  drm/msm: Allow hardware clock gating to be toggled
  drm/msm: Turn off hardware clock gating before reading A5XX registers
  drm/msm: Remove uneeded platform dev members
  drm/msm: args->fence should be args->flags
  drm/msm: Remove __user from __u64 data types
  drm/msm: Add A5XX hardware fault detection
  drm/msm: Add per-instance submit queues
  drm/msm: Implement per-submitqueue fences
  drm/msm: Attach the GPU MMU when it is created
  drm/msm: Add a helper function for in-kernel buffer allocations
  drm/msm: Move memptrs to msm_gpu
  drm/msm: Support multiple ringbuffers
  drm/msm: Add a parameter query for the number of ringbuffers
  drm/msm: Shadow current pointer in the ring until command is complete
  drm/msm: Make the value of RB_CNTL (almost) generic
  drm/msm: Implement preemption for A5XX targets
 drivers/gpu/drm/msm/Makefile              |   4 +-
 drivers/gpu/drm/msm/adreno/a3xx_gpu.c     |  12 +-
 drivers/gpu/drm/msm/adreno/a3xx_gpu.h     |   1 -
 drivers/gpu/drm/msm/adreno/a4xx_gpu.c     |  12 +-
 drivers/gpu/drm/msm/adreno/a4xx_gpu.h     |   1 -
 drivers/gpu/drm/msm/adreno/a5xx_gpu.c     | 366 +++++++++++++++++++++---------
 drivers/gpu/drm/msm/adreno/a5xx_gpu.h     | 105 ++++++++-
 drivers/gpu/drm/msm/adreno/a5xx_power.c   |  20 +-
 drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 294 ++++++++++++++++++++++++
 drivers/gpu/drm/msm/adreno/adreno_gpu.c   | 224 ++++++++----------
 drivers/gpu/drm/msm/adreno/adreno_gpu.h   |  43 ++--
 drivers/gpu/drm/msm/msm_drv.c             |  62 ++++-
 drivers/gpu/drm/msm/msm_drv.h             |  28 ++-
 drivers/gpu/drm/msm/msm_fbdev.c           |  35 ++-
 drivers/gpu/drm/msm/msm_fence.c           |   2 +-
 drivers/gpu/drm/msm/msm_fence.h           |   2 +-
 drivers/gpu/drm/msm/msm_gem.c             |  46 ++++
 drivers/gpu/drm/msm/msm_gem.h             |   5 +-
 drivers/gpu/drm/msm/msm_gem_submit.c      |  27 ++-
 drivers/gpu/drm/msm/msm_gpu.c             | 259 +++++++++++++++------
 drivers/gpu/drm/msm/msm_gpu.h             |  54 ++++-
 drivers/gpu/drm/msm/msm_ringbuffer.c      |  35 +--
 drivers/gpu/drm/msm/msm_ringbuffer.h      |  32 ++-
 drivers/gpu/drm/msm/msm_submitqueue.c     | 160 +++++++++++++
 include/uapi/drm/msm_drm.h                |  30 ++-
 25 files changed, 1422 insertions(+), 437 deletions(-)
 create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_preempt.c
 create mode 100644 drivers/gpu/drm/msm/msm_submitqueue.c
The 0xf400 and 0xf800 ranges are in the RBBM_SECVID block, which may be protected from CPU access. Skip dumping them since they are of minimal use for debugging and they aren't worth a system hang.
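For context, the register lists consumed by adreno_show() are flat arrays of inclusive start/end pairs terminated by ~0, so dropping a pair simply skips that range during a dump. A rough sketch of how such a pair list is walked (illustrative only, not the driver's exact dump loop; dump_register_ranges is a made-up name):

static void dump_register_ranges(struct msm_gpu *gpu, const u32 *ranges)
{
	int i;

	/* each pair is an inclusive [start, end] range; ~0 ends the list */
	for (i = 0; ranges[i] != ~0; i += 2) {
		u32 addr;

		for (addr = ranges[i]; addr <= ranges[i + 1]; addr++)
			pr_info("IO:R %08x %08x\n", addr << 2,
				gpu_read(gpu, addr));
	}
}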
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 49 +++++++++++++++++------------------ 1 file changed, 24 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b4b54f1..1f60a9a 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -920,31 +920,30 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B, 0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095, 0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3, - 0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807, - 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0, - 0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, - 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82, - 0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, - 0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, - 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, - 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145, - 0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23, - 0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43, - 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, - 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147, - 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, - 0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268, - 0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, - 0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405, - 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3, - 0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9, - 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, - 0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A, - 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F, - 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0, - 0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, - 0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF, - ~0 + 0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841, + 0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28, + 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, + 0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98, + 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585, + 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, + 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, + 0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545, + 0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0, + 0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57, + 0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8, + 0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, + 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201, + 0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A, + 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F, + 0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0, + 0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947, + 0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, + 0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68, + 0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, + 0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05, + 0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3, + 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 
0xB97F, + 0xB9A0, 0xB9BF, ~0 };
static void a5xx_dump(struct msm_gpu *gpu)
There are some use cases where we need to turn off hardware clock gating before reading certain registers. Modify the A5XX HWCG function to allow the caller to enable or disable clock gating at will.
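With a single a5xx_set_hwcg() entry point, a caller that needs clock gating off around an AHB register read can simply bracket the access. A minimal sketch of the intended usage pattern (the wrapper name is hypothetical; the real call site is added in the next patch around adreno_show()):

static u32 a5xx_read_with_hwcg_off(struct msm_gpu *gpu, u32 reg)
{
	u32 val;

	a5xx_set_hwcg(gpu, false);	/* turn hardware clock gating off */
	val = gpu_read(gpu, reg);	/* register is now safe to read via AHB */
	a5xx_set_hwcg(gpu, true);	/* restore clock gating */

	return val;
}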
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 42 ++++++++--------------------------- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 1 + 2 files changed, 10 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 1f60a9a..c1f8c20 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -117,12 +117,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, gpu->funcs->flush(gpu); }
-struct a5xx_hwcg { +static const struct { u32 offset; u32 value; -}; - -static const struct a5xx_hwcg a530_hwcg[] = { +} a5xx_hwcg[] = { {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222}, {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222}, @@ -217,38 +215,16 @@ struct a5xx_hwcg { {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222} };
-static const struct { - int (*test)(struct adreno_gpu *gpu); - const struct a5xx_hwcg *regs; - unsigned int count; -} a5xx_hwcg_regs[] = { - { adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), }, -}; - -static void _a5xx_enable_hwcg(struct msm_gpu *gpu, - const struct a5xx_hwcg *regs, unsigned int count) +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) { unsigned int i;
- for (i = 0; i < count; i++) - gpu_write(gpu, regs[i].offset, regs[i].value); - - gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00); - gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182); -} + for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++) + gpu_write(gpu, a5xx_hwcg[i].offset, + state ? a5xx_hwcg[i].value : 0);
-static void a5xx_enable_hwcg(struct msm_gpu *gpu) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - unsigned int i; - - for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) { - if (a5xx_hwcg_regs[i].test(adreno_gpu)) { - _a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs, - a5xx_hwcg_regs[i].count); - return; - } - } + gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0); + gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180); }
static int a5xx_me_init(struct msm_gpu *gpu) @@ -545,7 +521,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
/* Enable HWCG */ - a5xx_enable_hwcg(gpu); + a5xx_set_hwcg(gpu, true);
gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 6638bc8..d24796f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -59,5 +59,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, }
bool a5xx_idle(struct msm_gpu *gpu); +void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
#endif /* __A5XX_GPU_H__ */
On A5XX GPUs, hardware clock gating needs to be turned off before reading certain GPU registers via AHB. Turn off HWCG before calling adreno_show() so all the registers can be dumped safely without a system hang.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 7 +++++++ 1 file changed, 7 insertions(+)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index c1f8c20..33763b0 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -995,7 +995,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) { seq_printf(m, "status: %08x\n", gpu_read(gpu, REG_A5XX_RBBM_STATUS)); + + /* + * Temporarily disable hardware clock gating before going into + * adreno_show to avoid issues while reading the registers + */ + a5xx_set_hwcg(gpu, false); adreno_show(gpu, m); + a5xx_set_hwcg(gpu, true); } #endif
Commit eeb754746b14 ("drm/msm/gpu: use pm-runtime") adds a pointer for the GPU platform device to the msm_gpu struct so we can happily remove the same pointers from the individual GPU structs.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2 -- drivers/gpu/drm/msm/adreno/a3xx_gpu.h | 1 - drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2 -- drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 1 - drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 3 +-- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 1 - 6 files changed, 1 insertion(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 0e3828ed..7791313 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -486,8 +486,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) adreno_gpu = &a3xx_gpu->base; gpu = &adreno_gpu->base;
- a3xx_gpu->pdev = pdev; - gpu->perfcntrs = perfcntrs; gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h index 85ff66c..ab60dc9 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h @@ -28,7 +28,6 @@
struct a3xx_gpu { struct adreno_gpu base; - struct platform_device *pdev;
/* if OCMEM is used for GMEM: */ uint32_t ocmem_base; diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 19abf22..58341ef 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -568,8 +568,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) adreno_gpu = &a4xx_gpu->base; gpu = &adreno_gpu->base;
- a4xx_gpu->pdev = pdev; - gpu->perfcntrs = NULL; gpu->num_perfcntrs = 0;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h index 0124720..f757184 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h @@ -23,7 +23,6 @@
struct a4xx_gpu { struct adreno_gpu base; - struct platform_device *pdev;
/* if OCMEM is used for GMEM: */ uint32_t ocmem_base; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 33763b0..3af29cae 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -397,7 +397,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) static bool loaded; struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); - struct platform_device *pdev = a5xx_gpu->pdev; + struct platform_device *pdev = gpu->pdev; int ret;
/* @@ -1046,7 +1046,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) adreno_gpu = &a5xx_gpu->base; gpu = &adreno_gpu->base;
- a5xx_gpu->pdev = pdev; adreno_gpu->registers = a5xx_registers; adreno_gpu->reg_offsets = a5xx_register_offsets;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index d24796f..3d62f00 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -23,7 +23,6 @@
struct a5xx_gpu { struct adreno_gpu base; - struct platform_device *pdev;
struct drm_gem_object *pm4_bo; uint64_t pm4_iova;
Fix a typo in msm_ioctl_gem_submit - check args->flags for the MSM_SUBMIT_NO_IMPLICIT flag instead of args->fence.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/msm_gem_submit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 6bfca74..c0bfe18b 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out;
- if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) { + if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { ret = submit_fence_sync(submit); if (ret) goto out;
__user should be used to identify user pointers and not __u64 variables containing pointers.
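To illustrate the convention (this helper is hypothetical and not part of the patch): the __u64 field carries a user address, and the __user annotation belongs on the pointer derived from it at the point of access, typically via u64_to_user_ptr():

#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <drm/msm_drm.h>

/* Copy one reloc out of the user array that cmd->relocs points at */
static int copy_one_reloc(const struct drm_msm_gem_submit_cmd *cmd,
		unsigned int i, struct drm_msm_gem_submit_reloc *reloc)
{
	/* __user lives on the derived pointer, not on the __u64 member */
	struct drm_msm_gem_submit_reloc __user *userptr =
		u64_to_user_ptr(cmd->relocs);

	if (copy_from_user(reloc, userptr + i, sizeof(*reloc)))
		return -EFAULT;

	return 0;
}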
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- include/uapi/drm/msm_drm.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 26c54f6..ad4eb28 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd { __u32 size; /* in, cmdstream size */ __u32 pad; __u32 nr_relocs; /* in, number of submit_reloc's */ - __u64 __user relocs; /* in, ptr to array of submit_reloc's */ + __u64 relocs; /* in, ptr to array of submit_reloc's */ };
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the @@ -215,8 +215,8 @@ struct drm_msm_gem_submit { __u32 fence; /* out */ __u32 nr_bos; /* in, number of submit_bo's */ __u32 nr_cmds; /* in, number of submit_cmd's */ - __u64 __user bos; /* in, ptr to array of submit_bo's */ - __u64 __user cmds; /* in, ptr to array of submit_cmd's */ + __u64 bos; /* in, ptr to array of submit_bo's */ + __u64 cmds; /* in, ptr to array of submit_cmd's */ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ };
The A5XX GPU has robust hardware fault detection that can detect an abnormal hardware condition and fire an interrupt in a matter of milliseconds, which is a lot better than waiting for the hangcheck timer.
Enable the interrupt and log information before kicking off recovery.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 3af29cae..6361193 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -438,6 +438,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ + A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) @@ -843,6 +844,28 @@ static void a5xx_gpmu_err_irq(struct msm_gpu *gpu) dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n"); }
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu) +{ + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); + + dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n", + ring ? ring->id : -1, ring ? ring->seqno : 0, + gpu_read(gpu, REG_A5XX_RBBM_STATUS), + gpu_read(gpu, REG_A5XX_CP_RB_RPTR), + gpu_read(gpu, REG_A5XX_CP_RB_WPTR), + gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI), + gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ), + gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI), + gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ)); + + /* Turn off the hangcheck timer to keep it from bothering us */ + del_timer(&gpu->hangcheck_timer); + + queue_work(priv->wq, &gpu->recover_work); +} + #define RBBM_ERROR_MASK \ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \ @@ -869,6 +892,9 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR) a5xx_cp_err_irq(gpu);
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT) + a5xx_fault_detect_irq(gpu); + if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS) a5xx_uche_err_irq(gpu);
Currently the behavior of a command stream is provided by the user application during submission, and the application is expected to internally maintain the settings for each 'context' or 'rendering queue' and specify the correct ones.
This works okay for simple cases but as applications become more complex we will want to set context specific flags and do various permission checks to allow certain contexts to enable additional privileges.
Add kernel-side submit queues to be analogous to 'contexts' or 'rendering queues' on the application side. Each file descriptor instance will maintain its own list of queues. Queues cannot be shared between file descriptors.
For backwards compatibility, queue id '0' is defined as a default queue created with medium priority and no special flags. This is intended to be the usual configuration for 99% of applications, so a garden-variety application can function correctly without ever creating a queue. Only those applications requiring the specific benefit of different queues need to create one.
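To make the intended userspace flow concrete, here is a hedged example (not part of the patch, error handling trimmed) that creates a queue with the new ioctl defined below and records its id for later submits:

#include <sys/ioctl.h>
#include "msm_drm.h"	/* the UAPI header extended by this patch */

/* Hypothetical userspace helper: create a submitqueue and return its id */
static int create_submitqueue(int drm_fd, unsigned int prio,
		unsigned int *queue_id)
{
	struct drm_msm_submitqueue req = {
		.flags = 0,	/* no MSM_SUBMITQUEUE_x flags defined yet */
		.prio = prio,	/* requested priority level */
	};

	if (ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return -1;

	*queue_id = req.id;	/* out: pass this in drm_msm_gem_submit.queueid */
	return 0;
}

An application that never calls this simply submits on queue 0 and keeps working unchanged.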
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/Makefile | 3 +- drivers/gpu/drm/msm/msm_drv.c | 51 +++++++++++-- drivers/gpu/drm/msm/msm_drv.h | 20 +++-- drivers/gpu/drm/msm/msm_gem.h | 1 + drivers/gpu/drm/msm/msm_gem_submit.c | 13 +++- drivers/gpu/drm/msm/msm_gpu.h | 15 ++++ drivers/gpu/drm/msm/msm_submitqueue.c | 139 ++++++++++++++++++++++++++++++++++ include/uapi/drm/msm_drm.h | 22 ++++++ 8 files changed, 249 insertions(+), 15 deletions(-) create mode 100644 drivers/gpu/drm/msm/msm_submitqueue.c
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 33008fa..3c234e7 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -57,7 +57,8 @@ msm-y := \ msm_iommu.o \ msm_perf.o \ msm_rd.o \ - msm_ringbuffer.o + msm_ringbuffer.o \ + msm_submitqueue.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f49f6ac..f4bb909 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -510,24 +510,37 @@ static void load_gpu(struct drm_device *dev) mutex_unlock(&init_lock); }
-static int msm_open(struct drm_device *dev, struct drm_file *file) +static int context_init(struct drm_file *file) { struct msm_file_private *ctx;
- /* For now, load gpu on open.. to avoid the requirement of having - * firmware in the initrd. - */ - load_gpu(dev); - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM;
+ msm_submitqueue_init(ctx); + file->driver_priv = ctx;
return 0; }
+static int msm_open(struct drm_device *dev, struct drm_file *file) +{ + /* For now, load gpu on open.. to avoid the requirement of having + * firmware in the initrd. + */ + load_gpu(dev); + + return context_init(file); +} + +static void context_close(struct msm_file_private *ctx) +{ + msm_submitqueue_close(ctx); + kfree(ctx); +} + static void msm_postclose(struct drm_device *dev, struct drm_file *file) { struct msm_drm_private *priv = dev->dev_private; @@ -538,7 +551,7 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file) priv->lastctx = NULL; mutex_unlock(&dev->struct_mutex);
- kfree(ctx); + context_close(ctx); }
static void msm_lastclose(struct drm_device *dev) @@ -783,6 +796,28 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, return ret; }
+ +static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_submitqueue *args = data; + + if (args->flags & ~MSM_SUBMITQUEUE_FLAGS) + return -EINVAL; + + return msm_submitqueue_create(file->driver_priv, args->prio, + args->flags, &args->id); +} + + +static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_msm_submitqueue *args = data; + + return msm_submitqueue_remove(file->driver_priv, args->id); +} + static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), @@ -792,6 +827,8 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW), };
static const struct vm_operations_struct vm_ops = { diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index fc8d24f..f4d8328 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -58,11 +58,9 @@ #define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
struct msm_file_private { - /* currently we don't do anything useful with this.. but when - * per-context address spaces are supported we'd keep track of - * the context's page-tables here. - */ - int dummy; + rwlock_t queuelock; + struct list_head submitqueues; + int queueid; };
enum msm_mdp_plane_property { @@ -315,6 +313,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr);
+struct msm_gpu_submitqueue; +int msm_submitqueue_init(struct msm_file_private *ctx); +struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, + u32 id); +int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, + u32 flags, u32 *id); +int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); +void msm_submitqueue_close(struct msm_file_private *ctx); + +void msm_submitqueue_destroy(struct kref *kref); + + #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__) #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 91c210d..17f8a6c 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -142,6 +142,7 @@ struct msm_gem_submit { struct list_head bo_list; struct ww_acquire_ctx ticket; struct dma_fence *fence; + struct msm_gpu_submitqueue *queue; struct pid *pid; /* submitting process */ bool valid; /* true if no cmdstream patching needed */ unsigned int nr_cmds; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index c0bfe18b..eb0b046 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -31,7 +31,8 @@ #define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev, - struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds) + struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue, + uint32_t nr_bos, uint32_t nr_cmds) { struct msm_gem_submit *submit; uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) + @@ -49,6 +50,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, submit->fence = NULL; submit->pid = get_pid(task_pid(current)); submit->cmd = (void *)&submit->bos[nr_bos]; + submit->queue = queue;
/* initially, until copy_from_user() and bo lookup succeeds: */ submit->nr_bos = 0; @@ -66,6 +68,8 @@ void msm_gem_submit_free(struct msm_gem_submit *submit) dma_fence_put(submit->fence); list_del(&submit->node); put_pid(submit->pid); + msm_submitqueue_put(submit->queue); + kfree(submit); }
@@ -391,6 +395,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_gpu *gpu = priv->gpu; struct dma_fence *in_fence = NULL; struct sync_file *sync_file = NULL; + struct msm_gpu_submitqueue *queue; int out_fence_fd = -1; unsigned i; int ret; @@ -407,6 +412,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS) return -EINVAL;
+ queue = msm_submitqueue_get(ctx, args->queueid); + if (!queue) + return -ENOENT; + if (args->flags & MSM_SUBMIT_FENCE_FD_IN) { in_fence = sync_file_get_fence(args->fence_fd);
@@ -437,7 +446,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } priv->struct_mutex_task = current;
- submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds); + submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds); if (!submit) { ret = -ENOMEM; goto out_unlock; diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index df4e277..a890176 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -150,6 +150,15 @@ struct msm_gpu_perfcntr { const char *name; };
+struct msm_gpu_submitqueue { + int id; + u32 flags; + u32 prio; + int faults; + struct list_head node; + struct kref ref; +}; + static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) { msm_writel(data, gpu->mmio + (reg << 2)); @@ -223,4 +232,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, void __init adreno_register(void); void __exit adreno_unregister(void);
+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue) +{ + if (queue) + kref_put(&queue->ref, msm_submitqueue_destroy); +} + #endif /* __MSM_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c new file mode 100644 index 0000000..a545dc6 --- /dev/null +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -0,0 +1,139 @@ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kref.h> +#include "msm_gpu.h" + +void msm_submitqueue_destroy(struct kref *kref) +{ + struct msm_gpu_submitqueue *queue = container_of(kref, + struct msm_gpu_submitqueue, ref); + + kfree(queue); +} + +struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, + u32 id) +{ + struct msm_gpu_submitqueue *entry; + + if (!ctx) + return NULL; + + read_lock(&ctx->queuelock); + + list_for_each_entry(entry, &ctx->submitqueues, node) { + if (entry->id == id) { + kref_get(&entry->ref); + read_unlock(&ctx->queuelock); + + return entry; + } + } + + read_unlock(&ctx->queuelock); + return NULL; +} + +void msm_submitqueue_close(struct msm_file_private *ctx) +{ + struct msm_gpu_submitqueue *entry, *tmp; + + if (!ctx) + return; + + /* + * No lock needed in close and there won't + * be any more user ioctls coming our way + */ + list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) + msm_submitqueue_put(entry); +} + +int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags, + u32 *id) +{ + struct msm_gpu_submitqueue *queue; + + if (!ctx) + return -ENODEV; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + + if (!queue) + return -ENOMEM; + + kref_init(&queue->ref); + queue->flags = flags; + queue->prio = prio; + + write_lock(&ctx->queuelock); + + queue->id = ctx->queueid++; + + if (id) + *id = queue->id; + + list_add_tail(&queue->node, &ctx->submitqueues); + + write_unlock(&ctx->queuelock); + + return 0; +} + +int msm_submitqueue_init(struct msm_file_private *ctx) +{ + if (!ctx) + return 0; + + INIT_LIST_HEAD(&ctx->submitqueues); + + rwlock_init(&ctx->queuelock); + + /* + * Add the "default" submitqueue with id 0 + * "medium" priority (3) and no flags + */ + return msm_submitqueue_create(ctx, 3, 0, NULL); +} + +int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id) +{ + struct msm_gpu_submitqueue *entry; + + if (!ctx) + return 0; + + /* + * id 0 is the "default" queue and can't be destroyed + * by the user + */ + if (!id) + return -ENOENT; + + write_lock(&ctx->queuelock); + + list_for_each_entry(entry, &ctx->submitqueues, node) { + if (entry->id == id) { + list_del(&entry->node); + write_unlock(&ctx->queuelock); + + msm_submitqueue_put(entry); + return 0; + } + } + + write_unlock(&ctx->queuelock); + return -ENOENT; +} + diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index ad4eb28..99826bd 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -218,6 +218,7 @@ struct drm_msm_gem_submit { __u64 bos; /* in, ptr to array of submit_bo's */ __u64 cmds; /* in, ptr to array of submit_cmd's */ 
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ + __u32 queueid; /* in, submitqueue id */ };
/* The normal way to synchronize with the GPU is just to CPU_PREP on @@ -254,6 +255,20 @@ struct drm_msm_gem_madvise { __u32 retained; /* out, whether backing store still exists */ };
+/* + * Draw queues allow the user to set specific submission parameter. Command + * submissions specify a specific submitqueue to use. ID 0 is reserved for + * backwards compatibility as a "default" submitqueue + */ + +#define MSM_SUBMITQUEUE_FLAGS (0) + +struct drm_msm_submitqueue { + __u32 flags; /* in, MSM_SUBMITQUEUE_x */ + __u32 prio; /* in, Priority level */ + __u32 id; /* out, identifier */ +}; + #define DRM_MSM_GET_PARAM 0x00 /* placeholder: #define DRM_MSM_SET_PARAM 0x01 @@ -265,6 +280,11 @@ struct drm_msm_gem_madvise { #define DRM_MSM_GEM_SUBMIT 0x06 #define DRM_MSM_WAIT_FENCE 0x07 #define DRM_MSM_GEM_MADVISE 0x08 +/* placeholder: +#define DRM_MSM_GEM_SVM_NEW 0x09 + */ +#define DRM_MSM_SUBMITQUEUE_NEW 0x0A +#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) #define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) @@ -274,6 +294,8 @@ struct drm_msm_gem_madvise { #define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit) #define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence) #define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise) +#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue) +#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, struct drm_msm_submitqueue)
#if defined(__cplusplus) }
Currently we use a single fence context for the entire GPU. That works until we start dealing with multiple ringbuffers that will all need their own sequence number.
So in order for the user to continue using the fence functions we will need some way to uniquely identify a submission that gets mapped into the appropriate ringbuffer and sequence ID.
The best way to do this is to move to per-submitqueue fences. This allows each user context to have its own monotonically increasing identifier, and we can easily map back and forth.
To implement this, create a fence context for each submitqueue and assign user fences from that context. Each submission will be assigned a submitqueue fence and a unique sequence ID for the target ringbuffer. When the submission is retired on the ring, the fence will get signaled.
struct drm_msm_wait_fence in the UABI needs to be extended to include the queue ID for the fence being queried. Legacy applications that don't use submitqueues will use queue 0 by default.
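A hedged userspace sketch of the extended wait (illustrative only; the wrapper name is made up) using the queueid field added to drm_msm_wait_fence below:

#include <sys/ioctl.h>
#include "msm_drm.h"	/* UAPI header with the extended drm_msm_wait_fence */

/* Hypothetical userspace helper: wait on a fence owned by a specific queue */
static int wait_submitqueue_fence(int drm_fd, unsigned int queue_id,
		unsigned int fence, int64_t sec, int64_t nsec)
{
	struct drm_msm_wait_fence req = {
		.fence = fence,			/* seqno returned from the submit */
		.queueid = queue_id,		/* queue whose fence context owns it */
		.timeout = { .tv_sec = sec, .tv_nsec = nsec },
	};

	return ioctl(drm_fd, DRM_IOCTL_MSM_WAIT_FENCE, &req);
}

Legacy users pass queueid 0 and get the default queue's fence context, preserving the old behavior.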
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 4 ++-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 10 ++++----- drivers/gpu/drm/msm/msm_drv.c | 19 ++++++++++++----- drivers/gpu/drm/msm/msm_drv.h | 6 +++--- drivers/gpu/drm/msm/msm_fence.c | 2 +- drivers/gpu/drm/msm/msm_fence.h | 2 +- drivers/gpu/drm/msm/msm_gem.h | 1 + drivers/gpu/drm/msm/msm_gem_submit.c | 7 ++++--- drivers/gpu/drm/msm/msm_gpu.c | 36 +++++++++++++++++++-------------- drivers/gpu/drm/msm/msm_gpu.h | 8 +++++--- drivers/gpu/drm/msm/msm_submitqueue.c | 33 ++++++++++++++++++++++++------ include/uapi/drm/msm_drm.h | 1 + 12 files changed, 85 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 6361193..3acbba1 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -106,13 +106,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, }
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); - OUT_RING(ring, submit->fence->seqno); + OUT_RING(ring, submit->seqno);
OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence))); OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence))); - OUT_RING(ring, submit->fence->seqno); + OUT_RING(ring, submit->seqno);
gpu->funcs->flush(gpu); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f1ab270..b1c1639 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -75,7 +75,7 @@ int adreno_hw_init(struct msm_gpu *gpu) gpu->rb->cur = gpu->rb->start;
/* reset completed fence seqno: */ - adreno_gpu->memptrs->fence = gpu->fctx->completed_fence; + adreno_gpu->memptrs->fence = gpu->seqno; adreno_gpu->memptrs->rptr = 0;
/* Setup REG_CP_RB_CNTL: */ @@ -165,7 +165,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, }
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1); - OUT_RING(ring, submit->fence->seqno); + OUT_RING(ring, submit->seqno);
if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) { /* Flush HLSQ lazy updates to make sure there is nothing @@ -182,7 +182,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, OUT_PKT3(ring, CP_EVENT_WRITE, 3); OUT_RING(ring, CACHE_FLUSH_TS); OUT_RING(ring, rbmemptr(adreno_gpu, fence)); - OUT_RING(ring, submit->fence->seqno); + OUT_RING(ring, submit->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */ OUT_PKT3(ring, CP_INTERRUPT, 1); @@ -255,7 +255,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) adreno_gpu->rev.patchid);
seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, - gpu->fctx->last_fence); + gpu->seqno); seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu)); seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
@@ -290,7 +290,7 @@ void adreno_dump_info(struct msm_gpu *gpu) adreno_gpu->rev.patchid);
printk("fence: %d/%d\n", adreno_gpu->memptrs->fence, - gpu->fctx->last_fence); + gpu->seqno); printk("rptr: %d\n", get_rptr(adreno_gpu)); printk("rb wptr: %d\n", get_wptr(gpu->rb)); } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f4bb909..9599d02 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -510,7 +510,7 @@ static void load_gpu(struct drm_device *dev) mutex_unlock(&init_lock); }
-static int context_init(struct drm_file *file) +static int context_init(struct drm_device *dev, struct drm_file *file) { struct msm_file_private *ctx;
@@ -518,7 +518,7 @@ static int context_init(struct drm_file *file) if (!ctx) return -ENOMEM;
- msm_submitqueue_init(ctx); + msm_submitqueue_init(dev, ctx);
file->driver_priv = ctx;
@@ -532,7 +532,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file) */ load_gpu(dev);
- return context_init(file); + return context_init(dev, file); }
static void context_close(struct msm_file_private *ctx) @@ -746,6 +746,8 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, struct msm_drm_private *priv = dev->dev_private; struct drm_msm_wait_fence *args = data; ktime_t timeout = to_ktime(args->timeout); + struct msm_gpu_submitqueue *queue; + int ret;
if (args->pad) { DRM_ERROR("invalid pad: %08x\n", args->pad); @@ -755,7 +757,14 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, if (!priv->gpu) return 0;
- return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true); + queue = msm_submitqueue_get(file->driver_priv, args->queueid); + if (!queue) + return -ENOENT; + + ret = msm_wait_fence(queue->fctx, args->fence, &timeout, true); + + msm_submitqueue_put(queue); + return ret; }
static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data, @@ -805,7 +814,7 @@ static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data, if (args->flags & ~MSM_SUBMITQUEUE_FLAGS) return -EINVAL;
- return msm_submitqueue_create(file->driver_priv, args->prio, + return msm_submitqueue_create(dev, file->driver_priv, args->prio, args->flags, &args->id); }
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index f4d8328..0fa9a7d 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -314,11 +314,11 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, u32 msm_readl(const void __iomem *addr);
struct msm_gpu_submitqueue; -int msm_submitqueue_init(struct msm_file_private *ctx); +int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx); struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx, u32 id); -int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, - u32 flags, u32 *id); +int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, + u32 prio, u32 flags, u32 *id); int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id); void msm_submitqueue_close(struct msm_file_private *ctx);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index a2f89ba..349c12f 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -31,7 +31,7 @@ struct msm_fence_context * return ERR_PTR(-ENOMEM);
fctx->dev = dev; - fctx->name = name; + strncpy(fctx->name, name, sizeof(fctx->name)); fctx->context = dma_fence_context_alloc(1); init_waitqueue_head(&fctx->event); spin_lock_init(&fctx->spinlock); diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h index 56061aa..1aa6a4c 100644 --- a/drivers/gpu/drm/msm/msm_fence.h +++ b/drivers/gpu/drm/msm/msm_fence.h @@ -22,7 +22,7 @@
struct msm_fence_context { struct drm_device *dev; - const char *name; + char name[32]; unsigned context; /* last_fence == completed_fence --> no pending work */ uint32_t last_fence; /* last assigned fence */ diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 17f8a6c..c98d3a7 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -141,6 +141,7 @@ struct msm_gem_submit { struct list_head node; /* node in gpu submit_list */ struct list_head bo_list; struct ww_acquire_ctx ticket; + uint32_t seqno; /* Sequence number of the submit on the ring */ struct dma_fence *fence; struct msm_gpu_submitqueue *queue; struct pid *pid; /* submitting process */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index eb0b046..ac95816 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -233,7 +233,8 @@ static int submit_fence_sync(struct msm_gem_submit *submit) struct msm_gem_object *msm_obj = submit->bos[i].obj; bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
- ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); + ret = msm_gem_sync_object(&msm_obj->base, submit->queue->fctx, + write); if (ret) break; } @@ -426,7 +427,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, * Wait if the fence is from a foreign context, or if the fence * array contains any fence from a foreign context. */ - if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { + if (!dma_fence_match_context(in_fence, queue->fctx->context)) { ret = dma_fence_wait(in_fence, true); if (ret) return ret; @@ -531,7 +532,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- submit->fence = msm_fence_alloc(gpu->fctx); + submit->fence = msm_fence_alloc(queue->fctx); if (IS_ERR(submit->fence)) { ret = PTR_ERR(submit->fence); submit->fence = NULL; diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 9f3dbc2..69bd60e 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -221,6 +221,19 @@ int msm_gpu_hw_init(struct msm_gpu *gpu) * Hangcheck detection for locked gpu: */
+static void update_fences(struct msm_gpu *gpu, uint32_t fence) +{ + struct msm_gem_submit *submit; + + list_for_each_entry(submit, &gpu->submit_list, node) { + if (submit->seqno > fence) + break; + + msm_update_fence(submit->queue->fctx, + submit->fence->seqno); + } +} + static void retire_submits(struct msm_gpu *gpu);
static void recover_worker(struct work_struct *work) @@ -230,13 +243,13 @@ static void recover_worker(struct work_struct *work) struct msm_gem_submit *submit; uint32_t fence = gpu->funcs->last_fence(gpu);
- msm_update_fence(gpu->fctx, fence + 1); + update_fences(gpu, fence + 1);
mutex_lock(&dev->struct_mutex);
dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); list_for_each_entry(submit, &gpu->submit_list, node) { - if (submit->fence->seqno == (fence + 1)) { + if (submit->seqno == (fence + 1)) { struct task_struct *task;
rcu_read_lock(); @@ -286,7 +299,7 @@ static void hangcheck_handler(unsigned long data) if (fence != gpu->hangcheck_fence) { /* some progress has been made.. ya! */ gpu->hangcheck_fence = fence; - } else if (fence < gpu->fctx->last_fence) { + } else if (fence < gpu->seqno) { /* no progress and not done.. hung! */ gpu->hangcheck_fence = fence; dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n", @@ -294,12 +307,12 @@ static void hangcheck_handler(unsigned long data) dev_err(dev->dev, "%s: completed fence: %u\n", gpu->name, fence); dev_err(dev->dev, "%s: submitted fence: %u\n", - gpu->name, gpu->fctx->last_fence); + gpu->name, gpu->seqno); queue_work(priv->wq, &gpu->recover_work); }
/* if still more pending work, reset the hangcheck timer: */ - if (gpu->fctx->last_fence > gpu->hangcheck_fence) + if (gpu->seqno > gpu->hangcheck_fence) hangcheck_timer_reset(gpu);
/* workaround for missing irq: */ @@ -451,7 +464,7 @@ static void retire_worker(struct work_struct *work) struct drm_device *dev = gpu->dev; uint32_t fence = gpu->funcs->last_fence(gpu);
- msm_update_fence(gpu->fctx, fence); + update_fences(gpu, fence);
mutex_lock(&dev->struct_mutex); retire_submits(gpu); @@ -480,6 +493,8 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gpu_hw_init(gpu);
+ submit->seqno = ++gpu->seqno; + list_add_tail(&submit->node, &gpu->submit_list);
msm_rd_dump_submit(submit); @@ -575,12 +590,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, gpu->dev = drm; gpu->funcs = funcs; gpu->name = name; - gpu->fctx = msm_fence_context_alloc(drm, name); - if (IS_ERR(gpu->fctx)) { - ret = PTR_ERR(gpu->fctx); - gpu->fctx = NULL; - goto fail; - }
INIT_LIST_HEAD(&gpu->active_list); INIT_WORK(&gpu->retire_work, retire_worker); @@ -693,7 +702,4 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_gem_put_iova(gpu->rb->bo, gpu->aspace); msm_ringbuffer_destroy(gpu->rb); } - - if (gpu->fctx) - msm_fence_context_free(gpu->fctx); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index a890176..d8485ec 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -93,8 +93,8 @@ struct msm_gpu { /* list of GEM active objects: */ struct list_head active_list;
- /* fencing: */ - struct msm_fence_context *fctx; + /* The sequence number for ring submissions */ + uint32_t seqno;
/* does gpu need hw_init? */ bool needs_hw_init; @@ -134,7 +134,7 @@ struct msm_gpu {
static inline bool msm_gpu_active(struct msm_gpu *gpu) { - return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu); + return gpu->seqno > gpu->funcs->last_fence(gpu); }
/* Perf-Counters: @@ -157,6 +157,8 @@ struct msm_gpu_submitqueue { int faults; struct list_head node; struct kref ref; + + struct msm_fence_context *fctx; };
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data) diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index a545dc6..3afb086 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -19,6 +19,7 @@ void msm_submitqueue_destroy(struct kref *kref) struct msm_gpu_submitqueue *queue = container_of(kref, struct msm_gpu_submitqueue, ref);
+ msm_fence_context_free(queue->fctx); kfree(queue); }
@@ -60,16 +61,17 @@ void msm_submitqueue_close(struct msm_file_private *ctx) msm_submitqueue_put(entry); }
-int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags, - u32 *id) +int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx, + u32 prio, u32 flags, u32 *id) { struct msm_gpu_submitqueue *queue; + char name[32]; + int ret = 0;
if (!ctx) return -ENODEV;
queue = kzalloc(sizeof(*queue), GFP_KERNEL); - if (!queue) return -ENOMEM;
@@ -86,12 +88,31 @@ int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags,
list_add_tail(&queue->node, &ctx->submitqueues);
+ /* + * Get another reference to the queue before releasing the lock + * so it does't get destroyed while we are still setting it up + */ + kref_get(&queue->ref); + write_unlock(&ctx->queuelock);
- return 0; + /* FIXME: What should the name be? */ + sprintf(name, "gpu-queue-%d", queue->id); + + /* Allocate a fence domain for the queue */ + + queue->fctx = msm_fence_context_alloc(drm, name); + if (IS_ERR(queue->fctx)) { + ret = PTR_ERR(queue->fctx); + msm_submitqueue_put(queue); + } + + msm_submitqueue_put(queue); + + return ret; }
-int msm_submitqueue_init(struct msm_file_private *ctx) +int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx) { if (!ctx) return 0; @@ -104,7 +125,7 @@ int msm_submitqueue_init(struct msm_file_private *ctx) * Add the "default" submitqueue with id 0 * "medium" priority (3) and no flags */ - return msm_submitqueue_create(ctx, 3, 0, NULL); + return msm_submitqueue_create(drm, ctx, 3, 0, NULL); }
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id) diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 99826bd..a06bf97 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -232,6 +232,7 @@ struct drm_msm_wait_fence { __u32 fence; /* in */ __u32 pad; struct drm_msm_timespec timeout; /* in */ + __u32 queueid; /* in, submitqueue id that owns the fence */ };
/* madvise provides a way to tell the kernel in case a buffers contents
Currently the GPU MMU is attached in the adreno_gpu code, but as more and more of the GPU initialization moves to the generic GPU path we need to map and use GPU memory earlier and earlier. There isn't any reason to defer attaching the MMU, so attach it right after the address space is created so it can be used immediately.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 27 ++--------- drivers/gpu/drm/msm/msm_gpu.c | 83 ++++++++++++++++++++++----------- 2 files changed, 61 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index b1c1639..634e724 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -330,11 +330,6 @@ void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords) DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name); }
-static const char *iommu_ports[] = { - "gfx3d_user", "gfx3d_priv", - "gfx3d1_user", "gfx3d1_priv", -}; - int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs) { @@ -366,15 +361,15 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ringsz = RB_SIZE;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, adreno_gpu->info->name, &adreno_gpu_config); if (ret) return ret;
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_enable(&pdev->dev); - ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev); if (ret) { dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n", @@ -389,14 +384,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; }
- if (gpu->aspace && gpu->aspace->mmu) { - struct msm_mmu *mmu = gpu->aspace->mmu; - ret = mmu->funcs->attach(mmu, iommu_ports, - ARRAY_SIZE(iommu_ports)); - if (ret) - return ret; - } - adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED); if (IS_ERR(adreno_gpu->memptrs_bo)) { @@ -439,10 +426,4 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) release_firmware(adreno_gpu->pfp);
msm_gpu_cleanup(gpu); - - if (gpu->aspace) { - gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, - iommu_ports, ARRAY_SIZE(iommu_ports)); - msm_gem_address_space_put(gpu->aspace); - } } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 69bd60e..35533d3 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -577,11 +577,49 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) return 0; }
+static struct msm_gem_address_space * +msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev, + uint64_t va_start, uint64_t va_end) +{ + struct iommu_domain *iommu; + struct msm_gem_address_space *aspace; + int ret; + + /* + * Setup IOMMU.. eventually we will (I think) do this once per context + * and have separate page tables per context. For now, to keep things + * simple and to get something working, just use a single address space: + */ + iommu = iommu_domain_alloc(&platform_bus_type); + if (!iommu) + return NULL; + + iommu->geometry.aperture_start = va_start; + iommu->geometry.aperture_end = va_end; + + dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name); + + aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu"); + if (IS_ERR(aspace)) { + dev_err(gpu->dev->dev, "failed to init iommu: %ld\n", + PTR_ERR(aspace)); + iommu_domain_free(iommu); + return ERR_CAST(aspace); + } + + ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0); + if (ret) { + msm_gem_address_space_put(aspace); + return ERR_PTR(ret); + } + + return aspace; +} + int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config) { - struct iommu_domain *iommu; int ret;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) @@ -645,28 +683,19 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, if (IS_ERR(gpu->gpu_cx)) gpu->gpu_cx = NULL;
- /* Setup IOMMU.. eventually we will (I think) do this once per context - * and have separate page tables per context. For now, to keep things - * simple and to get something working, just use a single address space: - */ - iommu = iommu_domain_alloc(&platform_bus_type); - if (iommu) { - iommu->geometry.aperture_start = config->va_start; - iommu->geometry.aperture_end = config->va_end; - - dev_info(drm->dev, "%s: using IOMMU\n", name); - gpu->aspace = msm_gem_address_space_create(&pdev->dev, - iommu, "gpu"); - if (IS_ERR(gpu->aspace)) { - ret = PTR_ERR(gpu->aspace); - dev_err(drm->dev, "failed to init iommu: %d\n", ret); - gpu->aspace = NULL; - iommu_domain_free(iommu); - goto fail; - } + gpu->pdev = pdev; + platform_set_drvdata(pdev, gpu); + + bs_init(gpu);
- } else { + gpu->aspace = msm_gpu_create_address_space(gpu, pdev, + config->va_start, config->va_end); + + if (gpu->aspace == NULL) dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); + else if (IS_ERR(gpu->aspace)) { + ret = PTR_ERR(gpu->aspace); + goto fail; }
/* Create ringbuffer: */ @@ -678,14 +707,10 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; }
- gpu->pdev = pdev; - platform_set_drvdata(pdev, gpu); - - bs_init(gpu); - return 0;
fail: + platform_set_drvdata(pdev, NULL); return ret; }
@@ -702,4 +727,10 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_gem_put_iova(gpu->rb->bo, gpu->aspace); msm_ringbuffer_destroy(gpu->rb); } + + if (gpu->aspace) { + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, + NULL, 0); + msm_gem_address_space_put(gpu->aspace); + } }
Nearly all of the in-kernel buffer allocations allocate a buffer object, a kernel virtual address, and a GPU iova at the same time. Add a helper function to handle the details.
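The helper essentially folds together the three-step pattern each call site open-coded: allocate the object, pin an iova in the given address space, and map a kernel vaddr. A rough sketch of the consolidated logic, under the prototype added to msm_drv.h (not the exact body in msm_gem.c; error handling simplified):

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
		struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	struct drm_gem_object *obj;
	void *vaddr;
	int ret;

	obj = msm_gem_new(dev, size, flags);	/* allocate the buffer object */
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		/* pin a GPU iova in the requested address space */
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);		/* kernel virtual mapping */
	if (!vaddr) {
		drm_gem_object_unreference(obj);
		return ERR_PTR(-ENOMEM);
	}

	*bo = obj;
	return vaddr;
}

msm_gem_kernel_new_locked() would follow the same flow but allocate with msm_gem_new_locked() for callers that already hold struct_mutex.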
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 22 +++------------- drivers/gpu/drm/msm/adreno/a5xx_power.c | 14 +++------- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 26 +++++-------------- drivers/gpu/drm/msm/msm_drv.h | 6 +++++ drivers/gpu/drm/msm/msm_fbdev.c | 35 ++++++++++--------------- drivers/gpu/drm/msm/msm_gem.c | 46 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_ringbuffer.c | 12 ++++----- 7 files changed, 86 insertions(+), 75 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 3acbba1..e533007 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -269,28 +269,14 @@ static int a5xx_me_init(struct msm_gpu *gpu) static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, const struct firmware *fw, u64 *iova) { - struct drm_device *drm = gpu->dev; struct drm_gem_object *bo; void *ptr;
- bo = msm_gem_new_locked(drm, fw->size - 4, MSM_BO_UNCACHED); - if (IS_ERR(bo)) - return bo; + ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
- ptr = msm_gem_get_vaddr(bo); - if (!ptr) { - drm_gem_object_unreference(bo); - return ERR_PTR(-ENOMEM); - } - - if (iova) { - int ret = msm_gem_get_iova(bo, gpu->aspace, iova); - - if (ret) { - drm_gem_object_unreference(bo); - return ERR_PTR(ret); - } - } + if (IS_ERR(ptr)) + return ERR_CAST(ptr);
memcpy(ptr, &fw->data[4], fw->size - 4);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 87af6ee..04aab1d 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -294,16 +294,10 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) */ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
- a5xx_gpu->gpmu_bo = msm_gem_new_locked(drm, bosize, MSM_BO_UNCACHED); - if (IS_ERR(a5xx_gpu->gpmu_bo)) - goto err; - - if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace, - &a5xx_gpu->gpmu_iova)) - goto err; - - ptr = msm_gem_get_vaddr(a5xx_gpu->gpmu_bo); - if (!ptr) + ptr = msm_gem_kernel_new_locked(drm, bosize, + MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, + &a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova); + if (IS_ERR(ptr)) goto err;
while (cmds_size > 0) { diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 634e724..6d65684 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -384,29 +384,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; }
- adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs), - MSM_BO_UNCACHED); - if (IS_ERR(adreno_gpu->memptrs_bo)) { - ret = PTR_ERR(adreno_gpu->memptrs_bo); - adreno_gpu->memptrs_bo = NULL; - dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); - return ret; - } + adreno_gpu->memptrs = msm_gem_kernel_new(drm, + sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace, + &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova);
- adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo); if (IS_ERR(adreno_gpu->memptrs)) { - dev_err(drm->dev, "could not vmap memptrs\n"); - return -ENOMEM; - } - - ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace, - &adreno_gpu->memptrs_iova); - if (ret) { - dev_err(drm->dev, "could not map memptrs: %d\n", ret); - return ret; + ret = PTR_ERR(adreno_gpu->memptrs); + adreno_gpu->memptrs = NULL; + dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); }
- return 0; + return ret; }
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 0fa9a7d..696413d 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -235,6 +235,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags); struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev, uint32_t size, uint32_t flags); +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova); struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 5ecf4ff..bfb1fe7 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -78,6 +78,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, struct drm_framebuffer *fb = NULL; struct fb_info *fbi = NULL; struct drm_mode_fb_cmd2 mode_cmd = {0}; + void *ptr; uint64_t paddr; int ret, size;
@@ -97,11 +98,18 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, /* allocate backing bo */ size = mode_cmd.pitches[0] * mode_cmd.height; DBG("allocating %d bytes for fb %d", size, dev->primary->index); - fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | - MSM_BO_WC | MSM_BO_STOLEN); - if (IS_ERR(fbdev->bo)) { - ret = PTR_ERR(fbdev->bo); - fbdev->bo = NULL; + + /* + * NOTE: if we can be guaranteed to be able to map buffer + * in panic (ie. lock-safe, etc) we could avoid pinning the + * buffer now: + */ + ptr = msm_gem_kernel_new(dev, size, + MSM_BO_SCANOUT | MSM_BO_WC | MSM_BO_STOLEN, + priv->kms->aspace, &fbdev->bo, &paddr); + + if (IS_ERR(ptr)) { + ret = PTR_ERR(ptr); dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret); goto fail; } @@ -119,17 +127,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
- /* - * NOTE: if we can be guaranteed to be able to map buffer - * in panic (ie. lock-safe, etc) we could avoid pinning the - * buffer now: - */ - ret = msm_gem_get_iova(fbdev->bo, priv->kms->aspace, &paddr); - if (ret) { - dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); - goto fail_unlock; - } - fbi = drm_fb_helper_alloc_fbi(helper); if (IS_ERR(fbi)) { dev_err(dev->dev, "failed to allocate fb info\n"); @@ -153,11 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = paddr;
- fbi->screen_base = msm_gem_get_vaddr(fbdev->bo); - if (IS_ERR(fbi->screen_base)) { - ret = PTR_ERR(fbi->screen_base); - goto fail_unlock; - } + fbi->screen_base = ptr; fbi->screen_size = fbdev->bo->size; fbi->fix.smem_start = paddr; fbi->fix.smem_len = fbdev->bo->size; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 65f3554..6a4e8d0 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1018,3 +1018,49 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, drm_gem_object_unreference_unlocked(obj); return ERR_PTR(ret); } + +static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova, bool locked) +{ + void *vaddr; + struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked); + int ret; + + if (IS_ERR(obj)) + return ERR_CAST(obj); + + if (iova) { + ret = msm_gem_get_iova(obj, aspace, iova); + if (ret) { + drm_gem_object_unreference(obj); + return ERR_PTR(ret); + } + } + + vaddr = msm_gem_get_vaddr(obj); + if (!vaddr) { + msm_gem_put_iova(obj, aspace); + drm_gem_object_unreference(obj); + return ERR_PTR(-ENOMEM); + } + + if (bo) + *bo = obj; + + return vaddr; +} + +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova) +{ + return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false); +} + +void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size, + uint32_t flags, struct msm_gem_address_space *aspace, + struct drm_gem_object **bo, uint64_t *iova) +{ + return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true); +} diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 791bca3..bf065a5 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -33,16 +33,14 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) }
ring->gpu = gpu; - ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC); - if (IS_ERR(ring->bo)) { - ret = PTR_ERR(ring->bo); - ring->bo = NULL; - goto fail; - }
- ring->start = msm_gem_get_vaddr(ring->bo); + /* Pass NULL for the iova pointer - we will map it later */ + ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC, + gpu->aspace, &ring->bo, NULL); + if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); + ring->start = 0; goto fail; } ring->end = ring->start + (size / 4);
When we move to multiple ringbuffers we're going to store the memptrs data on a per-ring basis. To prepare for that, move the current memptrs from the adreno namespace into msm_gpu. This is much cleaner and immediately lets us kill off some sub-functions, so there is much less cost later when we do move to per-ring structs.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 1 - drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 1 - drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 6 ++-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 52 ++++++++------------------------- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 16 ---------- drivers/gpu/drm/msm/msm_gpu.c | 35 ++++++++++++++++++++-- drivers/gpu/drm/msm/msm_gpu.h | 17 +++++++++-- 7 files changed, 61 insertions(+), 67 deletions(-)
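The practical effect, trimmed from the hunks below: the common code can read the completed fence straight out of the shared memptrs (and compute its iova with rbmemptr(), which also moves to msm_gpu) instead of bouncing through the adreno-only last_fence() callback in the retire/recover/hangcheck paths:

	/* before: */
	uint32_t fence = gpu->funcs->last_fence(gpu);

	/* after: memptrs and the rbmemptr() iova helper live on msm_gpu */
	uint32_t fence = gpu->memptrs->fence;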
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 7791313..789f7fb 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -444,7 +444,6 @@ static void a3xx_dump(struct msm_gpu *gpu) .pm_suspend = msm_gpu_pm_suspend, .pm_resume = msm_gpu_pm_resume, .recover = a3xx_recover, - .last_fence = adreno_last_fence, .submit = adreno_submit, .flush = adreno_flush, .irq = a3xx_irq, diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 58341ef..f87c4312 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -532,7 +532,6 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) .pm_suspend = a4xx_pm_suspend, .pm_resume = a4xx_pm_resume, .recover = a4xx_recover, - .last_fence = adreno_last_fence, .submit = adreno_submit, .flush = adreno_flush, .irq = a4xx_irq, diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index e533007..c986a76 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -83,7 +83,6 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = gpu->rb; unsigned int i, ibs = 0; @@ -110,8 +109,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); - OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, fence))); - OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, fence))); + OUT_RING(ring, lower_32_bits(rbmemptr(gpu, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(gpu, fence))); OUT_RING(ring, submit->seqno);
gpu->funcs->flush(gpu); @@ -1025,7 +1024,6 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) .pm_suspend = a5xx_pm_suspend, .pm_resume = a5xx_pm_resume, .recover = a5xx_recover, - .last_fence = adreno_last_fence, .submit = a5xx_submit, .flush = adreno_flush, .irq = a5xx_irq, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 6d65684..552e692 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -75,8 +75,8 @@ int adreno_hw_init(struct msm_gpu *gpu) gpu->rb->cur = gpu->rb->start;
/* reset completed fence seqno: */ - adreno_gpu->memptrs->fence = gpu->seqno; - adreno_gpu->memptrs->rptr = 0; + gpu->memptrs->fence = gpu->seqno; + gpu->memptrs->rptr = 0;
/* Setup REG_CP_RB_CNTL: */ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, @@ -91,8 +91,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
if (!adreno_is_a430(adreno_gpu)) { adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, - rbmemptr(adreno_gpu, rptr)); + REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu, rptr)); }
return 0; @@ -106,17 +105,13 @@ static uint32_t get_wptr(struct msm_ringbuffer *ring) /* Use this helper to read rptr, since a430 doesn't update rptr in memory */ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu) { + struct msm_gpu *gpu = &adreno_gpu->base; + if (adreno_is_a430(adreno_gpu)) - return adreno_gpu->memptrs->rptr = adreno_gpu_read( + return gpu->memptrs->rptr = adreno_gpu_read( adreno_gpu, REG_ADRENO_CP_RB_RPTR); else - return adreno_gpu->memptrs->rptr; -} - -uint32_t adreno_last_fence(struct msm_gpu *gpu) -{ - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - return adreno_gpu->memptrs->fence; + return gpu->memptrs->rptr; }
void adreno_recover(struct msm_gpu *gpu) @@ -181,7 +176,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3); OUT_RING(ring, CACHE_FLUSH_TS); - OUT_RING(ring, rbmemptr(adreno_gpu, fence)); + OUT_RING(ring, rbmemptr(gpu, fence)); OUT_RING(ring, submit->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */ @@ -254,7 +249,7 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) adreno_gpu->rev.major, adreno_gpu->rev.minor, adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence, + seq_printf(m, "fence: %d/%d\n", gpu->memptrs->fence, gpu->seqno); seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu)); seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); @@ -289,7 +284,7 @@ void adreno_dump_info(struct msm_gpu *gpu) adreno_gpu->rev.major, adreno_gpu->rev.minor, adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence, + printk("fence: %d/%d\n", gpu->memptrs->fence, gpu->seqno); printk("rptr: %d\n", get_rptr(adreno_gpu)); printk("rb wptr: %d\n", get_wptr(gpu->rb)); @@ -378,40 +373,17 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, }
ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev); - if (ret) { + if (ret) dev_err(drm->dev, "failed to load %s PFP firmware: %d\n", adreno_gpu->info->pfpfw, ret); - return ret; - } - - adreno_gpu->memptrs = msm_gem_kernel_new(drm, - sizeof(*adreno_gpu->memptrs), MSM_BO_UNCACHED, gpu->aspace, - &adreno_gpu->memptrs_bo, &adreno_gpu->memptrs_iova); - - if (IS_ERR(adreno_gpu->memptrs)) { - ret = PTR_ERR(adreno_gpu->memptrs); - adreno_gpu->memptrs = NULL; - dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); - }
return ret; }
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) { - struct msm_gpu *gpu = &adreno_gpu->base; - - if (adreno_gpu->memptrs_bo) { - if (adreno_gpu->memptrs) - msm_gem_put_vaddr(adreno_gpu->memptrs_bo); - - if (adreno_gpu->memptrs_iova) - msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace); - - drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo); - } release_firmware(adreno_gpu->pm4); release_firmware(adreno_gpu->pfp);
- msm_gpu_cleanup(gpu); + msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 4d9165f..5ee1d6a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -82,14 +82,6 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
-#define rbmemptr(adreno_gpu, member) \ - ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member)) - -struct adreno_rbmemptrs { - volatile uint32_t rptr; - volatile uint32_t fence; -}; - struct adreno_gpu { struct msm_gpu base; struct adreno_rev rev; @@ -104,13 +96,6 @@ struct adreno_gpu { /* firmware: */ const struct firmware *pm4, *pfp;
- /* ringbuffer rptr/wptr: */ - // TODO should this be in msm_ringbuffer? I think it would be - // different for z180.. - struct adreno_rbmemptrs *memptrs; - struct drm_gem_object *memptrs_bo; - uint64_t memptrs_iova; - /* * Register offsets are different between some GPUs. * GPU specific offsets will be exported by GPU specific @@ -197,7 +182,6 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); int adreno_hw_init(struct msm_gpu *gpu); -uint32_t adreno_last_fence(struct msm_gpu *gpu); void adreno_recover(struct msm_gpu *gpu); void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 35533d3..9767d38 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -241,7 +241,7 @@ static void recover_worker(struct work_struct *work) struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); struct drm_device *dev = gpu->dev; struct msm_gem_submit *submit; - uint32_t fence = gpu->funcs->last_fence(gpu); + uint32_t fence = gpu->memptrs->fence;
update_fences(gpu, fence + 1);
@@ -294,7 +294,7 @@ static void hangcheck_handler(unsigned long data) struct msm_gpu *gpu = (struct msm_gpu *)data; struct drm_device *dev = gpu->dev; struct msm_drm_private *priv = dev->dev_private; - uint32_t fence = gpu->funcs->last_fence(gpu); + uint32_t fence = gpu->memptrs->fence;
if (fence != gpu->hangcheck_fence) { /* some progress has been made.. ya! */ @@ -462,7 +462,7 @@ static void retire_worker(struct work_struct *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); struct drm_device *dev = gpu->dev; - uint32_t fence = gpu->funcs->last_fence(gpu); + uint32_t fence = gpu->memptrs->fence;
update_fences(gpu, fence);
@@ -698,6 +698,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; }
+ gpu->memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo), + MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, + &gpu->memptrs_iova); + + if (IS_ERR(gpu->memptrs)) { + ret = PTR_ERR(gpu->memptrs); + gpu->memptrs = NULL; + dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); + goto fail; + } + /* Create ringbuffer: */ gpu->rb = msm_ringbuffer_new(gpu, config->ringsz); if (IS_ERR(gpu->rb)) { @@ -710,6 +721,18 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, return 0;
fail: + if (gpu->memptrs_bo) { + msm_gem_put_vaddr(gpu->memptrs_bo); + msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); + drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + } + + if (gpu->aspace) { + gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, + NULL, 0); + msm_gem_address_space_put(gpu->aspace); + } + platform_set_drvdata(pdev, NULL); return ret; } @@ -728,6 +751,12 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_ringbuffer_destroy(gpu->rb); }
+ if (gpu->memptrs_bo) { + msm_gem_put_vaddr(gpu->memptrs_bo); + msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); + drm_gem_object_unreference_unlocked(gpu->memptrs_bo); + } + if (gpu->aspace) { gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, NULL, 0); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index d8485ec..fcd735c 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -59,7 +59,6 @@ struct msm_gpu_funcs { struct msm_file_private *ctx); void (*flush)(struct msm_gpu *gpu); irqreturn_t (*irq)(struct msm_gpu *irq); - uint32_t (*last_fence)(struct msm_gpu *gpu); void (*recover)(struct msm_gpu *gpu); void (*destroy)(struct msm_gpu *gpu); #ifdef CONFIG_DEBUG_FS @@ -68,6 +67,14 @@ struct msm_gpu_funcs { #endif };
+#define rbmemptr(gpu, member) \ + ((gpu)->memptrs_iova + offsetof(struct msm_rbmemptrs, member)) + +struct msm_rbmemptrs { + volatile uint32_t rptr; + volatile uint32_t fence; +}; + struct msm_gpu { const char *name; struct drm_device *dev; @@ -130,11 +137,17 @@ struct msm_gpu { struct work_struct recover_work;
struct list_head submit_list; + + struct msm_rbmemptrs *memptrs; + struct drm_gem_object *memptrs_bo; + uint64_t memptrs_iova; + + };
static inline bool msm_gpu_active(struct msm_gpu *gpu) { - return gpu->seqno > gpu->funcs->last_fence(gpu); + return gpu->seqno > gpu->memptrs->fence; }
/* Perf-Counters:
Add the infrastructure to support the idea of multiple ringbuffers. Assign each ringbuffer an id and use that as an index for the various ring-specific operations.
The biggest delta is to support legacy fences. Each fence gets its own sequence number but the legacy functions expect to use a unique integer. To handle this we return a unique identifier for each submission but map it to a specific ring/sequence under the covers. Newer users use a dma_fence pointer anyway so they don't care about the actual sequence ID or ring.
The actual mechanics for multiple ringbuffers are very target specific so this code just allows for the possibility but still only defines one ringbuffer for each target family.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 9 +- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 9 +- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 45 +++++----- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 2 +- drivers/gpu/drm/msm/adreno/a5xx_power.c | 6 +- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 133 +++++++++++++++++------------ drivers/gpu/drm/msm/adreno/adreno_gpu.h | 20 +++-- drivers/gpu/drm/msm/msm_drv.h | 2 + drivers/gpu/drm/msm/msm_gem.h | 3 +- drivers/gpu/drm/msm/msm_gem_submit.c | 5 +- drivers/gpu/drm/msm/msm_gpu.c | 146 +++++++++++++++++++++----------- drivers/gpu/drm/msm/msm_gpu.h | 39 ++++----- drivers/gpu/drm/msm/msm_ringbuffer.c | 27 +++--- drivers/gpu/drm/msm/msm_ringbuffer.h | 19 ++++- 14 files changed, 287 insertions(+), 178 deletions(-)
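The ring selection itself is small; as the msm_gem_submit.c hunk below shows, the submitqueue priority is simply clamped to the number of rings the target created and used as an index:

	/* map the submitqueue priority onto one of the available rings */
	ring = clamp_t(uint32_t, queue->prio, 0, gpu->nr_rings - 1);
	submit->ring = gpu->rb[ring];

With only one ring defined per target family for now, every submission still lands on gpu->rb[0].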
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 789f7fb..4baef27 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -44,7 +44,7 @@
static bool a3xx_me_init(struct msm_gpu *gpu) { - struct msm_ringbuffer *ring = gpu->rb; + struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17); OUT_RING(ring, 0x000003f7); @@ -65,7 +65,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu); + gpu->funcs->flush(gpu, ring); return a3xx_idle(gpu); }
@@ -339,7 +339,7 @@ static void a3xx_destroy(struct msm_gpu *gpu) static bool a3xx_idle(struct msm_gpu *gpu) { /* wait for ringbuffer to drain: */ - if (!adreno_idle(gpu)) + if (!adreno_idle(gpu, gpu->rb[0])) return false;
/* then wait for GPU to finish: */ @@ -446,6 +446,7 @@ static void a3xx_dump(struct msm_gpu *gpu) .recover = a3xx_recover, .submit = adreno_submit, .flush = adreno_flush, + .active_ring = adreno_active_ring, .irq = a3xx_irq, .destroy = a3xx_destroy, #ifdef CONFIG_DEBUG_FS @@ -491,7 +492,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) adreno_gpu->registers = a3xx_registers; adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index f87c4312..8199a4b 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -116,7 +116,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
static bool a4xx_me_init(struct msm_gpu *gpu) { - struct msm_ringbuffer *ring = gpu->rb; + struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17); OUT_RING(ring, 0x000003f7); @@ -137,7 +137,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu); + gpu->funcs->flush(gpu, ring); return a4xx_idle(gpu); }
@@ -337,7 +337,7 @@ static void a4xx_destroy(struct msm_gpu *gpu) static bool a4xx_idle(struct msm_gpu *gpu) { /* wait for ringbuffer to drain: */ - if (!adreno_idle(gpu)) + if (!adreno_idle(gpu, gpu->rb[0])) return false;
/* then wait for GPU to finish: */ @@ -534,6 +534,7 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value) .recover = a4xx_recover, .submit = adreno_submit, .flush = adreno_flush, + .active_ring = adreno_active_ring, .irq = a4xx_irq, .destroy = a4xx_destroy, #ifdef CONFIG_DEBUG_FS @@ -573,7 +574,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) adreno_gpu->registers = a4xx_registers; adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index c986a76..a195d5d 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -84,7 +84,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { struct msm_drm_private *priv = gpu->dev->dev_private; - struct msm_ringbuffer *ring = gpu->rb; + struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0;
for (i = 0; i < submit->nr_cmds; i++) { @@ -109,11 +109,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); - OUT_RING(ring, lower_32_bits(rbmemptr(gpu, fence))); - OUT_RING(ring, upper_32_bits(rbmemptr(gpu, fence))); + OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); + OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, submit->seqno);
- gpu->funcs->flush(gpu); + gpu->funcs->flush(gpu, ring); }
static const struct { @@ -229,7 +229,7 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state) static int a5xx_me_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - struct msm_ringbuffer *ring = gpu->rb; + struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT7(ring, CP_ME_INIT, 8);
@@ -260,9 +260,8 @@ static int a5xx_me_init(struct msm_gpu *gpu) OUT_RING(ring, 0x00000000); OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu); - - return a5xx_idle(gpu) ? 0 : -EINVAL; + gpu->funcs->flush(gpu, ring); + return a5xx_idle(gpu, ring) ? 0 : -EINVAL; }
static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, @@ -593,11 +592,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu) * ticking correctly */ if (adreno_is_a530(adreno_gpu)) { - OUT_PKT7(gpu->rb, CP_EVENT_WRITE, 1); - OUT_RING(gpu->rb, 0x0F); + OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1); + OUT_RING(gpu->rb[0], 0x0F);
- gpu->funcs->flush(gpu); - if (!a5xx_idle(gpu)) + gpu->funcs->flush(gpu, gpu->rb[0]); + if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; }
@@ -610,11 +609,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu) */ ret = a5xx_zap_shader_init(gpu); if (!ret) { - OUT_PKT7(gpu->rb, CP_SET_SECURE_MODE, 1); - OUT_RING(gpu->rb, 0x00000000); + OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1); + OUT_RING(gpu->rb[0], 0x00000000);
- gpu->funcs->flush(gpu); - if (!a5xx_idle(gpu)) + gpu->funcs->flush(gpu, gpu->rb[0]); + if (!a5xx_idle(gpu, gpu->rb[0])) return -EINVAL; } else { /* Print a warning so if we die, we know why */ @@ -691,18 +690,19 @@ static inline bool _a5xx_check_idle(struct msm_gpu *gpu) A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT); }
-bool a5xx_idle(struct msm_gpu *gpu) +bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { /* wait for CP to drain ringbuffer: */ - if (!adreno_idle(gpu)) + if (!adreno_idle(gpu, ring)) return false;
if (spin_until(_a5xx_check_idle(gpu))) { - DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X\n", + DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n", gpu->name, __builtin_return_address(0), gpu_read(gpu, REG_A5XX_RBBM_STATUS), - gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS)); - + gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS), + gpu_read(gpu, REG_A5XX_CP_RB_RPTR), + gpu_read(gpu, REG_A5XX_CP_RB_WPTR)); return false; }
@@ -1026,6 +1026,7 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) .recover = a5xx_recover, .submit = a5xx_submit, .flush = adreno_flush, + .active_ring = adreno_active_ring, .irq = a5xx_irq, .destroy = a5xx_destroy, #ifdef CONFIG_DEBUG_FS @@ -1061,7 +1062,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
a5xx_gpu->lm_leakage = 0x4E001A;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 3d62f00..8f4f093 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -57,7 +57,7 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, return -ETIMEDOUT; }
-bool a5xx_idle(struct msm_gpu *gpu); +bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
#endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c index 04aab1d..d09a716 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_power.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c @@ -173,7 +173,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); - struct msm_ringbuffer *ring = gpu->rb; + struct msm_ringbuffer *ring = gpu->rb[0];
if (!a5xx_gpu->gpmu_dwords) return 0; @@ -192,9 +192,9 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu) OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); OUT_RING(ring, 1);
- gpu->funcs->flush(gpu); + gpu->funcs->flush(gpu, ring);
- if (!a5xx_idle(gpu)) { + if (!a5xx_idle(gpu, ring)) { DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n", gpu->name); return -EINVAL; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 552e692..5f62244 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -21,7 +21,6 @@ #include "msm_gem.h" #include "msm_mmu.h"
-#define RB_SIZE SZ_32K #define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) @@ -60,38 +59,47 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) int adreno_hw_init(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - int ret; + int i;
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova); - if (ret) { - gpu->rb_iova = 0; - dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); - return ret; - } + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + int ret;
- /* reset ringbuffer: */ - gpu->rb->cur = gpu->rb->start; + if (!ring) + continue;
- /* reset completed fence seqno: */ - gpu->memptrs->fence = gpu->seqno; - gpu->memptrs->rptr = 0; + ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova); + if (ret) { + ring->iova = 0; + dev_err(gpu->dev->dev, + "could not map ringbuffer %d: %d\n", i, ret); + return ret; + } + + ring->cur = ring->start; + + /* reset completed fence seqno: */ + ring->memptrs->fence = ring->seqno; + ring->memptrs->rptr = 0; + }
/* Setup REG_CP_RB_CNTL: */ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, - /* size is log2(quad-words): */ - AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | - AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) | - (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0)); + /* size is log2(quad-words): */ + AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | + AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) | + (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
- /* Setup ringbuffer address: */ + /* Setup ringbuffer address - use ringbuffer[0] for GPU init */ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE, - REG_ADRENO_CP_RB_BASE_HI, gpu->rb_iova); + REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
if (!adreno_is_a430(adreno_gpu)) { adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR, - REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu, rptr)); + REG_ADRENO_CP_RB_RPTR_ADDR_HI, + rbmemptr(gpu->rb[0], rptr)); }
return 0; @@ -103,15 +111,19 @@ static uint32_t get_wptr(struct msm_ringbuffer *ring) }
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */ -static uint32_t get_rptr(struct adreno_gpu *adreno_gpu) +static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, + struct msm_ringbuffer *ring) { - struct msm_gpu *gpu = &adreno_gpu->base; - if (adreno_is_a430(adreno_gpu)) - return gpu->memptrs->rptr = adreno_gpu_read( + return ring->memptrs->rptr = adreno_gpu_read( adreno_gpu, REG_ADRENO_CP_RB_RPTR); else - return gpu->memptrs->rptr; + return ring->memptrs->rptr; +} + +struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) +{ + return gpu->rb[0]; }
void adreno_recover(struct msm_gpu *gpu) @@ -137,7 +149,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct msm_drm_private *priv = gpu->dev->dev_private; - struct msm_ringbuffer *ring = gpu->rb; + struct msm_ringbuffer *ring = submit->ring; unsigned i;
for (i = 0; i < submit->nr_cmds; i++) { @@ -176,7 +188,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3); OUT_RING(ring, CACHE_FLUSH_TS); - OUT_RING(ring, rbmemptr(gpu, fence)); + OUT_RING(ring, rbmemptr(ring, fence)); OUT_RING(ring, submit->seqno);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */ @@ -203,10 +215,10 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, } #endif
- gpu->funcs->flush(gpu); + gpu->funcs->flush(gpu, ring); }
-void adreno_flush(struct msm_gpu *gpu) +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t wptr; @@ -216,7 +228,7 @@ void adreno_flush(struct msm_gpu *gpu) * to account for the possibility that the last command fit exactly into * the ringbuffer and rb->next hasn't wrapped to zero yet */ - wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1); + wptr = get_wptr(ring) % (MSM_GPU_RINGBUFFER_SZ >> 2);
/* ensure writes to ringbuffer have hit system memory: */ mb(); @@ -224,17 +236,18 @@ void adreno_flush(struct msm_gpu *gpu) adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr); }
-bool adreno_idle(struct msm_gpu *gpu) +bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - uint32_t wptr = get_wptr(gpu->rb); + uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */ - if (!spin_until(get_rptr(adreno_gpu) == wptr)) + if (!spin_until(get_rptr(adreno_gpu, ring) == wptr)) return true;
/* TODO maybe we need to reset GPU here to recover from hang? */ - DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name); + DRM_ERROR("%s: timeout waiting to drain ringbuffer %d!\n", gpu->name, + ring->id); return false; }
@@ -249,10 +262,16 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) adreno_gpu->rev.major, adreno_gpu->rev.minor, adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", gpu->memptrs->fence, - gpu->seqno); - seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu)); - seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + + seq_printf(m, "rb %d: fence: %d/%d\n", i, + ring->memptrs->fence, ring->seqno); + + seq_printf(m, " rptr: %d\n", + get_rptr(adreno_gpu, ring)); + seq_printf(m, "rb wptr: %d\n", get_wptr(ring)); + }
/* dump these out in a form that can be parsed by demsm: */ seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name); @@ -278,16 +297,23 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m) void adreno_dump_info(struct msm_gpu *gpu) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + int i;
printk("revision: %d (%d.%d.%d.%d)\n", adreno_gpu->info->revn, adreno_gpu->rev.core, adreno_gpu->rev.major, adreno_gpu->rev.minor, adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", gpu->memptrs->fence, - gpu->seqno); - printk("rptr: %d\n", get_rptr(adreno_gpu)); - printk("rb wptr: %d\n", get_wptr(gpu->rb)); + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + + printk("rb %d: fence: %d/%d\n", i, + ring->memptrs->fence, + ring->seqno); + + printk("rptr: %d\n", get_rptr(adreno_gpu, ring)); + printk("rb wptr: %d\n", get_wptr(ring)); + } }
/* would be nice to not have to duplicate the _show() stuff with printk(): */ @@ -310,23 +336,26 @@ void adreno_dump(struct msm_gpu *gpu) } }
-static uint32_t ring_freewords(struct msm_gpu *gpu) +static uint32_t ring_freewords(struct msm_ringbuffer *ring) { - struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); - uint32_t size = gpu->rb->size / 4; - uint32_t wptr = get_wptr(gpu->rb); - uint32_t rptr = get_rptr(adreno_gpu); + struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); + uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2; + uint32_t wptr = get_wptr(ring); + uint32_t rptr = get_rptr(adreno_gpu, ring); return (rptr + (size - 1) - wptr) % size; }
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords) +void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords) { - if (spin_until(ring_freewords(gpu) >= ndwords)) - DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name); + if (spin_until(ring_freewords(ring) >= ndwords)) + DRM_DEV_ERROR(ring->gpu->dev->dev, + "timeout waiting for space in ringbuffer %d\n", + ring->id); }
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, - struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs) + struct adreno_gpu *adreno_gpu, + const struct adreno_gpu_funcs *funcs, int nr_rings) { struct adreno_platform_config *config = pdev->dev.platform_data; struct msm_gpu_config adreno_gpu_config = { 0 }; @@ -354,7 +383,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu_config.va_start = SZ_16M; adreno_gpu_config.va_end = 0xffffffff;
- adreno_gpu_config.ringsz = RB_SIZE; + adreno_gpu_config.nr_rings = nr_rings;
pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD); pm_runtime_use_autosuspend(&pdev->dev); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 5ee1d6a..2a51ac4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -185,17 +185,19 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu) void adreno_recover(struct msm_gpu *gpu); void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx); -void adreno_flush(struct msm_gpu *gpu); -bool adreno_idle(struct msm_gpu *gpu); +void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring); +bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); #ifdef CONFIG_DEBUG_FS void adreno_show(struct msm_gpu *gpu, struct seq_file *m); #endif void adreno_dump_info(struct msm_gpu *gpu); void adreno_dump(struct msm_gpu *gpu); -void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords); +void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords); +struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, - struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs); + struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs, + int nr_rings); void adreno_gpu_cleanup(struct adreno_gpu *gpu);
@@ -204,7 +206,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, static inline void OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) { - adreno_wait_ring(ring->gpu, cnt+1); + adreno_wait_ring(ring, cnt+1); OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF)); }
@@ -212,14 +214,14 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, static inline void OUT_PKT2(struct msm_ringbuffer *ring) { - adreno_wait_ring(ring->gpu, 1); + adreno_wait_ring(ring, 1); OUT_RING(ring, CP_TYPE2_PKT); }
static inline void OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) { - adreno_wait_ring(ring->gpu, cnt+1); + adreno_wait_ring(ring, cnt+1); OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); }
@@ -241,14 +243,14 @@ static inline u32 PM4_PARITY(u32 val) static inline void OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt) { - adreno_wait_ring(ring->gpu, cnt + 1); + adreno_wait_ring(ring, cnt + 1); OUT_RING(ring, PKT4(regindx, cnt)); }
static inline void OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt) { - adreno_wait_ring(ring->gpu, cnt + 1); + adreno_wait_ring(ring, cnt + 1); OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) | ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23)); } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 696413d..079a5b0 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -76,6 +76,8 @@ struct msm_vblank_ctrl { spinlock_t lock; };
+#define MSM_GPU_MAX_RINGS 1 + struct msm_drm_private {
struct drm_device *dev; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index c98d3a7..9320e18 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -138,7 +138,7 @@ enum msm_gem_lock { struct msm_gem_submit { struct drm_device *dev; struct msm_gpu *gpu; - struct list_head node; /* node in gpu submit_list */ + struct list_head node; /* node in ring submit list */ struct list_head bo_list; struct ww_acquire_ctx ticket; uint32_t seqno; /* Sequence number of the submit on the ring */ @@ -146,6 +146,7 @@ struct msm_gem_submit { struct msm_gpu_submitqueue *queue; struct pid *pid; /* submitting process */ bool valid; /* true if no cmdstream patching needed */ + struct msm_ringbuffer *ring; unsigned int nr_cmds; unsigned int nr_bos; struct { diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index ac95816..5404b18 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -399,7 +399,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_gpu_submitqueue *queue; int out_fence_fd = -1; unsigned i; - int ret; + int ret, ring;
if (!gpu) return -ENXIO; @@ -532,6 +532,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
+ ring = clamp_t(uint32_t, queue->prio, 0, gpu->nr_rings - 1); + submit->ring = gpu->rb[ring]; + submit->fence = msm_fence_alloc(queue->fctx); if (IS_ERR(submit->fence)) { ret = PTR_ERR(submit->fence); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 9767d38..880d69d 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -221,11 +221,12 @@ int msm_gpu_hw_init(struct msm_gpu *gpu) * Hangcheck detection for locked gpu: */
-static void update_fences(struct msm_gpu *gpu, uint32_t fence) +static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, + uint32_t fence) { struct msm_gem_submit *submit;
- list_for_each_entry(submit, &gpu->submit_list, node) { + list_for_each_entry(submit, &ring->submits, node) { if (submit->seqno > fence) break;
@@ -241,15 +242,34 @@ static void recover_worker(struct work_struct *work) struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); struct drm_device *dev = gpu->dev; struct msm_gem_submit *submit; - uint32_t fence = gpu->memptrs->fence; + struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu); + uint64_t fence; + int i; + + /* Update all the rings with the latest and greatest fence */ + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { + struct msm_ringbuffer *ring = gpu->rb[i];
- update_fences(gpu, fence + 1); + fence = ring->memptrs->fence; + + /* + * For the current (faulting?) ring/submit advance the fence by + * one more to clear the faulting submit + */ + if (ring == cur_ring) + fence = fence + 1; + + update_fences(gpu, ring, fence); + }
mutex_lock(&dev->struct_mutex);
+ dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name); - list_for_each_entry(submit, &gpu->submit_list, node) { - if (submit->seqno == (fence + 1)) { + fence = cur_ring->memptrs->fence + 1; + + list_for_each_entry(submit, &cur_ring->submits, node) { + if (submit->seqno == fence) { struct task_struct *task;
rcu_read_lock(); @@ -271,9 +291,16 @@ static void recover_worker(struct work_struct *work) gpu->funcs->recover(gpu); pm_runtime_put_sync(&gpu->pdev->dev);
- /* replay the remaining submits after the one that hung: */ - list_for_each_entry(submit, &gpu->submit_list, node) { - gpu->funcs->submit(gpu, submit, NULL); + /* + * Replay all remaining submits starting with highest priority + * ring + */ + + for (i = gpu->nr_rings - 1; i >= 0; i--) { + struct msm_ringbuffer *ring = gpu->rb[i]; + + list_for_each_entry(submit, &ring->submits, node) + gpu->funcs->submit(gpu, submit, NULL); } }
@@ -294,25 +321,27 @@ static void hangcheck_handler(unsigned long data) struct msm_gpu *gpu = (struct msm_gpu *)data; struct drm_device *dev = gpu->dev; struct msm_drm_private *priv = dev->dev_private; - uint32_t fence = gpu->memptrs->fence; + struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu); + uint32_t fence = ring->memptrs->fence;
- if (fence != gpu->hangcheck_fence) { + if (fence != ring->hangcheck_fence) { /* some progress has been made.. ya! */ - gpu->hangcheck_fence = fence; - } else if (fence < gpu->seqno) { + ring->hangcheck_fence = fence; + } else if (fence < ring->seqno) { /* no progress and not done.. hung! */ - gpu->hangcheck_fence = fence; - dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n", - gpu->name); + ring->hangcheck_fence = fence; + dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n", + gpu->name, ring->id); dev_err(dev->dev, "%s: completed fence: %u\n", gpu->name, fence); dev_err(dev->dev, "%s: submitted fence: %u\n", - gpu->name, gpu->seqno); + gpu->name, ring->seqno); + queue_work(priv->wq, &gpu->recover_work); }
/* if still more pending work, reset the hangcheck timer: */ - if (gpu->seqno > gpu->hangcheck_fence) + if (ring->seqno > ring->hangcheck_fence) hangcheck_timer_reset(gpu);
/* workaround for missing irq: */ @@ -441,19 +470,18 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) static void retire_submits(struct msm_gpu *gpu) { struct drm_device *dev = gpu->dev; + struct msm_gem_submit *submit, *tmp; + int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- while (!list_empty(&gpu->submit_list)) { - struct msm_gem_submit *submit; - - submit = list_first_entry(&gpu->submit_list, - struct msm_gem_submit, node); + /* Retire the commits starting with highest priority */ + for (i = gpu->nr_rings - 1; i >= 0; i--) { + struct msm_ringbuffer *ring = gpu->rb[i];
- if (dma_fence_is_signaled(submit->fence)) { - retire_submit(gpu, submit); - } else { - break; + list_for_each_entry_safe(submit, tmp, &ring->submits, node) { + if (dma_fence_is_signaled(submit->fence)) + retire_submit(gpu, submit); } } } @@ -462,9 +490,10 @@ static void retire_worker(struct work_struct *work) { struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); struct drm_device *dev = gpu->dev; - uint32_t fence = gpu->memptrs->fence; + int i;
- update_fences(gpu, fence); + for (i = 0; i < gpu->nr_rings; i++) + update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence);
mutex_lock(&dev->struct_mutex); retire_submits(gpu); @@ -485,6 +514,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, { struct drm_device *dev = gpu->dev; struct msm_drm_private *priv = dev->dev_private; + struct msm_ringbuffer *ring = submit->ring; int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -495,7 +525,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
submit->seqno = ++gpu->seqno;
- list_add_tail(&submit->node, &gpu->submit_list); + list_add_tail(&submit->node, &ring->submits);
msm_rd_dump_submit(submit);
@@ -620,7 +650,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config) { - int ret; + int i, ret, nr_rings = config->nr_rings; + void *memptrs; + uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs))) gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs); @@ -633,7 +665,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, INIT_WORK(&gpu->retire_work, retire_worker); INIT_WORK(&gpu->recover_work, recover_worker);
- INIT_LIST_HEAD(&gpu->submit_list);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler, (unsigned long)gpu); @@ -698,29 +729,47 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, goto fail; }
- gpu->memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo), + memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo), MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, - &gpu->memptrs_iova); + &memptrs_iova);
- if (IS_ERR(gpu->memptrs)) { - ret = PTR_ERR(gpu->memptrs); - gpu->memptrs = NULL; + if (IS_ERR(memptrs)) { + ret = PTR_ERR(memptrs); dev_err(drm->dev, "could not allocate memptrs: %d\n", ret); goto fail; }
- /* Create ringbuffer: */ - gpu->rb = msm_ringbuffer_new(gpu, config->ringsz); - if (IS_ERR(gpu->rb)) { - ret = PTR_ERR(gpu->rb); - gpu->rb = NULL; - dev_err(drm->dev, "could not create ringbuffer: %d\n", ret); - goto fail; + if (nr_rings > ARRAY_SIZE(gpu->rb)) { + DRM_DEV_INFO_ONCE(drm->dev, "Only creating %lu ringbuffers\n", + ARRAY_SIZE(gpu->rb)); + nr_rings = ARRAY_SIZE(gpu->rb); + } + + /* Create ringbuffer(s): */ + for (i = 0; i < nr_rings; i++) { + gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova); + + if (IS_ERR(gpu->rb[i])) { + ret = PTR_ERR(gpu->rb[i]); + dev_err(drm->dev, + "could not create ringbuffer %d: %d\n", i, ret); + goto fail; + } + + memptrs += sizeof(struct msm_rbmemptrs); + memptrs_iova += sizeof(struct msm_rbmemptrs); }
+ gpu->nr_rings = nr_rings; + return 0;
fail: + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { + msm_ringbuffer_destroy(gpu->rb[i]); + gpu->rb[i] = NULL; + } + if (gpu->memptrs_bo) { msm_gem_put_vaddr(gpu->memptrs_bo); msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace); @@ -739,16 +788,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void msm_gpu_cleanup(struct msm_gpu *gpu) { + int i; + DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
bs_fini(gpu);
- if (gpu->rb) { - if (gpu->rb_iova) - msm_gem_put_iova(gpu->rb->bo, gpu->aspace); - msm_ringbuffer_destroy(gpu->rb); + for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) { + msm_ringbuffer_destroy(gpu->rb[i]); + gpu->rb[i] = NULL; }
if (gpu->memptrs_bo) { diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index fcd735c..dbc4835 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -33,7 +33,7 @@ struct msm_gpu_config { const char *irqname; uint64_t va_start; uint64_t va_end; - unsigned int ringsz; + unsigned int nr_rings; };
/* So far, with hardware that I've seen to date, we can have: @@ -57,8 +57,9 @@ struct msm_gpu_funcs { int (*pm_resume)(struct msm_gpu *gpu); void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx); - void (*flush)(struct msm_gpu *gpu); + void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); irqreturn_t (*irq)(struct msm_gpu *irq); + struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu); void (*recover)(struct msm_gpu *gpu); void (*destroy)(struct msm_gpu *gpu); #ifdef CONFIG_DEBUG_FS @@ -67,14 +68,6 @@ struct msm_gpu_funcs { #endif };
-#define rbmemptr(gpu, member) \ - ((gpu)->memptrs_iova + offsetof(struct msm_rbmemptrs, member)) - -struct msm_rbmemptrs { - volatile uint32_t rptr; - volatile uint32_t fence; -}; - struct msm_gpu { const char *name; struct drm_device *dev; @@ -93,9 +86,8 @@ struct msm_gpu { const struct msm_gpu_perfcntr *perfcntrs; uint32_t num_perfcntrs;
- /* ringbuffer: */ - struct msm_ringbuffer *rb; - uint64_t rb_iova; + struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS]; + int nr_rings;
/* list of GEM active objects: */ struct list_head active_list; @@ -133,21 +125,26 @@ struct msm_gpu { #define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */ #define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD) struct timer_list hangcheck_timer; - uint32_t hangcheck_fence; struct work_struct recover_work;
- struct list_head submit_list; - - struct msm_rbmemptrs *memptrs; struct drm_gem_object *memptrs_bo; - uint64_t memptrs_iova; - - };
+/* It turns out that all targets use the same ringbuffer size */ +#define MSM_GPU_RINGBUFFER_SZ SZ_32K + static inline bool msm_gpu_active(struct msm_gpu *gpu) { - return gpu->seqno > gpu->memptrs->fence; + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + struct msm_ringbuffer *ring = gpu->rb[i]; + + if (ring->seqno > ring->memptrs->fence) + return true; + } + + return false; }
/* Perf-Counters: diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index bf065a5..a74d66a 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -18,13 +18,14 @@ #include "msm_ringbuffer.h" #include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) +struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, + void *memptrs, uint64_t memptrs_iova) { struct msm_ringbuffer *ring; int ret;
- if (WARN_ON(!is_power_of_2(size))) - return ERR_PTR(-EINVAL); + /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ + BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { @@ -33,32 +34,38 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) }
ring->gpu = gpu; - + ring->id = id; /* Pass NULL for the iova pointer - we will map it later */ - ring->start = msm_gem_kernel_new(gpu->dev, size, MSM_BO_WC, - gpu->aspace, &ring->bo, NULL); + ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, + MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); ring->start = 0; goto fail; } - ring->end = ring->start + (size / 4); + ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); ring->cur = ring->start;
- ring->size = size; + ring->memptrs = memptrs; + ring->memptrs_iova = memptrs_iova; + + INIT_LIST_HEAD(&ring->submits);
return ring;
fail: - if (ring) - msm_ringbuffer_destroy(ring); + msm_ringbuffer_destroy(ring); return ERR_PTR(ret); }
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) { + if (IS_ERR_OR_NULL(ring)) + return; + if (ring->bo) { + msm_gem_put_iova(ring->bo, ring->gpu->aspace); msm_gem_put_vaddr(ring->bo); drm_gem_object_unreference_unlocked(ring->bo); } diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 6e0e104..5907063f 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -20,14 +20,29 @@
#include "msm_drv.h"
+#define rbmemptr(ring, member) \ + ((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member)) + +struct msm_rbmemptrs { + volatile uint32_t rptr; + volatile uint32_t fence; +}; + struct msm_ringbuffer { struct msm_gpu *gpu; - int size; + int id; struct drm_gem_object *bo; uint32_t *start, *end, *cur; + struct list_head submits; + uint64_t iova; + uint32_t seqno; + uint32_t hangcheck_fence; + struct msm_rbmemptrs *memptrs; + uint64_t memptrs_iova; };
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size); +struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, + void *memptrs, uint64_t memptrs_iova); void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
Hi Jordan,
[auto build test WARNING on v4.13-rc2] [also build test WARNING on next-20170728] [if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
url: https://github.com/0day-ci/linux/commits/Jordan-Crouse/drm-msm-GPU-fixes-and... config: arm-multi_v7_defconfig (attached as .config) compiler: arm-linux-gnueabi-gcc (Debian 6.1.1-9) 6.1.1 20160705 reproduce: wget https://raw.githubusercontent.com/01org/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # save the attached .config to linux build tree make.cross ARCH=arm
All warnings (new ones prefixed by >>):
In file included from drivers/gpu/drm/msm/msm_drv.h:37:0, from drivers/gpu/drm/msm/msm_gpu.h:24, from drivers/gpu/drm/msm/msm_gpu.c:18: drivers/gpu/drm/msm/msm_gpu.c: In function 'msm_gpu_init':
drivers/gpu/drm/msm/msm_gpu.c:743:31: warning: format '%lu' expects argument of type 'long unsigned int', but argument 7 has type 'unsigned int' [-Wformat=]
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %lu ringbuffers\n", ^ include/drm/drmP.h:208:60: note: in definition of macro 'DRM_DEV_INFO' drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \ ^~~
drivers/gpu/drm/msm/msm_gpu.c:743:3: note: in expansion of macro 'DRM_DEV_INFO_ONCE'
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %lu ringbuffers\n", ^~~~~~~~~~~~~~~~~
vim +743 drivers/gpu/drm/msm/msm_gpu.c
   648	
   649	int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
   650			struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
   651			const char *name, struct msm_gpu_config *config)
   652	{
   653		int i, ret, nr_rings = config->nr_rings;
   654		void *memptrs;
   655		uint64_t memptrs_iova;
   656	
   657		if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
   658			gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
   659	
   660		gpu->dev = drm;
   661		gpu->funcs = funcs;
   662		gpu->name = name;
   663	
   664		INIT_LIST_HEAD(&gpu->active_list);
   665		INIT_WORK(&gpu->retire_work, retire_worker);
   666		INIT_WORK(&gpu->recover_work, recover_worker);
   667	
   668	
   669		setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
   670				(unsigned long)gpu);
   671	
   672		spin_lock_init(&gpu->perf_lock);
   673	
   674	
   675		/* Map registers: */
   676		gpu->mmio = msm_ioremap(pdev, config->ioname, name);
   677		if (IS_ERR(gpu->mmio)) {
   678			ret = PTR_ERR(gpu->mmio);
   679			goto fail;
   680		}
   681	
   682		/* Get Interrupt: */
   683		gpu->irq = platform_get_irq_byname(pdev, config->irqname);
   684		if (gpu->irq < 0) {
   685			ret = gpu->irq;
   686			dev_err(drm->dev, "failed to get irq: %d\n", ret);
   687			goto fail;
   688		}
   689	
   690		ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
   691				IRQF_TRIGGER_HIGH, gpu->name, gpu);
   692		if (ret) {
   693			dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
   694			goto fail;
   695		}
   696	
   697		ret = get_clocks(pdev, gpu);
   698		if (ret)
   699			goto fail;
   700	
   701		gpu->ebi1_clk = msm_clk_get(pdev, "bus");
   702		DBG("ebi1_clk: %p", gpu->ebi1_clk);
   703		if (IS_ERR(gpu->ebi1_clk))
   704			gpu->ebi1_clk = NULL;
   705	
   706		/* Acquire regulators: */
   707		gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
   708		DBG("gpu_reg: %p", gpu->gpu_reg);
   709		if (IS_ERR(gpu->gpu_reg))
   710			gpu->gpu_reg = NULL;
   711	
   712		gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
   713		DBG("gpu_cx: %p", gpu->gpu_cx);
   714		if (IS_ERR(gpu->gpu_cx))
   715			gpu->gpu_cx = NULL;
   716	
   717		gpu->pdev = pdev;
   718		platform_set_drvdata(pdev, gpu);
   719	
   720		bs_init(gpu);
   721	
   722		gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
   723			config->va_start, config->va_end);
   724	
   725		if (gpu->aspace == NULL)
   726			dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
   727		else if (IS_ERR(gpu->aspace)) {
   728			ret = PTR_ERR(gpu->aspace);
   729			goto fail;
   730		}
   731	
   732		memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
   733			MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
   734			&memptrs_iova);
   735	
   736		if (IS_ERR(memptrs)) {
   737			ret = PTR_ERR(memptrs);
   738			dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
   739			goto fail;
   740		}
   741	
   742		if (nr_rings > ARRAY_SIZE(gpu->rb)) {
 > 743			DRM_DEV_INFO_ONCE(drm->dev, "Only creating %lu ringbuffers\n",
   744				ARRAY_SIZE(gpu->rb));
   745			nr_rings = ARRAY_SIZE(gpu->rb);
   746		}
   747	
   748		/* Create ringbuffer(s): */
   749		for (i = 0; i < nr_rings; i++) {
   750			gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
   751	
   752			if (IS_ERR(gpu->rb[i])) {
   753				ret = PTR_ERR(gpu->rb[i]);
   754				dev_err(drm->dev,
   755					"could not create ringbuffer %d: %d\n", i, ret);
   756				goto fail;
   757			}
   758	
   759			memptrs += sizeof(struct msm_rbmemptrs);
   760			memptrs_iova += sizeof(struct msm_rbmemptrs);
   761		}
   762	
   763		gpu->nr_rings = nr_rings;
   764	
   765		return 0;
   766	
   767	fail:
   768		for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
   769			msm_ringbuffer_destroy(gpu->rb[i]);
   770			gpu->rb[i] = NULL;
   771		}
   772	
   773		if (gpu->memptrs_bo) {
   774			msm_gem_put_vaddr(gpu->memptrs_bo);
   775			msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
   776			drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
   777		}
   778	
   779		if (gpu->aspace) {
   780			gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
   781				NULL, 0);
   782			msm_gem_address_space_put(gpu->aspace);
   783		}
   784	
   785		platform_set_drvdata(pdev, NULL);
   786		return ret;
   787	}
   788	
---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
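The warning above comes from printing the result of ARRAY_SIZE() - a size_t, which is 'unsigned int' on 32-bit arm - with a '%lu' specifier. A minimal, untested sketch of one way to silence it would be to switch the format to '%zu', which matches size_t on both 32- and 64-bit builds:

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		/* %zu matches the size_t returned by ARRAY_SIZE() everywhere */
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}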
In order to manage ringbuffer priority to its fullest, userspace should know how many ringbuffers it has to work with. Add a parameter to return the number of active rings.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 3 +++ include/uapi/drm/msm_drm.h | 1 + 2 files changed, 4 insertions(+)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 5f62244..39ff842 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -50,6 +50,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) if (adreno_gpu->funcs->get_timestamp) return adreno_gpu->funcs->get_timestamp(gpu, value); return -EINVAL; + case MSM_PARAM_NR_RINGS: + *value = gpu->nr_rings; + return 0; default: DBG("%s: invalid param: %u", gpu->name, param); return -EINVAL; diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index a06bf97..711ea30 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -73,6 +73,7 @@ struct drm_msm_timespec { #define MSM_PARAM_MAX_FREQ 0x04 #define MSM_PARAM_TIMESTAMP 0x05 #define MSM_PARAM_GMEM_BASE 0x06 +#define MSM_PARAM_NR_RINGS 0x07
struct drm_msm_param { __u32 pipe; /* in, MSM_PIPE_x */
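To illustrate how a client might consume the new parameter, here is a hedged sketch of a userspace query through the existing GET_PARAM ioctl (error handling trimmed; a real client would go through drmIoctl() so EINTR is retried, and the include path for the uapi header depends on the libdrm setup):

#include <stdint.h>
#include <sys/ioctl.h>
#include "msm_drm.h"	/* uapi header carrying MSM_PARAM_NR_RINGS */

/* Ask the kernel how many ringbuffers (priority levels) are available */
static uint64_t get_nr_rings(int fd)
{
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_NR_RINGS,
	};

	if (ioctl(fd, DRM_IOCTL_MSM_GET_PARAM, &req))
		return 1;	/* older kernels: assume a single ring */

	return req.value;
}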
Add a shadow pointer to track the current command being written into the ring, and don't commit it as 'cur' until the command is submitted. Because 'cur' is used to construct the software copy of the wptr, this ensures that somebody peeking in on the ring doesn't assume that a command is in flight while it is still being written. This isn't a huge deal with a single ring (though technically the hangcheck could prematurely assume the system is busy when it isn't), but it will be rather important for preemption, where the decision to preempt is based on a non-empty ringbuffer. Without a shadow, an aggressive preemption scheme could assume the ringbuffer is non-empty and switch to it before the CPU is done writing the command - and boom.
Even though preemption won't be supported on all targets, the way the code is organized makes it simpler to keep this generic for all of them. The extra load for non-preemption targets should be minimal.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 9 +++++++-- drivers/gpu/drm/msm/msm_ringbuffer.c | 1 + drivers/gpu/drm/msm/msm_ringbuffer.h | 12 ++++++++---- 3 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 39ff842..a2777d8 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -82,6 +82,7 @@ int adreno_hw_init(struct msm_gpu *gpu) }
ring->cur = ring->start; + ring->next = ring->start;
/* reset completed fence seqno: */ ring->memptrs->fence = ring->seqno; @@ -226,12 +227,15 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); uint32_t wptr;
+ /* Copy the shadow to the actual register */ + ring->cur = ring->next; + /* * Mask wptr value that we calculate to fit in the HW range. This is * to account for the possibility that the last command fit exactly into * the ringbuffer and rb->next hasn't wrapped to zero yet */ - wptr = get_wptr(ring) % (MSM_GPU_RINGBUFFER_SZ >> 2); + wptr = (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
/* ensure writes to ringbuffer have hit system memory: */ mb(); @@ -343,7 +347,8 @@ static uint32_t ring_freewords(struct msm_ringbuffer *ring) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2; - uint32_t wptr = get_wptr(ring); + /* Use ring->next to calculate free size */ + uint32_t wptr = ring->next - ring->start; uint32_t rptr = get_rptr(adreno_gpu, ring); return (rptr + (size - 1) - wptr) % size; } diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index a74d66a..899145b 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -45,6 +45,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, goto fail; } ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); + ring->next = ring->start; ring->cur = ring->start;
ring->memptrs = memptrs; diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 5907063f..8cff38a 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -32,7 +32,7 @@ struct msm_ringbuffer { struct msm_gpu *gpu; int id; struct drm_gem_object *bo; - uint32_t *start, *end, *cur; + uint32_t *start, *end, *cur, *next; struct list_head submits; uint64_t iova; uint32_t seqno; @@ -50,9 +50,13 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, static inline void OUT_RING(struct msm_ringbuffer *ring, uint32_t data) { - if (ring->cur == ring->end) - ring->cur = ring->start; - *(ring->cur++) = data; + /* + * ring->next points to the current command being written - it won't be + * committed as ring->cur until the flush + */ + if (ring->next == ring->end) + ring->next = ring->start; + *(ring->next++) = data; }
#endif /* __MSM_RINGBUFFER_H__ */
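The idea in the commit message above is small enough to show in isolation. This is a toy model of the shadow pointer, not the driver code: writes land on 'next' and only become visible through 'cur' (and therefore through the wptr an observer would compute) once flushed:

#include <stdint.h>
#include <stdio.h>

struct ring {
	uint32_t buf[16];
	uint32_t *start, *end, *cur, *next;
};

static void ring_emit(struct ring *r, uint32_t data)
{
	if (r->next == r->end)		/* wrap, like OUT_RING() does */
		r->next = r->start;
	*(r->next++) = data;
}

static void ring_flush(struct ring *r)
{
	r->cur = r->next;	/* publish: observers may now see the commands */
	/* ...the driver would write the new wptr (cur - start) to the HW here... */
}

int main(void)
{
	struct ring r;

	r.start = r.buf;
	r.end = r.buf + 16;
	r.cur = r.next = r.start;

	ring_emit(&r, 0xdeadbeef);	/* staged but not yet "in flight" */
	printf("visible before flush: %td words\n", r.cur - r.start);

	ring_flush(&r);
	printf("visible after flush:  %td words\n", r.cur - r.start);

	return 0;
}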
We use a global ringbuffer size and block size for all targets, and for A5XX preemption at least we need to know the value of RB_CNTL in several locations, so it makes sense to calculate it once and use it everywhere.
The only monkey wrench is that we need to disable the RPTR shadow for A430 targets, but that only needs to be done once and doesn't affect A5XX, so we can OR in the value at init time.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 12 +++++++----- drivers/gpu/drm/msm/msm_gpu.h | 5 +++++ 2 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index a2777d8..85e2368 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -21,7 +21,6 @@ #include "msm_gem.h" #include "msm_mmu.h"
-#define RB_BLKSIZE 32
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value) { @@ -89,11 +88,14 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->memptrs->rptr = 0; }
- /* Setup REG_CP_RB_CNTL: */ + /* + * Setup REG_CP_RB_CNTL. The same value is used across targets (with + * the exception of A430 that disables the RPTR shadow) - the calculation + * for the ringbuffer size and block size is moved to msm_gpu.h for the + * pre-processor to deal with and the A430 variant is ORed in here + */ adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL, - /* size is log2(quad-words): */ - AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | - AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) | + MSM_GPU_RB_CNTL_DEFAULT | (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
/* Setup ringbuffer address - use ringbuffer[0] for GPU init */ diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index dbc4835..914cd2f 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -132,6 +132,11 @@ struct msm_gpu {
/* It turns out that all targets use the same ringbuffer size */ #define MSM_GPU_RINGBUFFER_SZ SZ_32K +#define MSM_GPU_RINGBUFFER_BLKSIZE 32 + +#define MSM_GPU_RB_CNTL_DEFAULT \ + (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \ + AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
static inline bool msm_gpu_active(struct msm_gpu *gpu) {
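For reference, with the shared 32KB ringbuffer and the 32-byte block size the two RB_CNTL fields work out to 12 and 2. A standalone sketch of just the arithmetic (not driver code; ilog2() is modeled here with __builtin_ctz(), which is equivalent for powers of two):

#include <stdio.h>

int main(void)
{
	unsigned int rb_size  = 32 * 1024;	/* MSM_GPU_RINGBUFFER_SZ */
	unsigned int blk_size = 32;		/* MSM_GPU_RINGBUFFER_BLKSIZE */

	/* both fields are log2 of the size expressed in quad-words (8 bytes) */
	printf("BUFSZ field: %u\n", __builtin_ctz(rb_size / 8));	/* 12 */
	printf("BLKSZ field: %u\n", __builtin_ctz(blk_size / 8));	/*  2 */

	return 0;
}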
Implement preemption for A5XX targets - this allows multiple ringbuffers for different priorities with automatic preemption of a lower priority ringbuffer if a higher one is ready.
Signed-off-by: Jordan Crouse jcrouse@codeaurora.org --- drivers/gpu/drm/msm/Makefile | 1 + drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 176 +++++++++++++++++- drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 101 +++++++++- drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 294 ++++++++++++++++++++++++++++++ drivers/gpu/drm/msm/adreno/adreno_gpu.c | 14 +- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 7 +- drivers/gpu/drm/msm/msm_drv.h | 2 +- drivers/gpu/drm/msm/msm_gpu.c | 5 +- drivers/gpu/drm/msm/msm_ringbuffer.c | 1 + drivers/gpu/drm/msm/msm_ringbuffer.h | 1 + drivers/gpu/drm/msm/msm_submitqueue.c | 4 +- 11 files changed, 584 insertions(+), 22 deletions(-) create mode 100644 drivers/gpu/drm/msm/adreno/a5xx_preempt.c
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 3c234e7..d0b26dd 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -8,6 +8,7 @@ msm-y := \ adreno/a4xx_gpu.o \ adreno/a5xx_gpu.o \ adreno/a5xx_power.o \ + adreno/a5xx_preempt.o \ hdmi/hdmi.o \ hdmi/hdmi_audio.o \ hdmi/hdmi_bridge.o \ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index a195d5d..eba2a02 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -80,13 +80,65 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname) } #endif
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + uint32_t wptr; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + + /* Copy the shadow to the actual register */ + ring->cur = ring->next; + + /* Make sure to wrap wptr if we need to */ + wptr = get_wptr(ring); + + spin_unlock_irqrestore(&ring->lock, flags); + + /* Make sure everything is posted before making a decision */ + mb(); + + /* Update HW if this is the current ring and we are not in preempt */ + if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu)) + gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); +} + static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_file_private *ctx) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); struct msm_drm_private *priv = gpu->dev->dev_private; struct msm_ringbuffer *ring = submit->ring; unsigned int i, ibs = 0;
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); + OUT_RING(ring, 0x02); + + /* Turn off protected mode to write to special registers */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + /* Set the save preemption record for the ring/command */ + OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); + OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); + OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring->id])); + + /* Turn back on protected mode */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 1); + + /* Enable local preemption for finegrain preemption */ + OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); + OUT_RING(ring, 0x02); + + /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ + OUT_PKT7(ring, CP_YIELD_ENABLE, 1); + OUT_RING(ring, 0x02); + + /* Submit the commands */ for (i = 0; i < submit->nr_cmds; i++) { switch (submit->cmd[i].type) { case MSM_SUBMIT_CMD_IB_TARGET_BUF: @@ -104,16 +156,54 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, } }
+ /* + * Write the render mode to NULL (0) to indicate to the CP that the IBs + * are done rendering - otherwise a lucky preemption would start + * replaying from the last checkpoint + */ + OUT_PKT7(ring, CP_SET_RENDER_MODE, 5); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + OUT_RING(ring, 0); + + /* Turn off IB level preemptions */ + OUT_PKT7(ring, CP_YIELD_ENABLE, 1); + OUT_RING(ring, 0x01); + + /* Write the fence to the scratch register */ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1); OUT_RING(ring, submit->seqno);
+ /* + * Execute a CACHE_FLUSH_TS event. This will ensure that the + * timestamp is written to the memory and then triggers the interrupt + */ OUT_PKT7(ring, CP_EVENT_WRITE, 4); OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31)); OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence))); OUT_RING(ring, submit->seqno);
- gpu->funcs->flush(gpu, ring); + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + /* + * If dword[2:1] are non zero, they specify an address for the CP to + * write the value of dword[3] to on preemption complete. Write 0 to + * skip the write + */ + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + /* Data value - not used if the address above is 0 */ + OUT_RING(ring, 0x01); + /* Set bit 0 to trigger an interrupt on preempt complete */ + OUT_RING(ring, 0x01); + + a5xx_flush(gpu, ring); + + /* Check to see if we need to start preemption */ + a5xx_preempt_trigger(gpu); }
static const struct { @@ -264,6 +354,50 @@ static int a5xx_me_init(struct msm_gpu *gpu) return a5xx_idle(gpu, ring) ? 0 : -EINVAL; }
+static int a5xx_preempt_start(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + struct msm_ringbuffer *ring = gpu->rb[0]; + + if (gpu->nr_rings == 1) + return 0; + + /* Turn off protected mode to write to special registers */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 0); + + /* Set the save preemption record for the ring/command */ + OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2); + OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id])); + OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id])); + + /* Turn back on protected mode */ + OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1); + OUT_RING(ring, 1); + + OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); + OUT_RING(ring, 0x00); + + OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); + OUT_RING(ring, 0x01); + + OUT_PKT7(ring, CP_YIELD_ENABLE, 1); + OUT_RING(ring, 0x01); + + /* Yield the floor on command completion */ + OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x00); + OUT_RING(ring, 0x01); + OUT_RING(ring, 0x01); + + gpu->funcs->flush(gpu, ring); + + return a5xx_idle(gpu, ring) ? 0 : -EINVAL; +} + + static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, const struct firmware *fw, u64 *iova) { @@ -423,6 +557,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu) A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \ + A5XX_RBBM_INT_0_MASK_CP_SW | \ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) @@ -570,6 +705,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu) if (ret) return ret;
+ a5xx_preempt_hw_init(gpu); + ret = a5xx_ucode_init(gpu); if (ret) return ret; @@ -622,6 +759,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0); }
+ /* Last step - yield the ringbuffer */ + a5xx_preempt_start(gpu); + return 0; }
@@ -652,6 +792,8 @@ static void a5xx_destroy(struct msm_gpu *gpu)
DBG("%s", gpu->name);
+ a5xx_preempt_fini(gpu); + if (a5xx_gpu->zap_dev.parent) device_unregister(&a5xx_gpu->zap_dev);
@@ -692,6 +834,14 @@ static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) { + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + if (ring != a5xx_gpu->cur_ring) { + WARN(1, "Tried to idle a non-current ringbuffer\n"); + return false; + } + /* wait for CP to drain ringbuffer: */ if (!adreno_idle(gpu, ring)) return false; @@ -886,8 +1036,13 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu) if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP) a5xx_gpmu_err_irq(gpu);
- if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) + if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) { + a5xx_preempt_trigger(gpu); msm_gpu_retire(gpu); + } + + if (status & A5XX_RBBM_INT_0_MASK_CP_SW) + a5xx_preempt_irq(gpu);
return IRQ_HANDLED; } @@ -1017,6 +1172,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) } #endif
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + + return a5xx_gpu->cur_ring; +} + static const struct adreno_gpu_funcs funcs = { .base = { .get_param = adreno_get_param, @@ -1025,8 +1188,8 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m) .pm_resume = a5xx_pm_resume, .recover = a5xx_recover, .submit = a5xx_submit, - .flush = adreno_flush, - .active_ring = adreno_active_ring, + .flush = a5xx_flush, + .active_ring = a5xx_active_ring, .irq = a5xx_irq, .destroy = a5xx_destroy, #ifdef CONFIG_DEBUG_FS @@ -1062,7 +1225,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
a5xx_gpu->lm_leakage = 0x4E001A;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1); + ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4); if (ret) { a5xx_destroy(&(a5xx_gpu->base.base)); return ERR_PTR(ret); @@ -1071,5 +1234,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) if (gpu->aspace) msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler);
+ /* Set up the preemption specific bits and pieces for each ringbuffer */ + a5xx_preempt_init(gpu); + return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h index 8f4f093..2c783fb 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -37,10 +37,97 @@ struct a5xx_gpu { uint32_t lm_leakage;
struct device zap_dev; + + struct msm_ringbuffer *cur_ring; + struct msm_ringbuffer *next_ring; + + struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS]; + struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS]; + uint64_t preempt_iova[MSM_GPU_MAX_RINGS]; + + atomic_t preempt_state; + struct timer_list preempt_timer; };
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+/* + * In order to do lockless preemption we use a simple state machine to progress + * through the process. + * + * PREEMPT_NONE - no preemption in progress. Next state START. + * PREEMPT_START - The trigger is evaluating if preemption is possible. Next + * states: TRIGGERED, NONE + * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next + * states: FAULTED, PENDING + * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger + * recovery. Next state: N/A + * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is + * checking the success of the operation. Next state: FAULTED, NONE. + */ + +enum preempt_state { + PREEMPT_NONE = 0, + PREEMPT_START, + PREEMPT_TRIGGERED, + PREEMPT_FAULTED, + PREEMPT_PENDING, +}; + +/* + * struct a5xx_preempt_record is a shared buffer between the microcode and the + * CPU to store the state for preemption. The record itself is much larger + * (64k) but most of that is used by the CP for storage. + * + * There is a preemption record assigned per ringbuffer. When the CPU triggers a + * preemption, it fills out the record with the useful information (wptr, ring + * base, etc) and the microcode uses that information to set up the CP following + * the preemption. When a ring is switched out, the CP will save the ringbuffer + * state back to the record. In this way, once the records are properly set up + * the CPU can quickly switch back and forth between ringbuffers by only + * updating a few registers (often only the wptr). + * + * These are the CPU aware registers in the record: + * @magic: Must always be 0x27C4BAFC + * @info: Type of the record - written 0 by the CPU, updated by the CP + * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by + * the CP + * @cntl: Value of RB_CNTL written by CPU, save/restored by CP + * @rptr: Value of RB_RPTR written by CPU, save/restored by CP + * @wptr: Value of RB_WPTR written by CPU, save/restored by CP + * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP + * @rbase: Value of RB_BASE written by CPU, save/restored by CP + * @counter: GPU address of the storage area for the performance counters + */ +struct a5xx_preempt_record { + uint32_t magic; + uint32_t info; + uint32_t data; + uint32_t cntl; + uint32_t rptr; + uint32_t wptr; + uint64_t rptr_addr; + uint64_t rbase; + uint64_t counter; +}; + +/* Magic identifier for the preemption record */ +#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL + +/* + * Even though the structure above is only a few bytes, we need a full 64k to + * store the entire preemption record from the CP + */ +#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024) + +/* + * The preemption counter block is a storage area for the value of the + * preemption counters that are saved immediately before context switch. We + * append it on to the end of the allocation for the preemption record. + */ +#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4) + + int a5xx_power_init(struct msm_gpu *gpu); void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
@@ -60,4 +147,16 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs, bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring); void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+void a5xx_preempt_init(struct msm_gpu *gpu); +void a5xx_preempt_hw_init(struct msm_gpu *gpu); +void a5xx_preempt_trigger(struct msm_gpu *gpu); +void a5xx_preempt_irq(struct msm_gpu *gpu); +void a5xx_preempt_fini(struct msm_gpu *gpu); + +/* Return true if we are in a preempt state */ +static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu) +{ + return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE); +} + #endif /* __A5XX_GPU_H__ */ diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c new file mode 100644 index 0000000..6a3767d --- /dev/null +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -0,0 +1,294 @@ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include "msm_gem.h" +#include "a5xx_gpu.h" + +/* + * Try to transition the preemption state from old to new. Return + * true on success or false if the original state wasn't 'old' + */ +static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu, + enum preempt_state old, enum preempt_state new) +{ + enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state, + old, new); + + return (cur == old); +} + +/* + * Force the preemption state to the specified state. This is used in cases + * where the current state is known and won't change + */ +static inline void set_preempt_state(struct a5xx_gpu *gpu, + enum preempt_state new) +{ + /* + * preempt_state may be read by other cores trying to trigger a + * preemption or in the interrupt handler so barriers are needed + * before... + */ + smp_mb__before_atomic(); + atomic_set(&gpu->preempt_state, new); + /* ... 
and after*/ + smp_mb__after_atomic(); +} + +/* Write the most recent wptr for the given ring into the hardware */ +static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) +{ + unsigned long flags; + uint32_t wptr; + + if (!ring) + return; + + spin_lock_irqsave(&ring->lock, flags); + wptr = get_wptr(ring); + spin_unlock_irqrestore(&ring->lock, flags); + + gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr); +} + +/* Return the highest priority ringbuffer with something in it */ +static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) +{ + unsigned long flags; + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + bool empty; + struct msm_ringbuffer *ring = gpu->rb[i]; + + spin_lock_irqsave(&ring->lock, flags); + empty = (get_wptr(ring) == ring->memptrs->rptr); + spin_unlock_irqrestore(&ring->lock, flags); + + if (!empty) + return ring; + } + + return NULL; +} + +static void a5xx_preempt_timer(unsigned long data) +{ + struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data; + struct msm_gpu *gpu = &a5xx_gpu->base.base; + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + + if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED)) + return; + + dev_err(dev->dev, "%s: preemption timed out\n", gpu->name); + queue_work(priv->wq, &gpu->recover_work); +} + +/* Try to trigger a preemption switch */ +void a5xx_preempt_trigger(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + unsigned long flags; + struct msm_ringbuffer *ring; + + if (gpu->nr_rings == 1) + return; + + /* + * Try to start preemption by moving from NONE to START. If + * unsuccessful, a preemption is already in flight + */ + if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START)) + return; + + /* Get the next ring to preempt to */ + ring = get_next_ring(gpu); + + /* + * If no ring is populated or the highest priority ring is the current + * one do nothing except to update the wptr to the latest and greatest + */ + if (!ring || (a5xx_gpu->cur_ring == ring)) { + update_wptr(gpu, ring); + + /* Set the state back to NONE */ + set_preempt_state(a5xx_gpu, PREEMPT_NONE); + return; + } + + /* Make sure the wptr doesn't update while we're in motion */ + spin_lock_irqsave(&ring->lock, flags); + a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring); + spin_unlock_irqrestore(&ring->lock, flags); + + /* Set the address of the incoming preemption record */ + gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO, + REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI, + a5xx_gpu->preempt_iova[ring->id]); + + a5xx_gpu->next_ring = ring; + + /* Start a timer to catch a stuck preemption */ + mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000)); + + /* Set the preemption state to triggered */ + set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED); + + /* Make sure everything is written before hitting the button */ + wmb(); + + /* And actually start the preemption */ + gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1); +} + +void a5xx_preempt_irq(struct msm_gpu *gpu) +{ + uint32_t status; + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + struct drm_device *dev = gpu->dev; + struct msm_drm_private *priv = dev->dev_private; + + if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING)) + return; + + /* Delete the preemption watchdog timer */ + del_timer(&a5xx_gpu->preempt_timer); + + /* + * The hardware should be setting 
CP_CONTEXT_SWITCH_CNTL to zero before + * firing the interrupt, but there is a non zero chance of a hardware + * condition or a software race that could set it again before we have a + * chance to finish. If that happens, log and go for recovery + */ + status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL); + if (unlikely(status)) { + set_preempt_state(a5xx_gpu, PREEMPT_FAULTED); + dev_err(dev->dev, "%s: Preemption failed to complete\n", + gpu->name); + queue_work(priv->wq, &gpu->recover_work); + return; + } + + a5xx_gpu->cur_ring = a5xx_gpu->next_ring; + a5xx_gpu->next_ring = NULL; + + update_wptr(gpu, a5xx_gpu->cur_ring); + + set_preempt_state(a5xx_gpu, PREEMPT_NONE); +} + +void a5xx_preempt_hw_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + a5xx_gpu->preempt[i]->wptr = 0; + a5xx_gpu->preempt[i]->rptr = 0; + a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; + } + + /* Write a 0 to signal that we aren't switching pagetables */ + gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO, + REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI, 0); + + /* Reset the preemption state */ + set_preempt_state(a5xx_gpu, PREEMPT_NONE); + + /* Always come up on rb 0 */ + a5xx_gpu->cur_ring = gpu->rb[0]; +} + +static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, + struct msm_ringbuffer *ring) +{ + struct adreno_gpu *adreno_gpu = &a5xx_gpu->base; + struct msm_gpu *gpu = &adreno_gpu->base; + struct a5xx_preempt_record *ptr; + struct drm_gem_object *bo = NULL; + u64 iova = 0; + + ptr = msm_gem_kernel_new(gpu->dev, + A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE, + MSM_BO_UNCACHED, gpu->aspace, &bo, &iova); + + if (IS_ERR(ptr)) + return PTR_ERR(ptr); + + a5xx_gpu->preempt_bo[ring->id] = bo; + a5xx_gpu->preempt_iova[ring->id] = iova; + a5xx_gpu->preempt[ring->id] = ptr; + + /* Set up the defaults on the preemption record */ + + ptr->magic = A5XX_PREEMPT_RECORD_MAGIC; + ptr->info = 0; + ptr->data = 0; + ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT; + ptr->rptr_addr = rbmemptr(ring, rptr); + ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE; + + return 0; +} + +void a5xx_preempt_fini(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int i; + + for (i = 0; i < gpu->nr_rings; i++) { + if (!a5xx_gpu->preempt_bo[i]) + continue; + + msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]); + + if (a5xx_gpu->preempt_iova[i]) + msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace); + + drm_gem_object_unreference(a5xx_gpu->preempt_bo[i]); + a5xx_gpu->preempt_bo[i] = NULL; + } +} + +void a5xx_preempt_init(struct msm_gpu *gpu) +{ + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); + struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu); + int i; + + /* No preemption if we only have one ring */ + if (gpu->nr_rings <= 1) + return; + + for (i = 0; i < gpu->nr_rings; i++) { + if (preempt_init_ring(a5xx_gpu, gpu->rb[i])) { + /* + * On any failure our adventure is over. 
Clean up and + * set nr_rings to 1 to force preemption off + */ + a5xx_preempt_fini(gpu); + gpu->nr_rings = 1; + + return; + } + } + + setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, + (unsigned long) a5xx_gpu); +} diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 85e2368..8414615 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -111,11 +111,6 @@ int adreno_hw_init(struct msm_gpu *gpu) return 0; }
-static uint32_t get_wptr(struct msm_ringbuffer *ring) -{ - return ring->cur - ring->start; -} - /* Use this helper to read rptr, since a430 doesn't update rptr in memory */ static uint32_t get_rptr(struct adreno_gpu *adreno_gpu, struct msm_ringbuffer *ring) @@ -170,7 +165,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); - OUT_RING(ring, submit->cmd[i].iova); + OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); OUT_RING(ring, submit->cmd[i].size); OUT_PKT2(ring); break; @@ -237,7 +232,7 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring) * to account for the possibility that the last command fit exactly into * the ringbuffer and rb->next hasn't wrapped to zero yet */ - wptr = (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2); + wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */ mb(); @@ -255,8 +250,9 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) return true;
/* TODO maybe we need to reset GPU here to recover from hang? */ - DRM_ERROR("%s: timeout waiting to drain ringbuffer %d!\n", gpu->name, - ring->id); + DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n", + gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); + return false; }
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 2a51ac4..c81434c 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -2,7 +2,7 @@ * Copyright (C) 2013 Red Hat * Author: Rob Clark robdclark@gmail.com * - * Copyright (c) 2014 The Linux Foundation. All rights reserved. + * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by @@ -309,6 +309,11 @@ static inline void adreno_gpu_write64(struct adreno_gpu *gpu, adreno_gpu_write(gpu, hi, upper_32_bits(data)); }
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring) +{ + return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2); +} + /* * Given a register and a count, return a value to program into * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 079a5b0..521ce27 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -76,7 +76,7 @@ struct msm_vblank_ctrl { spinlock_t lock; };
-#define MSM_GPU_MAX_RINGS 1 +#define MSM_GPU_MAX_RINGS 4
struct msm_drm_private {
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 880d69d..1e44f88 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -295,8 +295,7 @@ static void recover_worker(struct work_struct *work) * Replay all remaining submits starting with highest priority * ring */ - - for (i = gpu->nr_rings - 1; i >= 0; i--) { + for (i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i];
list_for_each_entry(submit, &ring->submits, node) @@ -476,7 +475,7 @@ static void retire_submits(struct msm_gpu *gpu) WARN_ON(!mutex_is_locked(&dev->struct_mutex));
/* Retire the commits starting with highest priority */ - for (i = gpu->nr_rings - 1; i >= 0; i--) { + for (i = 0; i < gpu->nr_rings; i++) { struct msm_ringbuffer *ring = gpu->rb[i];
list_for_each_entry_safe(submit, tmp, &ring->submits, node) { diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 899145b..e0b84d5 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -52,6 +52,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, ring->memptrs_iova = memptrs_iova;
INIT_LIST_HEAD(&ring->submits); + spin_lock_init(&ring->lock);
return ring;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h index 8cff38a..a553d8d 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.h +++ b/drivers/gpu/drm/msm/msm_ringbuffer.h @@ -39,6 +39,7 @@ struct msm_ringbuffer { uint32_t hangcheck_fence; struct msm_rbmemptrs *memptrs; uint64_t memptrs_iova; + spinlock_t lock; };
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 3afb086..f23b43e 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -123,9 +123,9 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
/* * Add the "default" submitqueue with id 0 - * "medium" priority (3) and no flags + * "low" priority (2) and no flags */ - return msm_submitqueue_create(drm, ctx, 3, 0, NULL); + return msm_submitqueue_create(drm, ctx, 2, 0, NULL); }
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
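From the userspace side, the priority plumbing in this series boils down to creating a submitqueue with the wanted priority and using its id on later submits. A hedged sketch (error handling omitted; it assumes the submitqueue uapi added earlier in the series, where prio 0 maps to the highest-priority ring):

#include <stdint.h>
#include <sys/ioctl.h>
#include "msm_drm.h"	/* uapi header carrying the submitqueue ioctls */

/* Create a high-priority submitqueue; the returned id is what would be
 * passed with subsequent submits. On failure fall back to the default
 * queue (id 0). */
static uint32_t make_high_prio_queue(int fd)
{
	struct drm_msm_submitqueue req = {
		.flags = 0,
		.prio  = 0,	/* 0 == highest-priority ring */
	};

	if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req))
		return 0;

	return req.id;
}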