This is needed for the next commit, because the lockup detection will need the read lock to run.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/radeon/radeon.h        |  2 +-
 drivers/gpu/drm/radeon/radeon_device.c | 61 ++++++++++++++++++++--------------
 2 files changed, 37 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9e1732eb402c..1d806983ec7b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2312,7 +2312,7 @@ struct radeon_device {
 	bool				need_dma32;
 	bool				accel_working;
 	bool				fastfb_working; /* IGP feature*/
-	bool				needs_reset;
+	bool				needs_reset, in_reset;
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c8ea050c8fa4..82633fdd399d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1671,29 +1671,34 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	down_write(&rdev->exclusive_lock);

 	if (!rdev->needs_reset) {
+		WARN_ON(rdev->in_reset);
 		up_write(&rdev->exclusive_lock);
 		return 0;
 	}

 	rdev->needs_reset = false;
-
-	radeon_save_bios_scratch_regs(rdev);
-	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
-	radeon_pm_suspend(rdev);
-	radeon_suspend(rdev);

-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
-						   &ring_data[i]);
-		if (ring_sizes[i]) {
-			saved = true;
-			dev_info(rdev->dev, "Saved %d dwords of commands "
-				 "on ring %d.\n", ring_sizes[i], i);
+	if (!rdev->in_reset) {
+		rdev->in_reset = true;
+
+		radeon_save_bios_scratch_regs(rdev);
+		/* block TTM */
+		radeon_pm_suspend(rdev);
+		radeon_suspend(rdev);
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+							   &ring_data[i]);
+			if (ring_sizes[i]) {
+				saved = true;
+				dev_info(rdev->dev, "Saved %d dwords of commands "
+					 "on ring %d.\n", ring_sizes[i], i);
+			}
 		}
-	}
+	} else
+		memset(ring_data, 0, sizeof(ring_data));

-retry:
 	r = radeon_asic_reset(rdev);
 	if (!r) {
 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
@@ -1702,40 +1707,46 @@ retry:
 	}

 	radeon_restore_bios_scratch_regs(rdev);

-	if (!r) {
+	if (!r && saved) {
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 			radeon_ring_restore(rdev, &rdev->ring[i],
 					    ring_sizes[i], ring_data[i]);
-			ring_sizes[i] = 0;
 			ring_data[i] = NULL;
 		}
+	} else {
+		radeon_fence_driver_force_completion(rdev);
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			kfree(ring_data[i]);
+		}
+	}
+
+	downgrade_write(&rdev->exclusive_lock);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

+	if (!r) {
 		r = radeon_ib_ring_tests(rdev);
 		if (r) {
 			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
 			if (saved) {
-				saved = false;
+				/* if reset fails, try without saving data */
+				rdev->needs_reset = true;
 				radeon_suspend(rdev);
-				goto retry;
+				up_read(&rdev->exclusive_lock);
+				return -EAGAIN;
 			}
 		}
-	} else {
-		radeon_fence_driver_force_completion(rdev);
-
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			kfree(ring_data[i]);
-		}
 	}

 	radeon_pm_resume(rdev);
 	drm_helper_resume_force_mode(rdev->ddev);

-	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");
 	}

-	up_write(&rdev->exclusive_lock);
+	rdev->in_reset = false;
+	up_read(&rdev->exclusive_lock);
 	return r;
 }
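The key primitive this patch leans on is downgrade_write(): it atomically turns a held write lock into a read lock, so the reset path keeps excluding further resets (writers) while readers such as command submission and, after the next patch, the lockup check can proceed again. A minimal sketch of the pattern in isolation; the names are illustrative, not radeon code:

#include <linux/module.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(recovery_lock);

static int recover(void)
{
	down_write(&recovery_lock);	/* exclusive: no readers or writers */

	/* ... tear down and rebuild state no reader may observe ... */

	/*
	 * Atomically become a reader: concurrent readers may proceed
	 * again, but another writer cannot slip in between the two steps.
	 */
	downgrade_write(&recovery_lock);

	/* ... run checks that only need the state held stable ... */

	up_read(&recovery_lock);	/* pairs with the downgrade */
	return 0;
}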
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
V1 had a nasty bug breaking gpu lockup recovery. The fix is not allowing radeon_fence_driver_check_lockup to take exclusive_lock, and kill it during lockup recovery instead.

V2 used delayed work that ran during lockup recovery, but required read lock. I've fixed this by downgrading the write, and retrying if recovery fails.
---
 drivers/gpu/drm/radeon/radeon.h       |   2 +
 drivers/gpu/drm/radeon/radeon_fence.c | 115 +++++++++++++++++-----------------
 2 files changed, 61 insertions(+), 56 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1d806983ec7b..29504efe8971 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -355,6 +355,8 @@ struct radeon_fence_driver {
 	uint64_t			sync_seq[RADEON_NUM_RINGS];
 	atomic64_t			last_seq;
 	bool				initialized;
+	struct delayed_work		fence_check_work;
+	struct radeon_device		*rdev;
 };

 struct radeon_fence {
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 913787085dfa..94eca53d99f8 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -125,16 +125,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
 	return 0;
 }

-/**
- * radeon_fence_process - process a fence
- *
- * @rdev: radeon_device pointer
- * @ring: ring index the fence is associated with
- *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
- */
-void radeon_fence_process(struct radeon_device *rdev, int ring)
+static bool __radeon_fence_process(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq, last_seq, last_emitted;
 	unsigned count_loop = 0;
@@ -190,7 +181,53 @@ void radeon_fence_process(struct radeon_device *rdev, int ring)
 		}
 	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

-	if (wake)
+	if (seq < last_emitted && !rdev->in_reset)
+		mod_delayed_work(system_power_efficient_wq,
+				 &rdev->fence_drv[ring].fence_check_work,
+				 RADEON_FENCE_JIFFIES_TIMEOUT);
+
+	return wake;
+}
+
+static void radeon_fence_driver_check_lockup(struct work_struct *work)
+{
+	struct radeon_fence_driver *fence_drv;
+	struct radeon_device *rdev;
+	unsigned long iring;
+
+	fence_drv = container_of(work, struct radeon_fence_driver, fence_check_work.work);
+	rdev = fence_drv->rdev;
+	iring = fence_drv - &rdev->fence_drv[0];
+
+	down_read(&rdev->exclusive_lock);
+	if (__radeon_fence_process(rdev, iring))
+		wake_up_all(&rdev->fence_queue);
+	else if (radeon_ring_is_lockup(rdev, iring, &rdev->ring[iring])) {
+		/* good news we believe it's a lockup */
+		dev_warn(rdev->dev, "GPU lockup (current fence id "
+			 "0x%016llx last fence id 0x%016llx on ring %ld)\n",
+			 (uint64_t)atomic64_read(&fence_drv->last_seq),
+			 fence_drv->sync_seq[iring], iring);
+
+		/* remember that we need an reset */
+		rdev->needs_reset = true;
+		wake_up_all(&rdev->fence_queue);
+	}
+	up_read(&rdev->exclusive_lock);
+}
+
+/**
+ * radeon_fence_process - process a fence
+ *
+ * @rdev: radeon_device pointer
+ * @ring: ring index the fence is associated with
+ *
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
+ */
+void radeon_fence_process(struct radeon_device *rdev, int ring)
+{
+	if (__radeon_fence_process(rdev, ring))
 		wake_up_all(&rdev->fence_queue);
 }

@@ -302,9 +339,10 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 {
 	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
-	int i, r;
+	int i;

 	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
+		long r;

 		/* Save current sequence values, used to check for GPU lockups */
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -319,11 +357,11 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 		if (intr) {
 			r = wait_event_interruptible_timeout(rdev->fence_queue, (
 				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
-				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
+				 || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
 		} else {
 			r = wait_event_timeout(rdev->fence_queue, (
 				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
-				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
+				 || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
 		}

 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -334,50 +372,11 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
 		}

-		if (unlikely(r < 0))
+		if (r < 0)
 			return r;

-		if (unlikely(!signaled)) {
-			if (rdev->needs_reset)
-				return -EDEADLK;
-
-			/* we were interrupted for some reason and fence
-			 * isn't signaled yet, resume waiting */
-			if (r)
-				continue;
-
-			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (!target_seq[i])
-					continue;
-
-				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
-					break;
-			}
-
-			if (i != RADEON_NUM_RINGS)
-				continue;
-
-			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-				if (!target_seq[i])
-					continue;
-
-				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
-					break;
-			}
-
-			if (i < RADEON_NUM_RINGS) {
-				/* good news we believe it's a lockup */
-				dev_warn(rdev->dev, "GPU lockup (waiting for "
-					 "0x%016llx last fence id 0x%016llx on"
-					 " ring %d)\n",
-					 target_seq[i], last_seq[i], i);
-
-				/* remember that we need an reset */
-				rdev->needs_reset = true;
-				wake_up_all(&rdev->fence_queue);
-				return -EDEADLK;
-			}
-		}
+		if (rdev->needs_reset)
+			return -EDEADLK;
 	}
 	return 0;
 }
@@ -711,6 +710,9 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
 		rdev->fence_drv[ring].sync_seq[i] = 0;
 	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
 	rdev->fence_drv[ring].initialized = false;
+	INIT_DELAYED_WORK(&rdev->fence_drv[ring].fence_check_work,
+			  radeon_fence_driver_check_lockup);
+	rdev->fence_drv[ring].rdev = rdev;
 }

 /**
@@ -760,6 +762,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 			/* no need to trigger GPU reset as we are unloading */
 			radeon_fence_driver_force_completion(rdev);
 		}
+		cancel_delayed_work_sync(&rdev->fence_drv[ring].fence_check_work);
 		wake_up_all(&rdev->fence_queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
 		rdev->fence_drv[ring].initialized = false;
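The work handler above recovers all of its context from the work pointer alone: container_of() maps the embedded delayed_work back to its radeon_fence_driver, and subtracting the array base yields the ring index. A standalone sketch of that idiom, with illustrative structure names rather than the radeon ones:

#include <linux/kernel.h>
#include <linux/workqueue.h>

#define NUM_RINGS 8

struct device_ctx;

struct ring_ctx {
	struct delayed_work check_work;
	struct device_ctx *dev;		/* back-pointer, set at init time */
};

struct device_ctx {
	struct ring_ctx rings[NUM_RINGS];
};

static void check_work_fn(struct work_struct *work)
{
	/* delayed_work embeds a work_struct, so go via .work */
	struct ring_ctx *ring = container_of(work, struct ring_ctx,
					     check_work.work);
	struct device_ctx *dev = ring->dev;
	unsigned long idx = ring - &dev->rings[0];	/* index by pointer math */

	/* ... idx now names the ring this work item belongs to ... */
	(void)idx;
}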
Am 18.08.2014 um 16:45 schrieb Maarten Lankhorst:
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
> [...]
> @@ -355,6 +355,8 @@ struct radeon_fence_driver {
>  	uint64_t			sync_seq[RADEON_NUM_RINGS];
>  	atomic64_t			last_seq;
>  	bool				initialized;
> +	struct delayed_work		fence_check_work;
> +	struct radeon_device		*rdev;
Put the reference to the device as the first field in the structure.
>  };
> [...]
> -/**
> - * radeon_fence_process - process a fence
> - *
> - * @rdev: radeon_device pointer
> - * @ring: ring index the fence is associated with
> - *
> - * Checks the current fence value and wakes the fence queue
> - * if the sequence number has increased (all asics).
> - */
> -void radeon_fence_process(struct radeon_device *rdev, int ring)
> +static bool __radeon_fence_process(struct radeon_device *rdev, int ring)
Don't use "__" for internal radeon function names and especially don't remove the function documentation.
> [...]
> -	if (wake)
> +	if (seq < last_emitted && !rdev->in_reset)
> +		mod_delayed_work(system_power_efficient_wq,
> +				 &rdev->fence_drv[ring].fence_check_work,
> +				 RADEON_FENCE_JIFFIES_TIMEOUT);
Am I wrong or do you queue the work only after radeon_fence_process is called for the first time?
Might be a good idea to have an explicit queue_delayed_work in radeon_fence_emit as well.
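What Christian suggests would look roughly like the sketch below; the helper name and the exact call site at the end of radeon_fence_emit() are assumptions, not taken from a posted patch:

/*
 * Hypothetical helper, called once the fence is on the ring: arm the
 * per-ring check immediately, so even the first fence is watched before
 * anyone calls radeon_fence_process(). queue_delayed_work() is a no-op
 * if the work is already pending, so emitting many fences back to back
 * does not keep pushing the timer out.
 */
static void radeon_fence_arm_check(struct radeon_device *rdev, int ring)
{
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].fence_check_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}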
Op 18-08-14 om 17:12 schreef Christian König:
> Am 18.08.2014 um 16:45 schrieb Maarten Lankhorst:
>> [...]
>> -void radeon_fence_process(struct radeon_device *rdev, int ring)
>> +static bool __radeon_fence_process(struct radeon_device *rdev, int ring)
> Don't use "__" for internal radeon function names and especially don't remove the function documentation.
I've moved the documentation to the new radeon_fence_process function that calls the __radeon_fence_process one, which is an internal function that is only used by the driver. I guess I can rename it to __radeon_fence_process_nowake or something instead, but documentation doesn't make sense since it's not used outside of radeon_fence.c
>> [...]
>> -	if (wake)
>> +	if (seq < last_emitted && !rdev->in_reset)
>> +		mod_delayed_work(system_power_efficient_wq,
>> +				 &rdev->fence_drv[ring].fence_check_work,
>> +				 RADEON_FENCE_JIFFIES_TIMEOUT);
> Am I wrong or do you queue the work only after radeon_fence_process is called for the first time?
>
> Might be a good idea to have an explicit queue_delayed_work in radeon_fence_emit as well.
Yeah might as well, with these changes it makes sense to run it as soon as possible.
But if there are no waiters, there's no real need. It's a tree falling in a forest with no-one around to hear it. ;-)
Am 18.08.2014 um 17:28 schrieb Maarten Lankhorst:
> Op 18-08-14 om 17:12 schreef Christian König:
>> Am 18.08.2014 um 16:45 schrieb Maarten Lankhorst:
>>> [...]
>>> -void radeon_fence_process(struct radeon_device *rdev, int ring)
>>> +static bool __radeon_fence_process(struct radeon_device *rdev, int ring)
>> Don't use "__" for internal radeon function names and especially don't remove the function documentation.
> I've moved the documentation to the new radeon_fence_process function that calls the __radeon_fence_process one, which is an internal function that is only used by the driver. I guess I can rename it to __radeon_fence_process_nowake or something instead, but documentation doesn't make sense since it's not used outside of radeon_fence.c

We try to document even static functions in doxygen style even if you can't see them outside the C file.
>> Am I wrong or do you queue the work only after radeon_fence_process is called for the first time?
>>
>> Might be a good idea to have an explicit queue_delayed_work in radeon_fence_emit as well.
> Yeah might as well, with these changes it makes sense to run it as soon as possible.
>
> But if there are no waiters, there's no real need. It's a tree falling in a forest with no-one around to hear it. ;-)
That's why I suggested to schedule the work item only when the IRQ is enabled for waiting, otherwise it indeed doesn't make much sense.
Christian.
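Christian's alternative of arming the check only when a waiter actually enables the fence IRQ could look roughly like this sketch; the wrapper and its name are hypothetical, and only radeon_irq_kms_sw_irq_get() is an existing radeon helper:

/*
 * Assumed shape, no such patch was posted in this thread: arm the
 * lockup check when a waiter enables the fence IRQ, and let it expire
 * otherwise, so idle rings are never polled.
 */
static void radeon_fence_wait_enable(struct radeon_device *rdev, int ring)
{
	radeon_irq_kms_sw_irq_get(rdev, ring);	/* refcounted IRQ enable */
	queue_delayed_work(system_power_efficient_wq,
			   &rdev->fence_drv[ring].fence_check_work,
			   RADEON_FENCE_JIFFIES_TIMEOUT);
}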
This makes it possible to wait for a specific amount of time, rather than waiting indefinitely.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Christian König <deathsimple@vodafone.de>
---
 drivers/gpu/drm/radeon/radeon_fence.c | 50 +++++++++++++++++++++--------------
 1 file changed, 30 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 94eca53d99f8..e8ea423648ab 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -320,22 +320,25 @@ static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
 }

 /**
- * radeon_fence_wait_seq - wait for a specific sequence numbers
+ * radeon_fence_wait_seq_timeout - wait for a specific sequence numbers
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptable sleep
+ * @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics). Sequnce number array is indexed by ring id.
 * @intr selects whether to use interruptable (true) or non-interruptable
 * (false) sleep when waiting for the sequence number. Helper function
 * for radeon_fence_wait_*().
- * Returns 0 if the sequence number has passed, error for all other cases.
+ * Returns remaining time if the sequence number has passed, 0 when
+ * the wait timeout, or an error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
-static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
-				 bool intr)
+static long radeon_fence_wait_seq_timeout(struct radeon_device *rdev,
+					  u64 *target_seq, bool intr,
+					  long timeout)
 {
 	uint64_t last_seq[RADEON_NUM_RINGS];
 	bool signaled;
@@ -357,11 +360,11 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 		if (intr) {
 			r = wait_event_interruptible_timeout(rdev->fence_queue, (
 				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
-				 || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
+				 || rdev->needs_reset), timeout);
 		} else {
 			r = wait_event_timeout(rdev->fence_queue, (
 				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
-				 || rdev->needs_reset), MAX_SCHEDULE_TIMEOUT);
+				 || rdev->needs_reset), timeout);
 		}

 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -372,20 +375,22 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 			trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]);
 		}

-		if (r < 0)
+		if (r <= 0)
 			return r;

 		if (rdev->needs_reset)
 			return -EDEADLK;
+
+		timeout = r;
 	}
-	return 0;
+	return timeout;
 }

 /**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
- * @intr: use interruptable sleep
+ * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptable (true) or non-interruptable
@@ -395,7 +400,7 @@ static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
-	int r;
+	long r;

 	if (fence == NULL) {
 		WARN(1, "Querying an invalid fence : %p !\n", fence);
@@ -406,9 +411,10 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
 		return 0;

-	r = radeon_fence_wait_seq(fence->rdev, seq, intr);
-	if (r)
+	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
 		return r;
+	}

 	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
 	return 0;
@@ -433,7 +439,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 {
 	uint64_t seq[RADEON_NUM_RINGS];
 	unsigned i, num_rings = 0;
-	int r;
+	long r;

 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 		seq[i] = 0;
@@ -454,8 +460,8 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	if (num_rings == 0)
 		return -ENOENT;

-	r = radeon_fence_wait_seq(rdev, seq, intr);
-	if (r) {
+	r = radeon_fence_wait_seq_timeout(rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
 		return r;
 	}
 	return 0;
@@ -474,6 +480,7 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
+	long r;

 	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
 	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
 		/* nothing to wait for, last_seq is
 		   already the last emited fence */
 		return -ENOENT;
 	}
-	return radeon_fence_wait_seq(rdev, seq, false);
+	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0)
+		return r;
+	return 0;
 }

 /**
@@ -497,18 +507,18 @@ int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
 {
 	uint64_t seq[RADEON_NUM_RINGS] = {};
-	int r;
+	long r;

 	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
 	if (!seq[ring])
 		return 0;

-	r = radeon_fence_wait_seq(rdev, seq, false);
-	if (r) {
+	r = radeon_fence_wait_seq_timeout(rdev, seq, false, MAX_SCHEDULE_TIMEOUT);
+	if (r < 0) {
 		if (r == -EDEADLK)
 			return -EDEADLK;

-		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
+		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%ld)\n",
 			ring, r);
 	}
 	return 0;
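The return convention adopted here mirrors wait_event_timeout() itself: negative is an error, 0 means the timeout elapsed, positive is the number of jiffies left of the budget. A sketch of decoding it at an assumed call site (illustrative only; the real function is static to radeon_fence.c):

static int example_wait_two_seconds(struct radeon_device *rdev, u64 *seq)
{
	long r = radeon_fence_wait_seq_timeout(rdev, seq, true,
					       msecs_to_jiffies(2000));

	if (r < 0)
		return r;		/* e.g. -ERESTARTSYS or -EDEADLK */
	if (r == 0)
		return -ETIMEDOUT;	/* the 2 second budget ran out */
	return 0;			/* signaled; r jiffies were left over */
}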
Am 18.08.2014 um 16:45 schrieb Maarten Lankhorst:
> This is needed for the next commit, because the lockup detection will need the read lock to run.
>
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
> [...]
> +	if (!rdev->in_reset) {
> +		rdev->in_reset = true;
> +
> +		radeon_save_bios_scratch_regs(rdev);
> +		/* block TTM */
> +		radeon_pm_suspend(rdev);
> +		radeon_suspend(rdev);
That won't work correctly because you might end up with calling pm_resume more often than suspend and that can only lead to a crash. Saying this we probably already have a bug in the reset code at this point anyway, but see below.
> [...]
>  	radeon_restore_bios_scratch_regs(rdev);
We should resume PM here as well.
> [...]
> +	downgrade_write(&rdev->exclusive_lock);
> +	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
I would unlock the delayed_workqueue first and then downgrade the readlock.
> [...]
>  	radeon_pm_resume(rdev);
Move this more up.
Alex is more into this, but it's probably a bug in the current reset code that this is after the IB tests, cause the IB tests needs everything powered up and with PM handling suspended it is possible that individual blocks are powered down.
Thanks, Christian.
This is needed for the next commit, because the lockup detection will need the read lock to run.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
Changes since v1:
- Changed order of resuming a bit according to ckoenig's feedback.

Reset handling seems just as unreliable as before this commit, but at least it restores mode correctly now. :-)

 drivers/gpu/drm/radeon/radeon.h        |  2 +-
 drivers/gpu/drm/radeon/radeon_device.c | 64 ++++++++++++++++++++--------------
 2 files changed, 39 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 9e1732eb402c..1d806983ec7b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2312,7 +2312,7 @@ struct radeon_device {
 	bool				need_dma32;
 	bool				accel_working;
 	bool				fastfb_working; /* IGP feature*/
-	bool				needs_reset;
+	bool				needs_reset, in_reset;
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c8ea050c8fa4..0e9541acccd5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1671,29 +1671,35 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	down_write(&rdev->exclusive_lock);

 	if (!rdev->needs_reset) {
+		WARN_ON(rdev->in_reset);
 		up_write(&rdev->exclusive_lock);
 		return 0;
 	}

 	rdev->needs_reset = false;
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);

-	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
-	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 	radeon_pm_suspend(rdev);
-	radeon_suspend(rdev);

-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
-						   &ring_data[i]);
-		if (ring_sizes[i]) {
-			saved = true;
-			dev_info(rdev->dev, "Saved %d dwords of commands "
-				 "on ring %d.\n", ring_sizes[i], i);
+	if (!rdev->in_reset) {
+		rdev->in_reset = true;
+
+		radeon_save_bios_scratch_regs(rdev);
+		radeon_suspend(rdev);
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
+							   &ring_data[i]);
+			if (ring_sizes[i]) {
+				saved = true;
+				dev_info(rdev->dev, "Saved %d dwords of commands "
+					 "on ring %d.\n", ring_sizes[i], i);
+			}
 		}
-	}
+	} else
+		memset(ring_data, 0, sizeof(ring_data));

-retry:
 	r = radeon_asic_reset(rdev);
 	if (!r) {
 		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
@@ -1701,41 +1707,47 @@ retry:
 	}

 	radeon_restore_bios_scratch_regs(rdev);
+	radeon_pm_resume(rdev);

-	if (!r) {
+	if (!r && saved) {
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
 			radeon_ring_restore(rdev, &rdev->ring[i],
 					    ring_sizes[i], ring_data[i]);
-			ring_sizes[i] = 0;
 			ring_data[i] = NULL;
 		}
+	} else {
+		radeon_fence_driver_force_completion(rdev);
+
+		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			kfree(ring_data[i]);
+		}
+	}
+
+	drm_helper_resume_force_mode(rdev->ddev);
+	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
+	downgrade_write(&rdev->exclusive_lock);

+	if (!r) {
 		r = radeon_ib_ring_tests(rdev);
 		if (r) {
 			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
 			if (saved) {
-				saved = false;
+				/* if reset fails, try without saving data */
+				rdev->needs_reset = true;
 				radeon_suspend(rdev);
-				goto retry;
+				up_read(&rdev->exclusive_lock);
+				return -EAGAIN;
 			}
 		}
-	} else {
-		radeon_fence_driver_force_completion(rdev);
-		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-			kfree(ring_data[i]);
-		}
 	}

-	radeon_pm_resume(rdev);
-	drm_helper_resume_force_mode(rdev->ddev);
-
-	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");
 	}

-	up_write(&rdev->exclusive_lock);
+	rdev->in_reset = false;
+	up_read(&rdev->exclusive_lock);
 	return r;
 }
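With v2 a failed first attempt re-suspends, sets needs_reset again and returns -EAGAIN; a second call then takes the in_reset branch and resets without replaying the saved ring contents. An assumed call-site shape, not part of the series:

/*
 * Hypothetical caller: retry once without command replay if the first
 * reset attempt fails its IB tests (-EAGAIN).
 */
static int reset_with_retry(struct radeon_device *rdev)
{
	int r = radeon_gpu_reset(rdev);

	if (r == -EAGAIN)
		r = radeon_gpu_reset(rdev);	/* second pass skips ring restore */
	return r;
}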
On Mon, Aug 18, 2014 at 11:02 AM, Christian König <deathsimple@vodafone.de> wrote:
> Am 18.08.2014 um 16:45 schrieb Maarten Lankhorst:
>> This is needed for the next commit, because the lockup detection will need the read lock to run.
>> [...]
>> +		radeon_pm_suspend(rdev);
>> +		radeon_suspend(rdev);
> That won't work correctly because you might end up with calling pm_resume more often than suspend and that can only lead to a crash. Saying this we probably already have a bug in the reset code at this point anyway, but see below.
>> [...]
> We should resume PM here as well.
>> [...]
> I would unlock the delayed_workqueue first and then downgrade the readlock.
>> [...]
>>  	radeon_pm_resume(rdev);
> Move this more up.
>
> Alex is more into this, but it's probably a bug in the current reset code that this is after the IB tests, cause the IB tests needs everything powered up and with PM handling suspended it is possible that individual blocks are powered down.
Yeah, looks like a bug. I think the attached patch should fix it.
Alex
> Yeah, looks like a bug. I think the attached patch should fix it.
Sounds logical and the patch is Reviewed-by: Christian König <christian.koenig@amd.com>
Going to apply Maartens patch on top and test that one a bit to make sure it works as expected.
Regards, Christian.
On Mon, Aug 18, 2014 at 12:07 PM, Christian König <deathsimple@vodafone.de> wrote:
>> Yeah, looks like a bug. I think the attached patch should fix it.
> Sounds logical and the patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>
> Going to apply Maartens patch on top and test that one a bit to make sure it works as expected.
pushed my current -fixes queue to my drm-fixes-3.17-wip branch if that helps.
Alex
Hey,
On 18-08-14 18:10, Alex Deucher wrote:
> On Mon, Aug 18, 2014 at 12:07 PM, Christian König <deathsimple@vodafone.de> wrote:
>>> Yeah, looks like a bug. I think the attached patch should fix it.
>> Sounds logical and the patch is Reviewed-by: Christian König <christian.koenig@amd.com>
>>
>> Going to apply Maartens patch on top and test that one a bit to make sure it works as expected.
> pushed my current -fixes queue to my drm-fixes-3.17-wip branch if that helps.
Thanks, maybe that fixes uvd on resume for me. :-)
I'll have to rework it to include the changes, but does resuming everything in the order of my v2 patch look sane? Then as a final act I downgrade to read and run the tests.
~Maarten
On Mon, Aug 18, 2014 at 4:02 PM, Maarten Lankhorst <maarten.lankhorst@canonical.com> wrote:
> Hey,
>
> On 18-08-14 18:10, Alex Deucher wrote:
>> [...]
>> pushed my current -fixes queue to my drm-fixes-3.17-wip branch if that helps.
> Thanks, maybe that fixes uvd on resume for me. :-)
>
> I'll have to rework it to include the changes, but does resuming everything in the order of my v2 patch look sane? Then as a final act I downgrade to read and run the tests.
Seems sane. Looks like we probably need the attached patch as well, to be on the safe side for displays. It would be nice to unify the suspend/resume and gpu_reset paths at some point.
Alex