2011/7/13 Michel Dänzer <michel@daenzer.net>:
From: Michel Dänzer <michel.daenzer@amd.com>
Use the fence of the new frontbuffer, if any.
Generating a new fence could cause us to wait for completely unrelated rendering to finish before performing the flip.
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
v2:
- Rebase against drm-core-next.
- Leave the radeon_fence_unref() call in the error path, otherwise we leak the fence there.
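To illustrate the idea outside the driver, here is a small, self-contained userspace C sketch. The types below (fence, buffer_object, unpin_work) are simplified stand-ins for the radeon/TTM structures, not the real kernel code: the flip work borrows a reference to whatever fence is already attached to the new front buffer instead of creating and emitting a fresh one, and a missing fence means the flip can proceed immediately.

/* Toy model of the reworked flip/fence handling; the types are
 * simplified stand-ins, not the real radeon/TTM structures. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
	int refcount;
	bool signaled;
};

struct buffer_object {
	struct fence *sync_obj;	/* last render to this BO, or NULL if idle */
};

struct unpin_work {
	struct fence *fence;	/* fence gating the flip, or NULL */
};

static struct fence *fence_ref(struct fence *f)
{
	f->refcount++;
	return f;
}

static void fence_unref(struct fence **f)
{
	if (*f && --(*f)->refcount == 0)
		free(*f);
	*f = NULL;
}

/* Instead of creating and emitting a new fence, borrow the buffer's
 * existing one, if any. */
static void queue_flip(struct unpin_work *work, struct buffer_object *new_front)
{
	if (new_front->sync_obj)
		work->fence = fence_ref(new_front->sync_obj);
}

/* Mirrors the reworked check in the flip handler: only an unsignaled
 * fence blocks the flip. */
static bool flip_ready(const struct unpin_work *work)
{
	return !(work->fence && !work->fence->signaled);
}

int main(void)
{
	struct fence *f = calloc(1, sizeof(*f));
	struct buffer_object bo = { .sync_obj = f };
	struct unpin_work work = { 0 };

	if (!f)
		return 1;
	f->refcount = 1;
	queue_flip(&work, &bo);
	printf("ready before signal: %d\n", flip_ready(&work));	/* 0 */
	f->signaled = true;
	printf("ready after signal:  %d\n", flip_ready(&work));	/* 1 */

	fence_unref(&work.fence);
	fence_unref(&bo.sync_obj);
	return 0;
}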
 drivers/gpu/drm/radeon/radeon_display.c |   33 +++++++---------------------------
 1 files changed, 6 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 292f73f..ffce85a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -282,7 +282,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
 	work = radeon_crtc->unpin_work;
 	if (work == NULL ||
-	    !radeon_fence_signaled(work->fence)) {
+	    (work->fence && !radeon_fence_signaled(work->fence))) {
 		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 		return;
 	}
@@ -348,7 +348,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	struct radeon_framebuffer *new_radeon_fb;
 	struct drm_gem_object *obj;
 	struct radeon_bo *rbo;
-	struct radeon_fence *fence;
 	struct radeon_unpin_work *work;
 	unsigned long flags;
 	u32 tiling_flags, pitch_pixels;
@@ -359,16 +358,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	if (work == NULL)
 		return -ENOMEM;
 
-	r = radeon_fence_create(rdev, &fence);
-	if (unlikely(r != 0)) {
-		kfree(work);
-		DRM_ERROR("flip queue: failed to create fence.\n");
-		return -ENOMEM;
-	}
 	work->event = event;
 	work->rdev = rdev;
 	work->crtc_id = radeon_crtc->crtc_id;
-	work->fence = radeon_fence_ref(fence);
 	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
 	new_radeon_fb = to_radeon_framebuffer(fb);
 	/* schedule unpin of the old buffer */
@@ -377,6 +369,10 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	drm_gem_object_reference(obj);
 	rbo = gem_to_radeon_bo(obj);
 	work->old_rbo = rbo;
+	obj = new_radeon_fb->obj;
+	rbo = gem_to_radeon_bo(obj);
+	if (rbo->tbo.sync_obj)
+		work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
 	INIT_WORK(&work->work, radeon_unpin_work_func);
 
 	/* We borrow the event spin lock for protecting unpin_work */
@@ -391,9 +387,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
 	/* pin the new buffer */
-	obj = new_radeon_fb->obj;
-	rbo = gem_to_radeon_bo(obj);
-
 	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
 			 work->old_rbo, rbo);
 
@@ -461,25 +454,11 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 		goto pflip_cleanup1;
 	}
 
-	/* 32 ought to cover us */
-	r = radeon_ring_lock(rdev, 32);
-	if (r) {
-		DRM_ERROR("failed to lock the ring before flip\n");
-		goto pflip_cleanup2;
-	}
-
-	/* emit the fence */
-	radeon_fence_emit(rdev, fence);
 	/* set the proper interrupt */
 	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
-	/* fire the ring */
-	radeon_ring_unlock_commit(rdev);
 
 	return 0;
 
-pflip_cleanup2:
-	drm_vblank_put(dev, radeon_crtc->crtc_id);
-
 pflip_cleanup1:
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0)) {
@@ -501,7 +480,7 @@ pflip_cleanup:
 unlock_free:
 	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
-	radeon_fence_unref(&fence);
+	radeon_fence_unref(&work->fence);
 	kfree(work);
 
 	return r;
-- 
1.7.5.4