This is temporary until the fence framework can be used. With the removal of lru/reservation atomicity it is possible to observe a buffer as reserved while it still carries your own old sequence number, which leads to an erroneous -EDEADLK report.

Work around this by bumping the sequence number on every retry.
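To make the failure mode concrete, below is a minimal userspace sketch (an illustration only, not part of the patch; toy_bo and check_contended are invented names). It mirrors the decision ttm_bo_reserve makes for an already-reserved buffer: retrying with the stale sequence number looks like a self-deadlock, while a bumped sequence turns the same situation into an ordinary -EAGAIN back-off.

/* toy_edeadlk.c: illustrative only; toy_bo and check_contended are made up. */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bo {
        bool     reserved;      /* buffer currently reserved by someone */
        bool     seq_valid;     /* val_seq below is meaningful */
        uint32_t val_seq;       /* validation sequence of the holder */
};

/* Roughly the decision made for an already-reserved buffer. */
static int check_contended(const struct toy_bo *bo, uint32_t sequence)
{
        if (!bo->seq_valid)
                return -EBUSY;                  /* plain contention, just wait */
        if (sequence == bo->val_seq)
                return -EDEADLK;                /* "we already hold this one" */
        if (sequence - bo->val_seq < (UINT32_C(1) << 31))
                return -EAGAIN;                 /* we are younger: back off */
        return -EBUSY;                          /* we are older: wait it out */
}

int main(void)
{
        /* a buffer that still carries our sequence from an aborted pass */
        struct toy_bo bo = { .reserved = true, .seq_valid = true, .val_seq = 42 };

        /* retrying with the old sequence is misreported as a deadlock ... */
        assert(check_contended(&bo, 42) == -EDEADLK);
        /* ... bumping the sequence every retry gives a recoverable -EAGAIN */
        assert(check_contended(&bo, 43) == -EAGAIN);

        printf("stale seqno -> EDEADLK, bumped seqno -> EAGAIN\n");
        return 0;
}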
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 7b9364b..f2bd59e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -322,8 +322,8 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	int trycnt = 0;
 	int ret, i;
 
-	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 retry:
+	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 	if (++trycnt > 100000) {
 		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
There should no longer be assumptions that reserve will always succeed with the lru lock held, so we can safely break the whole atomic reserve/lru thing. As a bonus this fixes most lockdep annotations for reservations.
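As a sketch of the idea in isolation (illustration only, not from the patch; toy_bo and the toy_* helpers are invented names): a single atomic exchange makes try-reserve atomic by itself, with contenders sleeping on a wait queue, so no external lru lock is needed. In the kernel the same shape is atomic_xchg(&bo->reserved, 1) plus bo->event_queue.

/* toy_reserve.c: illustrative only; all names here are invented. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct toy_bo {
        atomic_int      reserved;       /* 0 = free, 1 = reserved */
        pthread_mutex_t wait_lock;      /* protects only the wait queue */
        pthread_cond_t  unreserved;     /* stand-in for bo->event_queue */
};

/* Non-blocking attempt: succeeds iff we flipped reserved from 0 to 1. */
static bool toy_try_reserve(struct toy_bo *bo)
{
        return atomic_exchange(&bo->reserved, 1) == 0;
}

/* Blocking variant: sleep on the "event queue" until the exchange wins. */
static void toy_reserve(struct toy_bo *bo)
{
        pthread_mutex_lock(&bo->wait_lock);
        while (atomic_exchange(&bo->reserved, 1) != 0)
                pthread_cond_wait(&bo->unreserved, &bo->wait_lock);
        pthread_mutex_unlock(&bo->wait_lock);
}

static void toy_unreserve(struct toy_bo *bo)
{
        pthread_mutex_lock(&bo->wait_lock);
        atomic_store(&bo->reserved, 0);
        pthread_cond_broadcast(&bo->unreserved);        /* wake_up_all() */
        pthread_mutex_unlock(&bo->wait_lock);
}

int main(void)
{
        struct toy_bo bo = {
                .reserved   = 0,
                .wait_lock  = PTHREAD_MUTEX_INITIALIZER,
                .unreserved = PTHREAD_COND_INITIALIZER,
        };

        toy_reserve(&bo);                       /* uncontended: returns at once */
        if (!toy_try_reserve(&bo))              /* a second attempt must fail ... */
                toy_unreserve(&bo);             /* ... and no list lock was involved */
        return 0;
}

Deadlock avoidance between tasks that reserve multiple buffers still relies on the sequence-number checks kept in the kernel code; the sketch only shows why the lru_lock drops out of the picture.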
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c           | 54 ++++++++++++++++++++++------------
 drivers/gpu/drm/ttm/ttm_execbuf_util.c |  2 +-
 include/drm/ttm/ttm_bo_driver.h        | 19 +++---------
 3 files changed, 40 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9028327..61b5cd0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_read(&bo->reserved) != 0)) {
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
 	}
 
-	atomic_set(&bo->reserved, 1);
 	if (use_sequence) {
+		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
 		if (unlikely((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid))
-			wake_up_all(&bo->event_queue);
+			wake_up = true;
 
+		/*
+		 * In the worst case with memory ordering these values can be
+		 * seen in the wrong order. However since we call wake_up_all
+		 * in that case, this will hopefully not pose a problem,
+		 * and the worst case would only cause someone to accidentally
+		 * hit -EAGAIN in ttm_bo_reserve when they see old value of
+		 * val_seq. However this would only happen if seq_valid was
+		 * written before val_seq was, and just means some slightly
+		 * increased cpu usage
+		 */
 		bo->val_seq = sequence;
 		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
 	} else {
 		bo->seq_valid = false;
 	}
@@ -289,14 +298,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	int put_count = 0;
 	int ret;
 
-	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
-				    sequence);
-	if (likely(ret == 0))
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
+				   sequence);
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-
-	ttm_bo_list_ref_sub(bo, put_count, true);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 
 	return ret;
 }
@@ -510,7 +519,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -603,7 +612,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		return ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 
 	/*
 	 * We raced, and lost, someone else holds the reservation now,
@@ -667,7 +676,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
 		if (!ret)
 			ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
 							     !remove_all);
@@ -818,7 +834,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1799,7 +1815,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e452..bd37b5c 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -153,7 +153,7 @@ retry:
 		struct ttm_buffer_object *bo = entry->bo;
 
 retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a4..6fff432 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
- * 2) Processes attempting to reserve multiple buffers other than for eviction,
+ * Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
  * and call this function with @use_sequence == 1 and @sequence == the unique
@@ -832,7 +823,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +831,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * @use_sequence: If @bo is already reserved, Only sleep waiting for
 * it to become unreserved if @sequence < (@bo)->sequence.
 *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
@@ -855,7 +844,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 * -EDEADLK: Bo already reserved using @sequence. This error code will only
 * be returned if @use_sequence is set to true.
 */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
				bool interruptible,
				bool no_wait, bool use_sequence, uint32_t sequence);
With the lru lock no longer required for protecting reservations we can just do a ttm_bo_reserve_nolru on -EBUSY, and handle all errors in a single path.
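(The single error path this refers to is the err: label introduced at the end of ttm_eu_reserve_buffers in the diff below; the -EBUSY case now falls back to a blocking ttm_bo_reserve_nolru and rejoins the common -EAGAIN/backoff handling instead of open-coding its own cleanup.)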
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 53 ++++++++++++++--------------------
 1 file changed, 21 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index bd37b5c..c7d3236 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
 void ttm_eu_backoff_reservation(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -152,19 +136,23 @@ retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
 		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
-				spin_unlock(&glob->lru_lock);
-				ttm_eu_list_ref_sub(list);
-				return ret;
-			}
-			goto retry_this_bo;
+			ttm_eu_del_from_lru_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(bo, true, false,
+						   true, val_seq);
+			spin_lock(&glob->lru_lock);
+			if (!ret)
+				break;
+
+			if (unlikely(ret != -EAGAIN))
+				goto err;
+
+			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
@@ -174,18 +162,13 @@ retry_this_bo:
 				return ret;
 			goto retry;
 		default:
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return ret;
+			goto err;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto err;
 		}
 	}
 
@@ -194,6 +177,12 @@ retry_this_bo:
 	ttm_eu_list_ref_sub(list);
 
 	return 0;
+
+err:
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
Instead of dropping everything, waiting for the bo to be unreserved and then trying again, a better strategy is to do a blocking wait.

This maps much better to a mutex_lock-like call.
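The calling pattern this enables can be sketched with plain pthread mutexes (illustration only, not the TTM implementation; lock_all and its variables are invented names). Once a task has backed off from everything it holds, blocking on the contended lock cannot deadlock, which is the mutex_lock-like slowpath; the real code additionally uses the validation sequence to decide who backs off, which plain mutexes do not need.

/* toy_slowpath.c: illustrative only; not the TTM implementation. */
#include <pthread.h>
#include <stddef.h>

/* Try-lock every mutex; on contention drop everything we hold, take the
 * contended one with a blocking lock (the "slowpath"), then start over. */
static void lock_all(pthread_mutex_t **locks, size_t n)
{
        size_t i, j, held_slow = n;             /* n means "no slowpath lock held" */

        for (;;) {
                for (i = 0; i < n; i++) {
                        if (i == held_slow)
                                continue;       /* taken by a previous slowpath */
                        if (pthread_mutex_trylock(locks[i]) != 0)
                                break;          /* contended: back off */
                }
                if (i == n)
                        return;                 /* everything is locked */

                /* back off: release all locks taken so far, slowpath one too */
                for (j = 0; j < i; j++)
                        if (j != held_slow)
                                pthread_mutex_unlock(locks[j]);
                if (held_slow != n)
                        pthread_mutex_unlock(locks[held_slow]);

                /* holding nothing, a blocking acquire cannot deadlock */
                pthread_mutex_lock(locks[i]);
                held_slow = i;
        }
}

int main(void)
{
        pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t c = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t *locks[] = { &a, &b, &c };
        size_t i;

        lock_all(locks, 3);
        for (i = 0; i < 3; i++)
                pthread_mutex_unlock(locks[i]);
        return 0;
}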
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c    | 47 +++++++++++++++++++++++++++++++++++++++++
 include/drm/ttm/ttm_bo_driver.h | 30 ++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 61b5cd0..174b325 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -310,6 +310,53 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 	return ret;
 }
 
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+				  bool interruptible, uint32_t sequence)
+{
+	bool wake_up = false;
+	int ret;
+
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+		WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+		ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+		wake_up = true;
+
+	/**
+	 * Wake up waiters that may need to recheck for deadlock,
+	 * if we decreased the sequence number.
+	 */
+	bo->val_seq = sequence;
+	bo->seq_valid = true;
+	if (wake_up)
+		wake_up_all(&bo->event_queue);
+
+	return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+			    bool interruptible, uint32_t sequence)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int put_count, ret;
+
+	ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+	if (likely(!ret)) {
+		spin_lock(&glob->lru_lock);
+		put_count = ttm_bo_del_from_lru(bo);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
 	ttm_bo_add_to_lru(bo);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6fff432..5af71af 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -821,6 +821,36 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+/**
+ * ttm_bo_reserve_slowpath_nolru:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ *
+ * Will not remove reserved buffers from the lru lists.
+ * Otherwise identical to ttm_bo_reserve_slowpath.
+ */
+extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+					 bool interruptible,
+					 uint32_t sequence);
+
+
+/**
+ * ttm_bo_reserve_slowpath:
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @sequence: Set (@bo)->sequence to this value after lock
+ *
+ * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
+ * from all our other reservations. Because there are no other reservations
+ * held by us, this function cannot deadlock any more.
+ */
+extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+				   bool interruptible, uint32_t sequence);
 
 /**
  * ttm_bo_reserve_nolru:
This requires re-using the seqno: instead of spinning with a new seqno on every retry we keep the current one, but still drop all the other reservations we hold. Only when the blocking reserve succeeds do we try to take our other reservations back, which should also increase fairness slightly.

Changes since v1:
- Increase val_seq before calling ttm_bo_reserve_slowpath_nolru and retrying to take all entries, to prevent a race.
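For reference, the val_seq comparisons involved are plain unsigned wrap-around arithmetic, so re-using a sequence and bumping it only on a retry stays well defined across the 32-bit wrap. A tiny standalone example (seq_after is an invented name for the test the kernel writes as a - b < (1 << 31)):

/* toy_seq.c: illustrative only. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* true if sequence 'a' was issued after sequence 'b' (wrap-safe) */
static bool seq_after(uint32_t a, uint32_t b)
{
        return a - b < (UINT32_C(1) << 31);
}

int main(void)
{
        assert(seq_after(101, 100));            /* plainly newer */
        assert(!seq_after(100, 101));           /* plainly older */
        assert(seq_after(3, UINT32_MAX - 1));   /* newer across the wrap */
        return 0;
}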
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/ttm/ttm_execbuf_util.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c7d3236..7b90def 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -129,13 +129,17 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
-retry:
 	spin_lock(&glob->lru_lock);
 	val_seq = entry->bo->bdev->val_seq++;
 
+retry:
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
 		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
@@ -155,11 +159,26 @@ retry:
 			/* fallthrough */
 		case -EAGAIN:
 			ttm_eu_backoff_reservation_locked(list);
+
+			/*
+			 * temporarily increase sequence number every retry,
+			 * to prevent us from seeing our old reservation
+			 * sequence when someone else reserved the buffer,
+			 * but hasn't updated the seq_valid/seqno members yet.
+			 */
+			val_seq = entry->bo->bdev->val_seq++;
+
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
+			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
 			if (unlikely(ret != 0))
 				return ret;
+			spin_lock(&glob->lru_lock);
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
 		default:
 			goto err;
Similar rationale to the identical commit in drm/ttm. Instead of only waiting for unreservation, we make sure we actually own the reservation, then retry to get the rest.
Changes since v1:
- Increase the seqno before calling ttm_bo_reserve_slowpath
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
---
 drivers/gpu/drm/nouveau/nouveau_gem.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f2bd59e..afcba1a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -321,9 +321,10 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
+	struct nouveau_bo *res_bo = NULL;
 
-retry:
 	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+retry:
 	if (++trycnt > 100000) {
 		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
@@ -341,6 +342,11 @@ retry:
 			return -ENOENT;
 		}
 		nvbo = gem->driver_private;
+		if (nvbo == res_bo) {
+			res_bo = NULL;
+			drm_gem_object_unreference_unlocked(gem);
+			continue;
+		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
 			NV_ERROR(drm, "multiple instances of buffer %d on "
@@ -353,15 +359,19 @@ retry:
 		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (unlikely(ret == -EAGAIN))
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
-			drm_gem_object_unreference_unlocked(gem);
+			if (unlikely(ret == -EAGAIN)) {
+				sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
+				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+							      sequence);
+				if (!ret)
+					res_bo = nvbo;
+			}
 			if (unlikely(ret)) {
+				drm_gem_object_unreference_unlocked(gem);
 				if (ret != -ERESTARTSYS)
 					NV_ERROR(drm, "fail reserve\n");
 				return ret;
 			}
-			goto retry;
 		}
 
 		b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -383,6 +393,8 @@ retry:
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
+		if (nvbo == res_bo)
+			goto retry;
 	}
 
 	return 0;
All legitimate users of this function outside ttm_bo.c are gone, now it's only an implementation detail.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c    |  4 ++--
 include/drm/ttm/ttm_bo_driver.h | 12 ------------
 2 files changed, 2 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 174b325..fd78104 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
 }
 
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
+				  bool interruptible)
 {
 	if (interruptible) {
 		return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 		return 0;
 	}
 }
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
 void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 5af71af..0fbd046 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -898,18 +898,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
  */
 extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_wait_unreserved
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Wait for a struct ttm_buffer_object to become unreserved.
- * This is typically used in the execbuf code to relax cpu-usage when
- * a potential deadlock condition backoff.
- */
-extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
-				  bool interruptible);
-
 /*
  * ttm_bo_util.c
  */