Daniel pointed me towards this function, and there are multiple obvious problems in its implementation.

First of all, the retry loop does not work as intended: a retry only makes sense if you grab the fence reference first and then check the seqcount. Second, we skipped checking the exclusive fence whenever shared fences were present. And finally, the whole implementation was unnecessarily complex and rather hard to understand, which could easily lead to unexpected behavior of the IOCTL.
Fix all this by reworking the implementation from scratch.
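To make the first point concrete, the lockless access needs to take the reference before validating the seqcount. A minimal sketch of that ordering (illustration only, not part of the patch; it mirrors the helpers the patch uses):

	retry:
		seq = read_seqcount_begin(&resv->seq);
		rcu_read_lock();

		fence = dma_resv_excl_fence(resv);
		if (fence) {
			/* 1. grab the reference first... */
			fence = dma_fence_get_rcu(fence);
			/* 2. ...and only then decide whether to retry */
			if (!fence || read_seqcount_retry(&resv->seq, seq)) {
				dma_fence_put(fence);	/* NULL-safe */
				rcu_read_unlock();
				goto retry;
			}
		}
		rcu_read_unlock();

The rework below applies this pattern to both the shared and the exclusive fences.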
Only mildly tested and needs a thoughtful review of the code.
Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/dma-buf/dma-buf.c | 132 +++++++++++++++-----------------------
 include/linux/dma-buf.h   |   2 +-
 2 files changed, 54 insertions(+), 80 deletions(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 511fe0d217a0..1bd00e18291f 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -72,7 +72,7 @@ static void dma_buf_release(struct dentry *dentry)
 	 * If you hit this BUG() it means someone dropped their ref to the
 	 * dma-buf while still having pending operation to the buffer.
 	 */
-	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
 
 	dmabuf->ops->release(dmabuf);
 
@@ -206,12 +206,15 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
+	struct dma_buf_poll_cb_t *dcb;
 	struct dma_buf *dmabuf;
 	struct dma_resv *resv;
 	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
-	__poll_t events;
 	unsigned shared_count, seq;
+	struct dma_fence *fence;
+	__poll_t events;
+	int r, i;
 
 	dmabuf = file->private_data;
 	if (!dmabuf || !dmabuf->resv)
@@ -225,99 +228,70 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 	if (!events)
 		return 0;
 
+	dcb = events & EPOLLOUT ? &dmabuf->cb_out : &dmabuf->cb_in;
+
+	/* Only queue a new one if we are not still waiting for the old one */
+	spin_lock_irq(&dmabuf->poll.lock);
+	if (dcb->active)
+		events = 0;
+	else
+		dcb->active = events;
+	spin_unlock_irq(&dmabuf->poll.lock);
+	if (!events)
+		return 0;
+
 retry:
 	seq = read_seqcount_begin(&resv->seq);
 	rcu_read_lock();
 
 	fobj = rcu_dereference(resv->fence);
-	if (fobj)
+	if (fobj && events & EPOLLOUT)
 		shared_count = fobj->shared_count;
 	else
 		shared_count = 0;
-	fence_excl = dma_resv_excl_fence(resv);
-	if (read_seqcount_retry(&resv->seq, seq)) {
-		rcu_read_unlock();
-		goto retry;
-	}
 
-	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-		__poll_t pevents = EPOLLIN;
-
-		if (shared_count == 0)
-			pevents |= EPOLLOUT;
-
-		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active) {
-			dcb->active |= pevents;
-			events &= ~pevents;
-		} else
-			dcb->active = pevents;
-		spin_unlock_irq(&dmabuf->poll.lock);
-
-		if (events & pevents) {
-			if (!dma_fence_get_rcu(fence_excl)) {
-				/* force a recheck */
-				events &= ~pevents;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
-							   dma_buf_poll_cb)) {
-				events &= ~pevents;
-				dma_fence_put(fence_excl);
-			} else {
-				/*
-				 * No callback queued, wake up any additional
-				 * waiters.
-				 */
-				dma_fence_put(fence_excl);
-				dma_buf_poll_cb(NULL, &dcb->cb);
-			}
+	for (i = 0; i < shared_count; ++i) {
+		fence = rcu_dereference(fobj->shared[i]);
+		fence = dma_fence_get_rcu(fence);
+		if (!fence || read_seqcount_retry(&resv->seq, seq)) {
+			/* Concurrent modify detected, force re-check */
+			dma_fence_put(fence);
+			rcu_read_unlock();
+			goto retry;
 		}
-	}
 
-	if ((events & EPOLLOUT) && shared_count > 0) {
-		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
-		int i;
-
-		/* Only queue a new callback if no event has fired yet */
-		spin_lock_irq(&dmabuf->poll.lock);
-		if (dcb->active)
-			events &= ~EPOLLOUT;
-		else
-			dcb->active = EPOLLOUT;
-		spin_unlock_irq(&dmabuf->poll.lock);
-
-		if (!(events & EPOLLOUT))
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		dma_fence_put(fence);
+		if (!r) {
+			/* Callback queued */
+			events = 0;
 			goto out;
+		}
+	}
 
-		for (i = 0; i < shared_count; ++i) {
-			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-			if (!dma_fence_get_rcu(fence)) {
-				/*
-				 * fence refcount dropped to zero, this means
-				 * that fobj has been freed
-				 *
-				 * call dma_buf_poll_cb and force a recheck!
-				 */
-				events &= ~EPOLLOUT;
-				dma_buf_poll_cb(NULL, &dcb->cb);
-				break;
-			}
-			if (!dma_fence_add_callback(fence, &dcb->cb,
-						    dma_buf_poll_cb)) {
-				dma_fence_put(fence);
-				events &= ~EPOLLOUT;
-				break;
-			}
+	fence = dma_resv_excl_fence(resv);
+	if (fence) {
+		fence = dma_fence_get_rcu(fence);
+		if (!fence || read_seqcount_retry(&resv->seq, seq)) {
+			/* Concurrent modify detected, force re-check */
 			dma_fence_put(fence);
+			rcu_read_unlock();
+			goto retry;
+
+		}
 
-		/* No callback queued, wake up any additional waiters. */
-		if (i == shared_count)
-			dma_buf_poll_cb(NULL, &dcb->cb);
+		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+		dma_fence_put(fence_excl);
+		if (!r) {
+			/* Callback queued */
+			events = 0;
+			goto out;
+		}
 	}
 
+	/* No callback queued, wake up any additional waiters. */
+	dma_buf_poll_cb(NULL, &dcb->cb);
+
 out:
 	rcu_read_unlock();
 	return events;
@@ -562,8 +536,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	dmabuf->owner = exp_info->owner;
 	spin_lock_init(&dmabuf->name_lock);
 	init_waitqueue_head(&dmabuf->poll);
-	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
-	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 
 	if (!resv) {
 		resv = (struct dma_resv *)&dmabuf[1];
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index efdc56b9d95f..7e747ad54c81 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -329,7 +329,7 @@ struct dma_buf {
 		wait_queue_head_t *poll;
 
 		__poll_t active;
-	} cb_excl, cb_shared;
+	} cb_in, cb_out;
 };
 
 /**
Hi "Christian,
I love your patch! Perhaps something to improve:
[auto build test WARNING on next-20210616]
[cannot apply to tegra-drm/drm/tegra/for-next linus/master v5.13-rc6 v5.13-rc5 v5.13-rc4]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Christian-K-nig/dma-buf-fix-and-rew...
base:   c7d4c1fd91ab4a6d2620497921a9c6bf54650ab8
config: x86_64-randconfig-r022-20210617 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 64720f57bea6a6bf033feef4a5751ab9c0c3b401)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install x86_64 cross compiling tool for clang build
        # apt-get install binutils-x86-64-linux-gnu
        # https://github.com/0day-ci/linux/commit/dfa9f2ec4c082b73e644e2c565e58e2291f9...
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Christian-K-nig/dma-buf-fix-and-rework-dma_buf_poll/20210617-103036
        git checkout dfa9f2ec4c082b73e644e2c565e58e2291f94463
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> drivers/dma-buf/dma-buf.c:284:17: warning: variable 'fence_excl' is uninitialized when used here [-Wuninitialized]
                   dma_fence_put(fence_excl);
                                 ^~~~~~~~~~
   drivers/dma-buf/dma-buf.c:213:30: note: initialize the variable 'fence_excl' to silence this warning
           struct dma_fence *fence_excl;
                                        ^
                                         = NULL
   1 warning generated.
vim +/fence_excl +284 drivers/dma-buf/dma-buf.c
   206	
   207	static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
   208	{
   209		struct dma_buf_poll_cb_t *dcb;
   210		struct dma_buf *dmabuf;
   211		struct dma_resv *resv;
   212		struct dma_resv_list *fobj;
   213		struct dma_fence *fence_excl;
   214		unsigned shared_count, seq;
   215		struct dma_fence *fence;
   216		__poll_t events;
   217		int r, i;
   218	
   219		dmabuf = file->private_data;
   220		if (!dmabuf || !dmabuf->resv)
   221			return EPOLLERR;
   222	
   223		resv = dmabuf->resv;
   224	
   225		poll_wait(file, &dmabuf->poll, poll);
   226	
   227		events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
   228		if (!events)
   229			return 0;
   230	
   231		dcb = events & EPOLLOUT ? &dmabuf->cb_out : &dmabuf->cb_in;
   232	
   233		/* Only queue a new one if we are not still waiting for the old one */
   234		spin_lock_irq(&dmabuf->poll.lock);
   235		if (dcb->active)
   236			events = 0;
   237		else
   238			dcb->active = events;
   239		spin_unlock_irq(&dmabuf->poll.lock);
   240		if (!events)
   241			return 0;
   242	
   243	retry:
   244		seq = read_seqcount_begin(&resv->seq);
   245		rcu_read_lock();
   246	
   247		fobj = rcu_dereference(resv->fence);
   248		if (fobj && events & EPOLLOUT)
   249			shared_count = fobj->shared_count;
   250		else
   251			shared_count = 0;
   252	
   253		for (i = 0; i < shared_count; ++i) {
   254			fence = rcu_dereference(fobj->shared[i]);
   255			fence = dma_fence_get_rcu(fence);
   256			if (!fence || read_seqcount_retry(&resv->seq, seq)) {
   257				/* Concurrent modify detected, force re-check */
   258				dma_fence_put(fence);
   259				rcu_read_unlock();
   260				goto retry;
   261			}
   262	
   263			r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
   264			dma_fence_put(fence);
   265			if (!r) {
   266				/* Callback queued */
   267				events = 0;
   268				goto out;
   269			}
   270		}
   271	
   272		fence = dma_resv_excl_fence(resv);
   273		if (fence) {
   274			fence = dma_fence_get_rcu(fence);
   275			if (!fence || read_seqcount_retry(&resv->seq, seq)) {
   276				/* Concurrent modify detected, force re-check */
   277				dma_fence_put(fence);
   278				rcu_read_unlock();
   279				goto retry;
   280	
   281			}
   282	
   283			r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
 > 284			dma_fence_put(fence_excl);
   285			if (!r) {
   286				/* Callback queued */
   287				events = 0;
   288				goto out;
   289			}
   290		}
   291	
   292		/* No callback queued, wake up any additional waiters. */
   293		dma_buf_poll_cb(NULL, &dcb->cb);
   294	
   295	out:
   296		rcu_read_unlock();
   297		return events;
   298	}
   299	
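The warning points at the exclusive-fence block, which takes its reference in 'fence' but then puts 'fence_excl', a variable that is never assigned in the reworked function. A hedged guess at the intended code (the actual fix is the author's call) would be to drop the reference that was actually taken:

		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		dma_fence_put(fence);	/* instead of the never-assigned fence_excl */
		if (!r) {

With that, the now-unused 'fence_excl' declaration at line 213 could presumably be removed as well.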
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Hi "Christian,
I love your patch! Perhaps something to improve:
[auto build test WARNING on next-20210616]
[cannot apply to tegra-drm/drm/tegra/for-next linus/master v5.13-rc6 v5.13-rc5 v5.13-rc4]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Christian-K-nig/dma-buf-fix-and-rew...
base:   c7d4c1fd91ab4a6d2620497921a9c6bf54650ab8
config: s390-randconfig-r022-20210617 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 64720f57bea6a6bf033feef4a5751ab9c0c3b401)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install s390 cross compiling tool for clang build
        # apt-get install binutils-s390x-linux-gnu
        # https://github.com/0day-ci/linux/commit/dfa9f2ec4c082b73e644e2c565e58e2291f9...
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Christian-K-nig/dma-buf-fix-and-rework-dma_buf_poll/20210617-103036
        git checkout dfa9f2ec4c082b73e644e2c565e58e2291f94463
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=s390
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> drivers/dma-buf/dma-buf.c:284:17: warning: variable 'fence_excl' is uninitialized when used here [-Wuninitialized]
                   dma_fence_put(fence_excl);
                                 ^~~~~~~~~~
   drivers/dma-buf/dma-buf.c:213:30: note: initialize the variable 'fence_excl' to silence this warning
           struct dma_fence *fence_excl;
                                        ^
                                         = NULL
   1 warning generated.
vim +/fence_excl +284 drivers/dma-buf/dma-buf.c
   206	
   207	static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
   208	{
   209		struct dma_buf_poll_cb_t *dcb;
   210		struct dma_buf *dmabuf;
   211		struct dma_resv *resv;
   212		struct dma_resv_list *fobj;
   213		struct dma_fence *fence_excl;
   214		unsigned shared_count, seq;
   215		struct dma_fence *fence;
   216		__poll_t events;
   217		int r, i;
   218	
   219		dmabuf = file->private_data;
   220		if (!dmabuf || !dmabuf->resv)
   221			return EPOLLERR;
   222	
   223		resv = dmabuf->resv;
   224	
   225		poll_wait(file, &dmabuf->poll, poll);
   226	
   227		events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
   228		if (!events)
   229			return 0;
   230	
   231		dcb = events & EPOLLOUT ? &dmabuf->cb_out : &dmabuf->cb_in;
   232	
   233		/* Only queue a new one if we are not still waiting for the old one */
   234		spin_lock_irq(&dmabuf->poll.lock);
   235		if (dcb->active)
   236			events = 0;
   237		else
   238			dcb->active = events;
   239		spin_unlock_irq(&dmabuf->poll.lock);
   240		if (!events)
   241			return 0;
   242	
   243	retry:
   244		seq = read_seqcount_begin(&resv->seq);
   245		rcu_read_lock();
   246	
   247		fobj = rcu_dereference(resv->fence);
   248		if (fobj && events & EPOLLOUT)
   249			shared_count = fobj->shared_count;
   250		else
   251			shared_count = 0;
   252	
   253		for (i = 0; i < shared_count; ++i) {
   254			fence = rcu_dereference(fobj->shared[i]);
   255			fence = dma_fence_get_rcu(fence);
   256			if (!fence || read_seqcount_retry(&resv->seq, seq)) {
   257				/* Concurrent modify detected, force re-check */
   258				dma_fence_put(fence);
   259				rcu_read_unlock();
   260				goto retry;
   261			}
   262	
   263			r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
   264			dma_fence_put(fence);
   265			if (!r) {
   266				/* Callback queued */
   267				events = 0;
   268				goto out;
   269			}
   270		}
   271	
   272		fence = dma_resv_excl_fence(resv);
   273		if (fence) {
   274			fence = dma_fence_get_rcu(fence);
   275			if (!fence || read_seqcount_retry(&resv->seq, seq)) {
   276				/* Concurrent modify detected, force re-check */
   277				dma_fence_put(fence);
   278				rcu_read_unlock();
   279				goto retry;
   280	
   281			}
   282	
   283			r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
 > 284			dma_fence_put(fence_excl);
   285			if (!r) {
   286				/* Callback queued */
   287				events = 0;
   288				goto out;
   289			}
   290		}
   291	
   292		/* No callback queued, wake up any additional waiters. */
   293		dma_buf_poll_cb(NULL, &dcb->cb);
   294	
   295	out:
   296		rcu_read_unlock();
   297		return events;
   298	}
   299	
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
On Tue, Jun 15, 2021 at 01:21:17PM +0200, Christian König wrote:
> Daniel pointed me towards this function, and there are multiple obvious
> problems in its implementation.
>
> First of all, the retry loop does not work as intended: a retry only
> makes sense if you grab the fence reference first and then check the
> seqcount. Second, we skipped checking the exclusive fence whenever
> shared fences were present. And finally, the whole implementation was
> unnecessarily complex and rather hard to understand, which could easily
> lead to unexpected behavior of the IOCTL.
>
> Fix all this by reworking the implementation from scratch.
Can't we split this a bit?
The other thing I'm wondering: instead of open-coding this and breaking our heads trying to make sure we got it right, can't we reuse dma_resv_get_fences? That's what a lot of drivers already use to get a consistent copy of the fence set without holding the lock.
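For illustration, a rough sketch of what that could look like, assuming the dma_resv_get_fences_rcu() signature of this kernel and the 'events'/'dcb' setup from the patch above; untested, error handling trimmed:

	struct dma_fence *excl, **shared;
	unsigned int i, count, to_check;

	/* Snapshot all fences with references held; the helper does the
	 * seqcount retry dance internally, so no open-coded retry loop. */
	if (dma_resv_get_fences_rcu(dmabuf->resv, &excl, &count, &shared))
		return EPOLLERR;

	/* Writers (EPOLLOUT) must wait for the shared fences as well,
	 * readers (EPOLLIN) only for the exclusive one. */
	to_check = (events & EPOLLOUT) ? count : 0;

	for (i = 0; i < to_check; ++i) {
		if (!dma_fence_add_callback(shared[i], &dcb->cb,
					    dma_buf_poll_cb)) {
			events = 0;	/* callback queued, not signaled yet */
			goto out;
		}
	}

	if (excl && !dma_fence_add_callback(excl, &dcb->cb, dma_buf_poll_cb)) {
		events = 0;
		goto out;
	}

	/* Everything already signaled, wake up any additional waiters. */
	dma_buf_poll_cb(NULL, &dcb->cb);

out:
	dma_fence_put(excl);		/* NULL-safe */
	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);
	kfree(shared);
	return events;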
I think the actual semantics, i.e. whether we need to include the exclusive fence or not, would then stick out more.
-Daniel