At Thu, 4 Dec 2014 16:38:57 +0000, Chris Wilson wrote:
On Thu, Dec 04, 2014 at 04:31:15PM +0000, Chris Wilson wrote:
On Thu, Dec 04, 2014 at 01:28:39PM +0100, Daniel Vetter wrote:
On Thu, Dec 04, 2014 at 11:51:14AM +0000, Chris Wilson wrote:
On Thu, Dec 04, 2014 at 11:56:42AM +0100, Takashi Iwai wrote:
Signed-off-by: Takashi Iwai <tiwai@suse.de>
drivers/gpu/drm/drm_fops.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index ed7bc68f7e87..a82dc28d54f3 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -525,6 +525,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer, if (copy_to_user(buffer + total, e->event, e->event->length)) { total = -EFAULT;
e->destroy(e);
We shouldn't just be throwing away the event here, but put the event back at the front of the queue. Poses an interesting race issue. Seems like we want to hold the spinlock until the copy is complete so that we can fix up the failure correctly.
I've read the manpage for read and it explicitly states that when you get an error it's undefined what happens to the read position. Since -EFAULT is really just a userspace bug I think we can happily drop the event on the floor, no reason to bend over in the kernel.
Hmm. Actually the code is buggy if the provided buffer is too short for the first event in O_NONBLOCK mode.
Just inlining drm_dequeue_event is simple enough to fix the issue:
Right, this avoids the dequeuing at error.
But still there is a race between wait_event_interruptible() and dequeuing. So, the more comprehensive code should look like:
if (!access_ok(VERIFY_WRITE, buffer, count)) return -EFAULT;
spin_lock_irq(&dev->event_lock); for (;;) { if (list_empty(&file_priv->event_list)) { if (ret) break; if (!(filp->f_flags & O_NONBLOCK)) { ret = -EAGAIN; break; } spin_unlock_irq(&dev->event_lock); ret = wait_event_interruptible(....); spin_lock_irq(&dev->event_lock); if (ret < 0) break; ret = 0; continue; }
e = list_first_entry(&file_priv->event_list, struct drm_pending_event, link); if (e->event->length + ret > count) break;
if (__copy_to_user_inatomic(buffer + ret, e->event, e->event->length)) { if (!ret) ret = -EFAULT; break; }
file_priv->event_space += e->event->length; ret += e->event->length; list_del(&e->link); e->destroy(e); }
spin_unlock_irq(&dev->event_lock); return ret;
thanks,
Takashi
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 91e1105..7af6676 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -478,43 +478,17 @@ int drm_release(struct inode *inode, struct file *filp) } EXPORT_SYMBOL(drm_release);
-static bool -drm_dequeue_event(struct drm_file *file_priv,
size_t total, size_t max, struct drm_pending_event **out)
-{
struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e;
unsigned long flags;
bool ret = false;
spin_lock_irqsave(&dev->event_lock, flags);
*out = NULL;
if (list_empty(&file_priv->event_list))
goto out;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
if (e->event->length + total > max)
goto out;
file_priv->event_space += e->event->length;
list_del(&e->link);
*out = e;
ret = true;
-out:
spin_unlock_irqrestore(&dev->event_lock, flags);
return ret;
-}
ssize_t drm_read(struct file *filp, char __user *buffer, size_t count, loff_t *offset) { struct drm_file *file_priv = filp->private_data;
struct drm_pending_event *e;
size_t total;
struct drm_device *dev = file_priv->minor->dev;
unsigned long flags; ssize_t ret;
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
if ((filp->f_flags & O_NONBLOCK) == 0) { ret = wait_event_interruptible(file_priv->event_wait, !list_empty(&file_priv->event_list));
@@ -522,19 +496,35 @@ ssize_t drm_read(struct file *filp, char __user *buffer, return ret; }
total = 0;
while (drm_dequeue_event(file_priv, total, count, &e)) {
if (copy_to_user(buffer + total,
e->event, e->event->length)) {
total = -EFAULT;
break;
}
spin_lock_irqsave(&dev->event_lock, flags);
if (list_empty(&file_priv->event_list)) {
ret = -EAGAIN;
} else {
ret = 0;
do {
struct drm_pending_event *e;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
if (e->event->length + ret > count)
break;
if (__copy_to_user_inatomic(buffer + ret,
e->event, e->event->length)) {
if (ret == 0)
ret = -EFAULT;
break;
}
total += e->event->length;
e->destroy(e);
file_priv->event_space += e->event->length;
ret += e->event->length;
list_del(&e->link);
e->destroy(e);
} while (!list_empty(&file_priv->event_list)); }
spin_unlock_irqrestore(&dev->event_lock, flags);
return total ?: -EAGAIN;
return ret;
} EXPORT_SYMBOL(drm_read);
which looks familiar to the previous attempt to get O_NONBLOCK fixed. -Chris
-- Chris Wilson, Intel Open Source Technology Centre