There is no reason to use a heavy mutex for idr protection. Use a spinlock and make idr-allocation use idr_preload().
This patch also makes mode-object lookup irq-safe, in case you ever want to look up modeset objects from interrupts. This is just a side-effect of avoiding a mutex.
Signed-off-by: David Herrmann dh.herrmann@gmail.com --- drivers/gpu/drm/drm_crtc.c | 34 ++++++++++++++++++++-------------- include/drm/drm_crtc.h | 4 ++-- 2 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 61b6978..97eba56 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -283,8 +283,10 @@ static int drm_mode_object_get_reg(struct drm_device *dev, { int ret;
- mutex_lock(&dev->mode_config.idr_mutex); - ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL); + idr_preload(GFP_KERNEL); + spin_lock_irq(&dev->mode_config.idr_lock); + + ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_NOWAIT); if (ret >= 0) { /* * Set up the object linking under the protection of the idr @@ -293,7 +295,9 @@ static int drm_mode_object_get_reg(struct drm_device *dev, obj->id = ret; obj->type = obj_type; } - mutex_unlock(&dev->mode_config.idr_mutex); + + spin_unlock_irq(&dev->mode_config.idr_lock); + idr_preload_end();
return ret < 0 ? ret : 0; } @@ -322,9 +326,9 @@ int drm_mode_object_get(struct drm_device *dev, static void drm_mode_object_register(struct drm_device *dev, struct drm_mode_object *obj) { - mutex_lock(&dev->mode_config.idr_mutex); + spin_lock_irq(&dev->mode_config.idr_lock); idr_replace(&dev->mode_config.crtc_idr, obj, obj->id); - mutex_unlock(&dev->mode_config.idr_mutex); + spin_unlock_irq(&dev->mode_config.idr_lock); }
/** @@ -339,17 +343,18 @@ static void drm_mode_object_register(struct drm_device *dev, void drm_mode_object_put(struct drm_device *dev, struct drm_mode_object *object) { - mutex_lock(&dev->mode_config.idr_mutex); + spin_lock_irq(&dev->mode_config.idr_lock); idr_remove(&dev->mode_config.crtc_idr, object->id); - mutex_unlock(&dev->mode_config.idr_mutex); + spin_unlock_irq(&dev->mode_config.idr_lock); }
static struct drm_mode_object *_object_find(struct drm_device *dev, uint32_t id, uint32_t type) { struct drm_mode_object *obj = NULL; + unsigned long flags;
- mutex_lock(&dev->mode_config.idr_mutex); + spin_lock_irqsave(&dev->mode_config.idr_lock, flags); obj = idr_find(&dev->mode_config.crtc_idr, id); if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type) obj = NULL; @@ -358,7 +363,7 @@ static struct drm_mode_object *_object_find(struct drm_device *dev, /* don't leak out unref'd fb's */ if (obj && (obj->type == DRM_MODE_OBJECT_FB)) obj = NULL; - mutex_unlock(&dev->mode_config.idr_mutex); + spin_unlock_irqrestore(&dev->mode_config.idr_lock, flags);
return obj; } @@ -433,9 +438,9 @@ EXPORT_SYMBOL(drm_framebuffer_init); static void __drm_framebuffer_unregister(struct drm_device *dev, struct drm_framebuffer *fb) { - mutex_lock(&dev->mode_config.idr_mutex); + spin_lock_irq(&dev->mode_config.idr_lock); idr_remove(&dev->mode_config.crtc_idr, fb->base.id); - mutex_unlock(&dev->mode_config.idr_mutex); + spin_unlock_irq(&dev->mode_config.idr_lock);
fb->base.id = 0; } @@ -465,14 +470,15 @@ static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev, { struct drm_mode_object *obj = NULL; struct drm_framebuffer *fb; + unsigned long flags;
- mutex_lock(&dev->mode_config.idr_mutex); + spin_lock_irqsave(&dev->mode_config.idr_lock, flags); obj = idr_find(&dev->mode_config.crtc_idr, id); if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id)) fb = NULL; else fb = obj_to_fb(obj); - mutex_unlock(&dev->mode_config.idr_mutex); + spin_unlock_irqrestore(&dev->mode_config.idr_lock, flags);
return fb; } @@ -5049,7 +5055,7 @@ void drm_mode_config_init(struct drm_device *dev) { mutex_init(&dev->mode_config.mutex); drm_modeset_lock_init(&dev->mode_config.connection_mutex); - mutex_init(&dev->mode_config.idr_mutex); + spin_lock_init(&dev->mode_config.idr_lock); mutex_init(&dev->mode_config.fb_lock); INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 77d9763..9c57b56 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -742,7 +742,7 @@ struct drm_mode_group { /** * drm_mode_config - Mode configuration control structure * @mutex: mutex protecting KMS related lists and structures - * @idr_mutex: mutex for KMS ID allocation and management + * @idr_lock: lock for KMS ID allocation and management * @crtc_idr: main KMS ID tracking object * @num_fb: number of fbs available * @fb_list: list of framebuffers available @@ -772,7 +772,7 @@ struct drm_mode_config { struct mutex mutex; /* protects configuration (mode lists etc.) */ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ - struct mutex idr_mutex; /* for IDR management */ + spinlock_t idr_lock; /* for IDR management */ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ /* this is limited to one for now */
With MST, we now have connector hotplugging, yey! Pretty easy to use in user-space, but introduces some nasty races: * If a connector is removed and added again while a compositor is in the background, it will get the same ID again. If the compositor wakes up, it cannot know whether it's the same connector or a new one, thus it must re-read EDID information, etc. * possible_clones, possible_crtcs, etc. depend on indices, not IDs. So if an object is removed and a new one is added, those bitmasks are invalid and must be refreshed. This currently does not affect connectors, but only crtcs and encoders, but it's only a matter of time when this will change.
The easiest way to protect against this is to not recycle modeset IDs. Instead, always allocate a new, higher ID. All ioctls that affect modeset objects take IDs. Thus, during hotplug races, those ioctls will simply fail if invalid IDs are passed. They will no longer silently run on a newly hotplugged object.
Furthermore, hotplug-races during state sync can now be easily detected. A call to GET_RESOURCES returns a list of available IDs atomically. User-space can now start fetching all those objects via GET_* ioctls. If any of those fails, they know that the given object was unplugged. Thus, the "possible_*" bit-fields are invalidated. User-space can now decide whether to restart the sync all over or wait for the 'change' uevent that is sent on modeset object modifications (or, well, is supposed to be sent and probably will be at some point).
With this in place, modeset object hotplugging should work fine for all modeset objects in the KMS API.
CC'ing stable so we can rely on all kernels with MST to not recycle IDs.
Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: David Herrmann dh.herrmann@gmail.com --- drivers/gpu/drm/drm_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 97eba56..ab0fe6d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -286,7 +286,7 @@ static int drm_mode_object_get_reg(struct drm_device *dev, idr_preload(GFP_KERNEL); spin_lock_irq(&dev->mode_config.idr_lock);
- ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_NOWAIT); + ret = idr_alloc_cyclic(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_NOWAIT); if (ret >= 0) { /* * Set up the object linking under the protection of the idr
On Fri, Aug 29, 2014 at 02:01:01PM +0200, David Herrmann wrote:
With MST, we now have connector hotplugging, yey! Pretty easy to use in user-space, but introduces some nasty races:
- If a connector is removed and added again while a compositor is in the background, it will get the same ID again. If the compositor wakes up, it cannot know whether it's the same connector or a new one, thus it must re-read EDID information, etc.
- possible_clones, possible_crtcs, etc. depend on indices, not IDs. So if an object is removed and a new one is added, those bitmasks are invalid and must be refreshed. This currently does not affect connectors, but only crtcs and encoders, but it's only a matter of time when this will change.
The easiest way to protect against this is to not recycle modeset IDs. Instead, always allocate a new, higher ID. All ioctls that affect modeset objects take IDs. Thus, during hotplug races, those ioctls will simply fail if invalid IDs are passed. They will no longer silently run on a newly hotplugged object.
Furthermore, hotplug-races during state sync can now be easily detected. A call to GET_RESOURCES returns a list of available IDs atomically. User-space can now start fetching all those objects via GET_* ioctls. If any of those fails, they know that the given object was unplugged. Thus, the "possible_*" bit-fields are invalidated. User-space can now decide whether to restart the sync all over or wait for the 'change' uevent that is sent on modeset object modifications (or, well, is supposed to be sent and probably will be at some point).
With this in place, modeset object hotplugging should work fine for all modeset objects in the KMS API.
CC'ing stable so we can rely on all kernels with MST to not recycle IDs.
Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: David Herrmann dh.herrmann@gmail.com
So userspace just needs to cycle through piles of framebuffer objects to make bad things happen? Doesn't sound like a terribly solid plan.
I guess we could save this by doing normal id allocations for fbs and monotonically increasing allocations for all other objects. -Daniel
drivers/gpu/drm/drm_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 97eba56..ab0fe6d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -286,7 +286,7 @@ static int drm_mode_object_get_reg(struct drm_device *dev, idr_preload(GFP_KERNEL); spin_lock_irq(&dev->mode_config.idr_lock);
- ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_NOWAIT);
- ret = idr_alloc_cyclic(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_NOWAIT); if (ret >= 0) { /*
- Set up the object linking under the protection of the idr
-- 2.1.0
Hi
On Fri, Aug 29, 2014 at 2:51 PM, Daniel Vetter daniel@ffwll.ch wrote:
On Fri, Aug 29, 2014 at 02:01:01PM +0200, David Herrmann wrote:
With MST, we now have connector hotplugging, yey! Pretty easy to use in user-space, but introduces some nasty races:
- If a connector is removed and added again while a compositor is in the background, it will get the same ID again. If the compositor wakes up, it cannot know whether it's the same connector or a new one, thus it must re-read EDID information, etc.
- possible_clones, possible_crtcs, etc. depend on indices, not IDs. So if an object is removed and a new one is added, those bitmasks are invalid and must be refreshed. This currently does not affect connectors, but only crtcs and encoders, but it's only a matter of time when this will change.
The easiest way to protect against this is to not recycle modeset IDs. Instead, always allocate a new, higher ID. All ioctls that affect modeset objects take IDs. Thus, during hotplug races, those ioctls will simply fail if invalid IDs are passed. They will no longer silently run on a newly hotplugged object.
Furthermore, hotplug-races during state sync can now be easily detected. A call to GET_RESOURCES returns a list of available IDs atomically. User-space can now start fetching all those objects via GET_* ioctls. If any of those fails, they know that the given object was unplugged. Thus, the "possible_*" bit-fields are invalidated. User-space can now decide whether to restart the sync all over or wait for the 'change' uevent that is sent on modeset object modifications (or, well, is supposed to be sent and probably will be at some point).
With this in place, modeset object hotplugging should work fine for all modeset objects in the KMS API.
CC'ing stable so we can rely on all kernels with MST to not recycle IDs.
Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: David Herrmann dh.herrmann@gmail.com
So userspace just needs to cycle through piles of framebuffer objects to make bad things happen? Doesn't sound like a terribly solid plan.
IDs will still get recycled, but only once all IDs got used. So this is "safe".
Sure, user-space can create 2 billion framebuffers and destroy them again, thus causing the ID range to overflow and recycle old IDs. Not sure how fast you can run 2 billion syscalls.. If that's a real issue, I'd vote for using the high-range for user-space managed objects, low-range for kernel-space managed objects ([1...INT_MAX] and [INT_MAX+1...UINT_MAX] or so).
I guess we could save this by doing normal id allocations for fbs and monotonically increasing allocations for all other objects.
This doesn't work. A connector with ID #n might get unplugged and another process created a new FB, which will then get ID #n. Sure, I doubt there's a real conflict as ioctls check the type, but it still sounds really ugly to me.
Thanks David
Hi
On Fri, Aug 29, 2014 at 2:57 PM, David Herrmann dh.herrmann@gmail.com wrote:
Hi
On Fri, Aug 29, 2014 at 2:51 PM, Daniel Vetter daniel@ffwll.ch wrote:
On Fri, Aug 29, 2014 at 02:01:01PM +0200, David Herrmann wrote:
With MST, we now have connector hotplugging, yey! Pretty easy to use in user-space, but introduces some nasty races:
- If a connector is removed and added again while a compositor is in the background, it will get the same ID again. If the compositor wakes up, it cannot know whether it's the same connector or a new one, thus it must re-read EDID information, etc.
- possible_clones, possible_crtcs, etc. depend on indices, not IDs. So if an object is removed and a new one is added, those bitmasks are invalid and must be refreshed. This currently does not affect connectors, but only crtcs and encoders, but it's only a matter of time when this will change.
The easiest way to protect against this is to not recycle modeset IDs. Instead, always allocate a new, higher ID. All ioctls that affect modeset objects take IDs. Thus, during hotplug races, those ioctls will simply fail if invalid IDs are passed. They will no longer silently run on a newly hotplugged object.
Furthermore, hotplug-races during state sync can now be easily detected. A call to GET_RESOURCES returns a list of available IDs atomically. User-space can now start fetching all those objects via GET_* ioctls. If any of those fails, they know that the given object was unplugged. Thus, the "possible_*" bit-fields are invalidated. User-space can now decide whether to restart the sync all over or wait for the 'change' uevent that is sent on modeset object modifications (or, well, is supposed to be sent and probably will be at some point).
With this in place, modeset object hotplugging should work fine for all modeset objects in the KMS API.
CC'ing stable so we can rely on all kernels with MST to not recycle IDs.
Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: David Herrmann dh.herrmann@gmail.com
So userspace just needs to cycle through piles of framebuffer objects to make bad things happen? Doesn't sound like a terribly solid plan.
IDs will still get recycled, but only once all IDs got used. So this is "safe".
Sure, user-space can create 2 billion framebuffers and destroy them again, thus causing the ID range to overflow and recycle old IDs. Not sure how fast you can run 2 billion syscalls.. If that's a real issue, I'd vote for using the high-range for user-space managed objects, low-range for kernel-space managed objects ([1...INT_MAX] and [INT_MAX+1...UINT_MAX] or so).
I guess we could save this by doing normal id allocations for fbs and monotonically increasing allocations for all other objects.
This doesn't work. A connector with ID #n might get unplugged and another process created a new FB, which will then get ID #n. Sure, I doubt there's a real conflict as ioctls check the type, but it still sounds really ugly to me.
On a second thought: maybe your idea isn't as bad as I thought. I mean, everyone must do type-checking when looking up mode-objects, so it seems safe to rely on that.
Thanks David
On Fri, Aug 29, 2014 at 9:10 AM, David Herrmann dh.herrmann@gmail.com wrote:
Hi
On Fri, Aug 29, 2014 at 2:57 PM, David Herrmann dh.herrmann@gmail.com wrote:
Hi
On Fri, Aug 29, 2014 at 2:51 PM, Daniel Vetter daniel@ffwll.ch wrote:
On Fri, Aug 29, 2014 at 02:01:01PM +0200, David Herrmann wrote:
With MST, we now have connector hotplugging, yey! Pretty easy to use in user-space, but introduces some nasty races:
- If a connector is removed and added again while a compositor is in the background, it will get the same ID again. If the compositor wakes up, it cannot know whether it's the same connector or a new one, thus it must re-read EDID information, etc.
- possible_clones, possible_crtcs, etc. depend on indices, not IDs. So if an object is removed and a new one is added, those bitmasks are invalid and must be refreshed. This currently does not affect connectors, but only crtcs and encoders, but it's only a matter of time when this will change.
The easiest way to protect against this is to not recycle modeset IDs. Instead, always allocate a new, higher ID. All ioctls that affect modeset objects take IDs. Thus, during hotplug races, those ioctls will simply fail if invalid IDs are passed. They will no longer silently run on a newly hotplugged object.
Furthermore, hotplug-races during state sync can now be easily detected. A call to GET_RESOURCES returns a list of available IDs atomically. User-space can now start fetching all those objects via GET_* ioctls. If any of those fails, they know that the given object was unplugged. Thus, the "possible_*" bit-fields are invalidated. User-space can now decide whether to restart the sync all over or wait for the 'change' uevent that is sent on modeset object modifications (or, well, is supposed to be sent and probably will be at some point).
With this in place, modeset object hotplugging should work fine for all modeset objects in the KMS API.
CC'ing stable so we can rely on all kernels with MST to not recycle IDs.
Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: David Herrmann dh.herrmann@gmail.com
So userspace just needs to cycle through piles of framebuffer objects to make bad things happen? Doesn't sound like a terribly solid plan.
IDs will still get recycled, but only once all IDs got used. So this is "safe".
Sure, user-space can create 2 billion framebuffers and destroy them again, thus causing the ID range to overflow and recycle old IDs. Not sure how fast you can run 2 billion syscalls.. If that's a real issue, I'd vote for using the high-range for user-space managed objects, low-range for kernel-space managed objects ([1...INT_MAX] and [INT_MAX+1...UINT_MAX] or so).
I guess we could save this by doing normal id allocations for fbs and monotonically increasing allocations for all other objects.
This doesn't work. A connector with ID #n might get unplugged and another process created a new FB, which will then get ID #n. Sure, I doubt there's a real conflict as ioctls check the type, but it still sounds really ugly to me.
On a second thought: maybe your idea isn't as bad as I thought. I mean, everyone must do type-checking when looking up mode-objects, so it seems safe to rely on that.
note that for atomic there are cases where we do lookup w/ ANY_TYPE... *but* the idea might still work since FBs are still handled specially(ish) due to refcounting...
BR, -R
Thanks David _______________________________________________ dri-devel mailing list dri-devel@lists.freedesktop.org http://lists.freedesktop.org/mailman/listinfo/dri-devel
On Fri, Aug 29, 2014 at 02:57:12PM +0200, David Herrmann wrote:
Hi
On Fri, Aug 29, 2014 at 2:51 PM, Daniel Vetter daniel@ffwll.ch wrote:
On Fri, Aug 29, 2014 at 02:01:01PM +0200, David Herrmann wrote:
With MST, we now have connector hotplugging, yey! Pretty easy to use in user-space, but introduces some nasty races:
- If a connector is removed and added again while a compositor is in the background, it will get the same ID again. If the compositor wakes up, it cannot know whether it's the same connector or a new one, thus it must re-read EDID information, etc.
- possible_clones, possible_crtcs, etc. depend on indices, not IDs. So if an object is removed and a new one is added, those bitmasks are invalid and must be refreshed. This currently does not affect connectors, but only crtcs and encoders, but it's only a matter of time when this will change.
The easiest way to protect against this is to not recycle modeset IDs. Instead, always allocate a new, higher ID. All ioctls that affect modeset objects take IDs. Thus, during hotplug races, those ioctls will simply fail if invalid IDs are passed. They will no longer silently run on a newly hotplugged object.
Furthermore, hotplug-races during state sync can now be easily detected. A call to GET_RESOURCES returns a list of available IDs atomically. User-space can now start fetching all those objects via GET_* ioctls. If any of those fails, they know that the given object was unplugged. Thus, the "possible_*" bit-fields are invalidated. User-space can now decide whether to restart the sync all over or wait for the 'change' uevent that is sent on modeset object modifications (or, well, is supposed to be sent and probably will be at some point).
With this in place, modeset object hotplugging should work fine for all modeset objects in the KMS API.
CC'ing stable so we can rely on all kernels with MST to not recycle IDs.
Cc: stable@vger.kernel.org # 3.16+ Signed-off-by: David Herrmann dh.herrmann@gmail.com
So userspace just needs to cycle through piles of framebuffer objects to make bad things happen? Doesn't sound like a terribly solid plan.
IDs will still get recycled, but only once all IDs got used. So this is "safe".
Sure, user-space can create 2 billion framebuffers and destroy them again, thus causing the ID range to overflow and recycle old IDs. Not sure how fast you can run 2 billion syscalls.. If that's a real issue, I'd vote for using the high-range for user-space managed objects, low-range for kernel-space managed objects ([1...INT_MAX] and [INT_MAX+1...UINT_MAX] or so).
I guess we could save this by doing normal id allocations for fbs and monotonically increasing allocations for all other objects.
This doesn't work. A connector with ID #n might get unplugged and another process created a new FB, which will then get ID #n. Sure, I doubt there's a real conflict as ioctls check the type, but it still sounds really ugly to me.
Oh, ugly for sure, but it should work since userspace doesn't really have any reason to mix up ids and the kernel does need to check the type anyway. And with a simple
if (type == OBJ_FB) idr_alloc else idr_alloc_cyclic
I think it should work out with minimal fuss. At least the full separation of id spaces wouldn't be any more or less complexity. -Daniel
On Fri, Aug 29, 2014 at 02:01:00PM +0200, David Herrmann wrote:
There is no reason to use a heavy mutex for idr protection. Use a spinlock and make idr-allocation use idr_preload().
This patch also makes mode-object lookup irq-safe, in case you ever want to look up modeset objects from interrupts. This is just a side-effect of avoiding a mutex.
Signed-off-by: David Herrmann dh.herrmann@gmail.com
I've thought irqsave/restore are terribly serializing instructions, so this might actually be slower than a plain mutex. And imo if it doesn't show up in profiles it's not worth to optimize it - generally mutexes are really fast and in most cases already nicely degenerate to spinlocks anyway. -Daniel
drivers/gpu/drm/drm_crtc.c | 34 ++++++++++++++++++++-------------- include/drm/drm_crtc.h | 4 ++-- 2 files changed, 22 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 61b6978..97eba56 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -283,8 +283,10 @@ static int drm_mode_object_get_reg(struct drm_device *dev, { int ret;
- mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&dev->mode_config.idr_lock);
- ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_NOWAIT); if (ret >= 0) { /*
- Set up the object linking under the protection of the idr
@@ -293,7 +295,9 @@ static int drm_mode_object_get_reg(struct drm_device *dev, obj->id = ret; obj->type = obj_type; }
- mutex_unlock(&dev->mode_config.idr_mutex);
spin_unlock_irq(&dev->mode_config.idr_lock);
idr_preload_end();
return ret < 0 ? ret : 0;
} @@ -322,9 +326,9 @@ int drm_mode_object_get(struct drm_device *dev, static void drm_mode_object_register(struct drm_device *dev, struct drm_mode_object *obj) {
- mutex_lock(&dev->mode_config.idr_mutex);
- spin_lock_irq(&dev->mode_config.idr_lock); idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- spin_unlock_irq(&dev->mode_config.idr_lock);
}
/** @@ -339,17 +343,18 @@ static void drm_mode_object_register(struct drm_device *dev, void drm_mode_object_put(struct drm_device *dev, struct drm_mode_object *object) {
- mutex_lock(&dev->mode_config.idr_mutex);
- spin_lock_irq(&dev->mode_config.idr_lock); idr_remove(&dev->mode_config.crtc_idr, object->id);
- mutex_unlock(&dev->mode_config.idr_mutex);
- spin_unlock_irq(&dev->mode_config.idr_lock);
}
static struct drm_mode_object *_object_find(struct drm_device *dev, uint32_t id, uint32_t type) { struct drm_mode_object *obj = NULL;
- unsigned long flags;
- mutex_lock(&dev->mode_config.idr_mutex);
- spin_lock_irqsave(&dev->mode_config.idr_lock, flags); obj = idr_find(&dev->mode_config.crtc_idr, id); if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type) obj = NULL;
@@ -358,7 +363,7 @@ static struct drm_mode_object *_object_find(struct drm_device *dev, /* don't leak out unref'd fb's */ if (obj && (obj->type == DRM_MODE_OBJECT_FB)) obj = NULL;
- mutex_unlock(&dev->mode_config.idr_mutex);
spin_unlock_irqrestore(&dev->mode_config.idr_lock, flags);
return obj;
} @@ -433,9 +438,9 @@ EXPORT_SYMBOL(drm_framebuffer_init); static void __drm_framebuffer_unregister(struct drm_device *dev, struct drm_framebuffer *fb) {
- mutex_lock(&dev->mode_config.idr_mutex);
- spin_lock_irq(&dev->mode_config.idr_lock); idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
- mutex_unlock(&dev->mode_config.idr_mutex);
spin_unlock_irq(&dev->mode_config.idr_lock);
fb->base.id = 0;
} @@ -465,14 +470,15 @@ static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev, { struct drm_mode_object *obj = NULL; struct drm_framebuffer *fb;
- unsigned long flags;
- mutex_lock(&dev->mode_config.idr_mutex);
- spin_lock_irqsave(&dev->mode_config.idr_lock, flags); obj = idr_find(&dev->mode_config.crtc_idr, id); if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id)) fb = NULL; else fb = obj_to_fb(obj);
- mutex_unlock(&dev->mode_config.idr_mutex);
spin_unlock_irqrestore(&dev->mode_config.idr_lock, flags);
return fb;
} @@ -5049,7 +5055,7 @@ void drm_mode_config_init(struct drm_device *dev) { mutex_init(&dev->mode_config.mutex); drm_modeset_lock_init(&dev->mode_config.connection_mutex);
- mutex_init(&dev->mode_config.idr_mutex);
- spin_lock_init(&dev->mode_config.idr_lock); mutex_init(&dev->mode_config.fb_lock); INIT_LIST_HEAD(&dev->mode_config.fb_list); INIT_LIST_HEAD(&dev->mode_config.crtc_list);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 77d9763..9c57b56 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -742,7 +742,7 @@ struct drm_mode_group { /**
- drm_mode_config - Mode configuration control structure
- @mutex: mutex protecting KMS related lists and structures
- @idr_mutex: mutex for KMS ID allocation and management
- @idr_lock: lock for KMS ID allocation and management
- @crtc_idr: main KMS ID tracking object
- @num_fb: number of fbs available
- @fb_list: list of framebuffers available
@@ -772,7 +772,7 @@ struct drm_mode_config { struct mutex mutex; /* protects configuration (mode lists etc.) */ struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
- struct mutex idr_mutex; /* for IDR management */
- spinlock_t idr_lock; /* for IDR management */ struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ /* this is limited to one for now */
-- 2.1.0
Hi
On Fri, Aug 29, 2014 at 2:53 PM, Daniel Vetter daniel@ffwll.ch wrote:
On Fri, Aug 29, 2014 at 02:01:00PM +0200, David Herrmann wrote:
There is no reason to use a heavy mutex for idr protection. Use a spinlock and make idr-allocation use idr_preload().
This patch also makes mode-object lookup irq-safe, in case you ever want to look up modeset objects from interrupts. This is just a side-effect of avoiding a mutex.
Signed-off-by: David Herrmann dh.herrmann@gmail.com
I've thought irqsave/restore are terribly serializing instructions, so this might actually be slower than a plain mutex. And imo if it doesn't show up in profiles it's not worth to optimize it - generally mutexes are really fast and in most cases already nicely degenerate to spinlocks anyway.
Honestly, this patch is less about speed than 'correctness'. Sure, a mutex is just a spin-lock in low-contention cases and there really is no high-contention here. However, spin-locks are the major lock-type for pure data. mutexes only make sense when you have to lock data structures _while_ performing operations that can sleep. Using a spin-lock here prevents people from doing stupid things while holding this lock. And really, this lock is about ID registration and deregistration, nothing else.
Btw., I can happily turn all those lock/unlock sequences into spin_lock() and spin_unlock() so we ignore irq-contexts completely, if that's the only issue.
Thanks David
On Fri, Aug 29, 2014 at 03:03:58PM +0200, David Herrmann wrote:
Hi
On Fri, Aug 29, 2014 at 2:53 PM, Daniel Vetter daniel@ffwll.ch wrote:
On Fri, Aug 29, 2014 at 02:01:00PM +0200, David Herrmann wrote:
There is no reason to use a heavy mutex for idr protection. Use a spinlock and make idr-allocation use idr_preload().
This patch also makes mode-object lookup irq-safe, in case you ever want to look up modeset objects from interrupts. This is just a side-effect of avoiding a mutex.
Signed-off-by: David Herrmann dh.herrmann@gmail.com
I've thought irqsave/restore are terribly serializing instructions, so this might actually be slower than a plain mutex. And imo if it doesn't show up in profiles it's not worth to optimize it - generally mutexes are really fast and in most cases already nicely degenerate to spinlocks anyway.
Honestly, this patch is less about speed than 'correctness'. Sure, a mutex is just a spin-lock in low-contention cases and there really is no high-contention here. However, spin-locks are the major lock-type for pure data. mutexes only make sense when you have to lock data structures _while_ performing operations that can sleep. Using a spin-lock here prevents people from doing stupid things while holding this lock. And really, this lock is about ID registration and deregistration, nothing else.
Btw., I can happily turn all those lock/unlock sequences into spin_lock() and spin_unlock() so we ignore irq-contexts completely, if that's the only issue.
Yeah, if you want I'll r-b plain spinlocks. Imo locks also serve as documentation, so making it clear that we neither allocate nor sleep while holding them is good. But making it irq save just because is imo counterproductive. -Daniel
On Fri, Aug 29, 2014 at 02:01:00PM +0200, David Herrmann wrote:
There is no reason to use a heavy mutex for idr protection. Use a spinlock and make idr-allocation use idr_preload().
This patch also makes mode-object lookup irq-safe, in case you ever want to look up modeset objects from interrupts. This is just a side-effect of avoiding a mutex.
I don't think that's entirely accurate. idr_preload(GFP_KERNEL) might sleep since GFP_KERNEL & __GFP_WAIT != 0.
Thierry
Hi
On Fri, Aug 29, 2014 at 3:10 PM, Thierry Reding thierry.reding@gmail.com wrote:
On Fri, Aug 29, 2014 at 02:01:00PM +0200, David Herrmann wrote:
There is no reason to use a heavy mutex for idr protection. Use a spinlock and make idr-allocation use idr_preload().
This patch also makes mode-object lookup irq-safe, in case you ever want to look up modeset objects from interrupts. This is just a side-effect of avoiding a mutex.
I don't think that's entirely accurate. idr_preload(GFP_KERNEL) might sleep since GFP_KERNEL & __GFP_WAIT != 0.
idr_preload() is only used in registration, not in lookups. Not sure what you refer to?
Thanks David
On Fri, Aug 29, 2014 at 03:11:19PM +0200, David Herrmann wrote:
Hi
On Fri, Aug 29, 2014 at 3:10 PM, Thierry Reding thierry.reding@gmail.com wrote:
On Fri, Aug 29, 2014 at 02:01:00PM +0200, David Herrmann wrote:
There is no reason to use a heavy mutex for idr protection. Use a spinlock and make idr-allocation use idr_preload().
This patch also makes mode-object lookup irq-safe, in case you ever want to look up modeset objects from interrupts. This is just a side-effect of avoiding a mutex.
I don't think that's entirely accurate. idr_preload(GFP_KERNEL) might sleep since GFP_KERNEL & __GFP_WAIT != 0.
idr_preload() is only used in registration, not in lookups. Not sure what you refer to?
Yes, you're right. I wasn't paying close enough attention and somehow applied "makes irq-safe" to the "idr_preload()" change without relating it to "lookup".
Thierry
dri-devel@lists.freedesktop.org