From: Rob Clark robdclark@chromium.org
I've been spending some time looking into how things behave under high memory pressure. The first patch is a random cleanup I noticed along the way. The second improves the situation significantly when we are getting shrinker called from many threads in parallel. And the last two are $debugfs/gem fixes I needed so I could monitor the state of GEM objects (ie. how many are active/purgable/purged) while triggering high memory pressure.
We could probably go a bit further with dropping the mm_lock in the shrinker->scan() loop, but this is already a pretty big improvement. The next step is probably actually to add support to unpin/evict inactive objects. (We are part way there since we have already de-coupled the iova lifetime from the pages lifetime, but there are a few sharp corners to work through.)
Rob Clark (4): drm/msm: Remove unused freed llist node drm/msm: Avoid mutex in shrinker_count() drm/msm: Fix debugfs deadlock drm/msm: Improved debugfs gem stats
drivers/gpu/drm/msm/msm_debugfs.c | 14 ++---- drivers/gpu/drm/msm/msm_drv.c | 4 ++ drivers/gpu/drm/msm/msm_drv.h | 10 ++++- drivers/gpu/drm/msm/msm_fb.c | 3 +- drivers/gpu/drm/msm/msm_gem.c | 61 +++++++++++++++++++++----- drivers/gpu/drm/msm/msm_gem.h | 58 +++++++++++++++++++++--- drivers/gpu/drm/msm/msm_gem_shrinker.c | 17 +------ 7 files changed, 122 insertions(+), 45 deletions(-)
From: Rob Clark robdclark@chromium.org
Unused since c951a9b284b907604759628d273901064c60d09f
Signed-off-by: Rob Clark robdclark@chromium.org --- drivers/gpu/drm/msm/msm_gem.h | 2 -- 1 file changed, 2 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b3a0a880cbab..7a9107cf1818 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -78,8 +78,6 @@ struct msm_gem_object {
struct list_head vmas; /* list of msm_gem_vma */
- struct llist_node freed; - /* For physically contiguous buffers. Used when we don't have * an IOMMU. Also used for stolen/splashscreen buffer. */
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
From: Rob Clark robdclark@chromium.org
Unused since c951a9b284b907604759628d273901064c60d09f
Not terribly important, but checkpatch always yells at me when I don't reference commits by saying:
commit c951a9b284b9 ("drm/msm: Remove msm_gem_free_work")
Signed-off-by: Rob Clark robdclark@chromium.org
drivers/gpu/drm/msm/msm_gem.h | 2 -- 1 file changed, 2 deletions(-)
Reviewed-by: Douglas Anderson dianders@chromium.org
From: Rob Clark robdclark@chromium.org
When the system is under heavy memory pressure, we can end up with lots of concurrent calls into the shrinker. Keeping a running tab on what we can shrink avoids grabbing a lock in shrinker->count(), and avoids shrinker->scan() getting called when not profitable.
Also, we can keep purged objects in their own list to avoid re-traversing them to help cut down time in the critical section further.
Signed-off-by: Rob Clark robdclark@chromium.org --- drivers/gpu/drm/msm/msm_drv.c | 1 + drivers/gpu/drm/msm/msm_drv.h | 2 ++ drivers/gpu/drm/msm/msm_gem.c | 16 +++++++++++-- drivers/gpu/drm/msm/msm_gem.h | 32 ++++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_gem_shrinker.c | 17 +------------- 5 files changed, 50 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 4f9fa0189a07..3462b0ea14c6 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -476,6 +476,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
INIT_LIST_HEAD(&priv->inactive_willneed); INIT_LIST_HEAD(&priv->inactive_dontneed); + INIT_LIST_HEAD(&priv->inactive_purged); mutex_init(&priv->mm_lock);
/* Teach lockdep about lock ordering wrt. shrinker: */ diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index a1264cfcac5e..3ead5755f695 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -188,6 +188,8 @@ struct msm_drm_private { */ struct list_head inactive_willneed; /* inactive + !shrinkable */ struct list_head inactive_dontneed; /* inactive + shrinkable */ + struct list_head inactive_purged; /* inactive + purged */ + int shrinkable_count; /* write access under mm_lock */ struct mutex mm_lock;
struct workqueue_struct *wq; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 9d10739c4eb2..74a92eedc992 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -719,6 +719,7 @@ void msm_gem_purge(struct drm_gem_object *obj) put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED; + mark_unpurgable(msm_obj);
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); drm_gem_free_mmap_offset(obj); @@ -790,6 +791,7 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) might_sleep(); WARN_ON(!msm_gem_is_locked(obj)); WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); + WARN_ON(msm_obj->dontneed);
if (msm_obj->active_count++ == 0) { mutex_lock(&priv->mm_lock); @@ -818,11 +820,19 @@ static void update_inactive(struct msm_gem_object *msm_obj) mutex_lock(&priv->mm_lock); WARN_ON(msm_obj->active_count != 0);
+ if (msm_obj->dontneed) + mark_unpurgable(msm_obj); + list_del_init(&msm_obj->mm_list); - if (msm_obj->madv == MSM_MADV_WILLNEED) + if (msm_obj->madv == MSM_MADV_WILLNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); - else + } else if (msm_obj->madv == MSM_MADV_DONTNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); + mark_purgable(msm_obj); + } else { + WARN_ON(msm_obj->madv != __MSM_MADV_PURGED); + list_add_tail(&msm_obj->mm_list, &priv->inactive_purged); + }
mutex_unlock(&priv->mm_lock); } @@ -971,6 +981,8 @@ void msm_gem_free_object(struct drm_gem_object *obj) struct msm_drm_private *priv = dev->dev_private;
mutex_lock(&priv->mm_lock); + if (msm_obj->dontneed) + mark_unpurgable(msm_obj); list_del(&msm_obj->mm_list); mutex_unlock(&priv->mm_lock);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 7a9107cf1818..0feabae75d3d 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -50,6 +50,11 @@ struct msm_gem_object { */ uint8_t madv;
+ /** + * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)? + */ + bool dontneed : 1; + /** * count of active vmap'ing */ @@ -198,6 +203,33 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj) return (msm_obj->vmap_count == 0) && msm_obj->vaddr; }
+static inline void mark_purgable(struct msm_gem_object *msm_obj) +{ + struct msm_drm_private *priv = msm_obj->base.dev->dev_private; + + WARN_ON(!mutex_is_locked(&priv->mm_lock)); + + if (WARN_ON(msm_obj->dontneed)) + return; + + priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT; + msm_obj->dontneed = true; +} + +static inline void mark_unpurgable(struct msm_gem_object *msm_obj) +{ + struct msm_drm_private *priv = msm_obj->base.dev->dev_private; + + WARN_ON(!mutex_is_locked(&priv->mm_lock)); + + if (WARN_ON(!msm_obj->dontneed)) + return; + + priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT; + WARN_ON(priv->shrinkable_count < 0); + msm_obj->dontneed = false; +} + void msm_gem_purge(struct drm_gem_object *obj); void msm_gem_vunmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 9d5248be746f..7db8375f2430 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -14,22 +14,7 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct msm_drm_private *priv = container_of(shrinker, struct msm_drm_private, shrinker); - struct msm_gem_object *msm_obj; - unsigned long count = 0; - - mutex_lock(&priv->mm_lock); - - list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) { - if (!msm_gem_trylock(&msm_obj->base)) - continue; - if (is_purgeable(msm_obj)) - count += msm_obj->base.size >> PAGE_SHIFT; - msm_gem_unlock(&msm_obj->base); - } - - mutex_unlock(&priv->mm_lock); - - return count; + return priv->shrinkable_count; }
static unsigned long
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
@@ -818,11 +820,19 @@ static void update_inactive(struct msm_gem_object *msm_obj) mutex_lock(&priv->mm_lock); WARN_ON(msm_obj->active_count != 0);
if (msm_obj->dontneed)
mark_unpurgable(msm_obj);
list_del_init(&msm_obj->mm_list);
if (msm_obj->madv == MSM_MADV_WILLNEED)
if (msm_obj->madv == MSM_MADV_WILLNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
else
} else if (msm_obj->madv == MSM_MADV_DONTNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
mark_purgable(msm_obj);
} else {
WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
I'm probably being dense, but what's the point of adding it to the "inactive_purged" list here? You never look at that list, right? You already did a list_del_init() on this object's list pointer ("mm_list"). I don't see how adding it to a bogus list helps with anything.
@@ -198,6 +203,33 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj) return (msm_obj->vmap_count == 0) && msm_obj->vaddr; }
+static inline void mark_purgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(msm_obj->dontneed))
return;
The is_purgeable() function also checks other things besides just "MSM_MADV_DONTNEED". Do we need to check those too? Specifically:
msm_obj->sgt && !msm_obj->base.dma_buf && !msm_obj->base.import_attach
...or is it just being paranoid?
I guess I'm just worried that if any of those might be important then we'll consistently report back that we have a count of things that can be purged but then scan() won't find anything to do. That wouldn't be great.
priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
msm_obj->dontneed = true;
+}
+static inline void mark_unpurgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(!msm_obj->dontneed))
return;
priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
WARN_ON(priv->shrinkable_count < 0);
If you changed the order maybe you could make shrinkable_count "unsigned long" to match the shrinker API?
new_shrinkable = msm_obj->base.size >> PAGE_SHIFT; WARN_ON(new_shrinkable > priv->shrinkable_count); priv->shrinkable_count -= new_shrinkable
-Doug
On Wed, Mar 31, 2021 at 3:44 PM Doug Anderson dianders@chromium.org wrote:
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
@@ -818,11 +820,19 @@ static void update_inactive(struct msm_gem_object *msm_obj) mutex_lock(&priv->mm_lock); WARN_ON(msm_obj->active_count != 0);
if (msm_obj->dontneed)
mark_unpurgable(msm_obj);
list_del_init(&msm_obj->mm_list);
if (msm_obj->madv == MSM_MADV_WILLNEED)
if (msm_obj->madv == MSM_MADV_WILLNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
else
} else if (msm_obj->madv == MSM_MADV_DONTNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
mark_purgable(msm_obj);
} else {
WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
I'm probably being dense, but what's the point of adding it to the "inactive_purged" list here? You never look at that list, right? You already did a list_del_init() on this object's list pointer ("mm_list"). I don't see how adding it to a bogus list helps with anything.
It preserves the "every bo is in one of these lists" statement, but other than that you are right we aren't otherwise doing anything with that list. (Or we could replace the list_del_init() with list_del().. I tend to instinctively go for list_del_init())
@@ -198,6 +203,33 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj) return (msm_obj->vmap_count == 0) && msm_obj->vaddr; }
+static inline void mark_purgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(msm_obj->dontneed))
return;
The is_purgeable() function also checks other things besides just "MSM_MADV_DONTNEED". Do we need to check those too? Specifically:
msm_obj->sgt && !msm_obj->base.dma_buf && !msm_obj->base.import_attach
...or is it just being paranoid?
I guess I'm just worried that if any of those might be important then we'll consistently report back that we have a count of things that can be purged but then scan() won't find anything to do. That wouldn't be great.
Hmm, I thought msm_gem_madvise() returned an error instead of allowing MSM_MADV_DONTNEED to be set on imported/exported dma-bufs.. it probably should, to be complete (but userspace already knows not to madvise an imported/exported buffer for other reasons.. ie. we can't let a shared buffer end up in the bo cache). I'll re-work that a bit.
The msm_obj->sgt case is a bit more tricky.. that will be the case of a freshly allocated obj that does not have backing pages yet. But it seems like enough of a corner case, that I'm happy to live with it.. ie. the tricky thing is not leaking decrements of priv->shrinkable_count or underflowing priv->shrinkable_count, and caring about the !msm_obj->sgt case doubles the number of states an object can be in, and the shrinker->count() return value is just an estimate.
priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
msm_obj->dontneed = true;
+}
+static inline void mark_unpurgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(!msm_obj->dontneed))
return;
priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
WARN_ON(priv->shrinkable_count < 0);
If you changed the order maybe you could make shrinkable_count "unsigned long" to match the shrinker API?
new_shrinkable = msm_obj->base.size >> PAGE_SHIFT; WARN_ON(new_shrinkable > priv->shrinkable_count); priv->shrinkable_count -= new_shrinkable
True, although I've developed a preference for signed integers in cases where it can underflow if you mess up
BR, -R
Hi,
On Wed, Mar 31, 2021 at 4:23 PM Rob Clark robdclark@gmail.com wrote:
On Wed, Mar 31, 2021 at 3:44 PM Doug Anderson dianders@chromium.org wrote:
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
@@ -818,11 +820,19 @@ static void update_inactive(struct msm_gem_object *msm_obj) mutex_lock(&priv->mm_lock); WARN_ON(msm_obj->active_count != 0);
if (msm_obj->dontneed)
mark_unpurgable(msm_obj);
list_del_init(&msm_obj->mm_list);
if (msm_obj->madv == MSM_MADV_WILLNEED)
if (msm_obj->madv == MSM_MADV_WILLNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
else
} else if (msm_obj->madv == MSM_MADV_DONTNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
mark_purgable(msm_obj);
} else {
WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
I'm probably being dense, but what's the point of adding it to the "inactive_purged" list here? You never look at that list, right? You already did a list_del_init() on this object's list pointer ("mm_list"). I don't see how adding it to a bogus list helps with anything.
It preserves the "every bo is in one of these lists" statement, but other than that you are right we aren't otherwise doing anything with that list. (Or we could replace the list_del_init() with list_del().. I tend to instinctively go for list_del_init())
If you really want this list, it wouldn't hurt to at least have a comment saying that it's not used for anything so people like me don't go trying to figure out what it's used for. ;-)
@@ -198,6 +203,33 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj) return (msm_obj->vmap_count == 0) && msm_obj->vaddr; }
+static inline void mark_purgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(msm_obj->dontneed))
return;
The is_purgeable() function also checks other things besides just "MSM_MADV_DONTNEED". Do we need to check those too? Specifically:
msm_obj->sgt && !msm_obj->base.dma_buf && !msm_obj->base.import_attach
...or is it just being paranoid?
I guess I'm just worried that if any of those might be important then we'll consistently report back that we have a count of things that can be purged but then scan() won't find anything to do. That wouldn't be great.
Hmm, I thought msm_gem_madvise() returned an error instead of allowing MSM_MADV_DONTNEED to be set on imported/exported dma-bufs.. it probably should, to be complete (but userspace already knows not to madvise an imported/exported buffer for other reasons.. ie. we can't let a shared buffer end up in the bo cache). I'll re-work that a bit.
The msm_obj->sgt case is a bit more tricky.. that will be the case of a freshly allocated obj that does not have backing pages yet. But it seems like enough of a corner case, that I'm happy to live with it.. ie. the tricky thing is not leaking decrements of priv->shrinkable_count or underflowing priv->shrinkable_count, and caring about the !msm_obj->sgt case doubles the number of states an object can be in, and the shrinker->count() return value is just an estimate.
I think it's equally important to make sure that we don't constantly have a non-zero count and then have scan() do nothing. If there's a transitory blip then it's fine, but it's not OK if it can be steady state. Then you end up with:
1. How many objects do you have to free? 10 2. OK, free some. How many did you free? 0 3. Oh. You got more to do, I'll call you again. 4. Goto #1
...and it just keeps looping, right?
As long as you're confident that this case can't happen then we're probably fine, but good to be careful. Is there any way we can make sure that a "freshly allocated object" isn't ever in the "DONTNEED" state?
priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
msm_obj->dontneed = true;
+}
+static inline void mark_unpurgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(!msm_obj->dontneed))
return;
priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
WARN_ON(priv->shrinkable_count < 0);
If you changed the order maybe you could make shrinkable_count "unsigned long" to match the shrinker API?
new_shrinkable = msm_obj->base.size >> PAGE_SHIFT; WARN_ON(new_shrinkable > priv->shrinkable_count); priv->shrinkable_count -= new_shrinkable
True, although I've developed a preference for signed integers in cases where it can underflow if you mess up
Yeah, I guess it's fine since it's a count of pages and we really can't have _that_ many pages worth of stuff to purge. It might not hurt to at least declare it as a "long" instead of an "int" though to match the shrinker API.
-Doug
On Wed, Mar 31, 2021 at 4:39 PM Doug Anderson dianders@chromium.org wrote:
Hi,
On Wed, Mar 31, 2021 at 4:23 PM Rob Clark robdclark@gmail.com wrote:
On Wed, Mar 31, 2021 at 3:44 PM Doug Anderson dianders@chromium.org wrote:
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
@@ -818,11 +820,19 @@ static void update_inactive(struct msm_gem_object *msm_obj) mutex_lock(&priv->mm_lock); WARN_ON(msm_obj->active_count != 0);
if (msm_obj->dontneed)
mark_unpurgable(msm_obj);
list_del_init(&msm_obj->mm_list);
if (msm_obj->madv == MSM_MADV_WILLNEED)
if (msm_obj->madv == MSM_MADV_WILLNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
else
} else if (msm_obj->madv == MSM_MADV_DONTNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
mark_purgable(msm_obj);
} else {
WARN_ON(msm_obj->madv != __MSM_MADV_PURGED);
list_add_tail(&msm_obj->mm_list, &priv->inactive_purged);
I'm probably being dense, but what's the point of adding it to the "inactive_purged" list here? You never look at that list, right? You already did a list_del_init() on this object's list pointer ("mm_list"). I don't see how adding it to a bogus list helps with anything.
It preserves the "every bo is in one of these lists" statement, but other than that you are right we aren't otherwise doing anything with that list. (Or we could replace the list_del_init() with list_del().. I tend to instinctively go for list_del_init())
If you really want this list, it wouldn't hurt to at least have a comment saying that it's not used for anything so people like me don't go trying to figure out what it's used for. ;-)
@@ -198,6 +203,33 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj) return (msm_obj->vmap_count == 0) && msm_obj->vaddr; }
+static inline void mark_purgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(msm_obj->dontneed))
return;
The is_purgeable() function also checks other things besides just "MSM_MADV_DONTNEED". Do we need to check those too? Specifically:
msm_obj->sgt && !msm_obj->base.dma_buf && !msm_obj->base.import_attach
...or is it just being paranoid?
I guess I'm just worried that if any of those might be important then we'll consistently report back that we have a count of things that can be purged but then scan() won't find anything to do. That wouldn't be great.
Hmm, I thought msm_gem_madvise() returned an error instead of allowing MSM_MADV_DONTNEED to be set on imported/exported dma-bufs.. it probably should, to be complete (but userspace already knows not to madvise an imported/exported buffer for other reasons.. ie. we can't let a shared buffer end up in the bo cache). I'll re-work that a bit.
The msm_obj->sgt case is a bit more tricky.. that will be the case of a freshly allocated obj that does not have backing pages yet. But it seems like enough of a corner case, that I'm happy to live with it.. ie. the tricky thing is not leaking decrements of priv->shrinkable_count or underflowing priv->shrinkable_count, and caring about the !msm_obj->sgt case doubles the number of states an object can be in, and the shrinker->count() return value is just an estimate.
I think it's equally important to make sure that we don't constantly have a non-zero count and then have scan() do nothing. If there's a transitory blip then it's fine, but it's not OK if it can be steady state. Then you end up with:
- How many objects do you have to free? 10
- OK, free some. How many did you free? 0
- Oh. You got more to do, I'll call you again.
- Goto #1
...and it just keeps looping, right?
Looking more closely at vmscan, it looks like we should return SHRINK_STOP instead of zero
BR, -R
As long as you're confident that this case can't happen then we're probably fine, but good to be careful. Is there any way we can make sure that a "freshly allocated object" isn't ever in the "DONTNEED" state?
priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
msm_obj->dontneed = true;
+}
+static inline void mark_unpurgable(struct msm_gem_object *msm_obj) +{
struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
WARN_ON(!mutex_is_locked(&priv->mm_lock));
if (WARN_ON(!msm_obj->dontneed))
return;
priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
WARN_ON(priv->shrinkable_count < 0);
If you changed the order maybe you could make shrinkable_count "unsigned long" to match the shrinker API?
new_shrinkable = msm_obj->base.size >> PAGE_SHIFT; WARN_ON(new_shrinkable > priv->shrinkable_count); priv->shrinkable_count -= new_shrinkable
True, although I've developed a preference for signed integers in cases where it can underflow if you mess up
Yeah, I guess it's fine since it's a count of pages and we really can't have _that_ many pages worth of stuff to purge. It might not hurt to at least declare it as a "long" instead of an "int" though to match the shrinker API.
-Doug
From: Rob Clark robdclark@chromium.org
In normal cases the gem obj lock is acquired first before mm_lock. The exception is iterating the various object lists. In the shrinker path, deadlock is avoided by using msm_gem_trylock() and skipping over objects that cannot be locked. But for debugfs the straightforward thing is to split things out into a separate list of all objects protected by its own lock.
Fixes: d984457b31c4 ("drm/msm: Add priv->mm_lock to protect active/inactive lists") Signed-off-by: Rob Clark robdclark@chromium.org --- drivers/gpu/drm/msm/msm_debugfs.c | 14 +++----------- drivers/gpu/drm/msm/msm_drv.c | 3 +++ drivers/gpu/drm/msm/msm_drv.h | 8 +++++++- drivers/gpu/drm/msm/msm_gem.c | 14 +++++++++++++- drivers/gpu/drm/msm/msm_gem.h | 13 ++++++++++--- 5 files changed, 36 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 85ad0babc326..d611cc8e54a4 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = { static int msm_gem_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private; - struct msm_gpu *gpu = priv->gpu; int ret;
- ret = mutex_lock_interruptible(&priv->mm_lock); + ret = mutex_lock_interruptible(&priv->obj_lock); if (ret) return ret;
- if (gpu) { - seq_printf(m, "Active Objects (%s):\n", gpu->name); - msm_gem_describe_objects(&gpu->active_list, m); - } - - seq_printf(m, "Inactive Objects:\n"); - msm_gem_describe_objects(&priv->inactive_dontneed, m); - msm_gem_describe_objects(&priv->inactive_willneed, m); + msm_gem_describe_objects(&priv->objects, m);
- mutex_unlock(&priv->mm_lock); + mutex_unlock(&priv->obj_lock);
return 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 3462b0ea14c6..1ef1cd0cc714 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -474,6 +474,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
+ INIT_LIST_HEAD(&priv->objects); + mutex_init(&priv->obj_lock); + INIT_LIST_HEAD(&priv->inactive_willneed); INIT_LIST_HEAD(&priv->inactive_dontneed); INIT_LIST_HEAD(&priv->inactive_purged); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 3ead5755f695..d69f4263bd4e 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -174,7 +174,13 @@ struct msm_drm_private { struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */ struct msm_perf_state *perf;
- /* + /** + * List of all GEM objects (mainly for debugfs, protected by obj_lock + */ + struct list_head objects; + struct mutex obj_lock; + + /** * Lists of inactive GEM objects. Every bo is either in one of the * inactive lists (depending on whether or not it is shrinkable) or * gpu->active_list (for the gpu it is active on[1]) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 74a92eedc992..c184ea68a6d0 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -961,7 +961,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) size_t size = 0;
seq_puts(m, " flags id ref offset kaddr size madv name\n"); - list_for_each_entry(msm_obj, list, mm_list) { + list_for_each_entry(msm_obj, list, node) { struct drm_gem_object *obj = &msm_obj->base; seq_puts(m, " "); msm_gem_describe(obj, m); @@ -980,6 +980,10 @@ void msm_gem_free_object(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; struct msm_drm_private *priv = dev->dev_private;
+ mutex_lock(&priv->obj_lock); + list_del(&msm_obj->node); + mutex_unlock(&priv->obj_lock); + mutex_lock(&priv->mm_lock); if (msm_obj->dontneed) mark_unpurgable(msm_obj); @@ -1170,6 +1174,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); mutex_unlock(&priv->mm_lock);
+ mutex_lock(&priv->obj_lock); + list_add_tail(&msm_obj->node, &priv->objects); + mutex_unlock(&priv->obj_lock); + return obj;
fail: @@ -1240,6 +1248,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); mutex_unlock(&priv->mm_lock);
+ mutex_lock(&priv->obj_lock); + list_add_tail(&msm_obj->node, &priv->objects); + mutex_unlock(&priv->obj_lock); + return obj;
fail: diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 0feabae75d3d..49956196025e 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -60,13 +60,20 @@ struct msm_gem_object { */ uint8_t vmap_count;
- /* And object is either: - * inactive - on priv->inactive_list + /** + * Node in list of all objects (mainly for debugfs, protected by + * struct_mutex + */ + struct list_head node; + + /** + * An object is either: + * inactive - on priv->inactive_dontneed or priv->inactive_willneed + * (depending on purgability status) * active - on one one of the gpu's active_list.. well, at * least for now we don't have (I don't think) hw sync between * 2d and 3d one devices which have both, meaning we need to * block on submit if a bo is already on other ring - * */ struct list_head mm_list;
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
@@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = { static int msm_gem_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu; int ret;
ret = mutex_lock_interruptible(&priv->mm_lock);
ret = mutex_lock_interruptible(&priv->obj_lock); if (ret) return ret;
if (gpu) {
seq_printf(m, "Active Objects (%s):\n", gpu->name);
msm_gem_describe_objects(&gpu->active_list, m);
}
seq_printf(m, "Inactive Objects:\n");
msm_gem_describe_objects(&priv->inactive_dontneed, m);
msm_gem_describe_objects(&priv->inactive_willneed, m);
msm_gem_describe_objects(&priv->objects, m);
I guess we no longer sort the by Active and Inactive but that doesn't really matter?
@@ -174,7 +174,13 @@ struct msm_drm_private { struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */ struct msm_perf_state *perf;
/*
/**
* List of all GEM objects (mainly for debugfs, protected by obj_lock
It wouldn't hurt to talk about lock ordering here? Like: "If we need the "obj_lock" and a "gem_lock" at the same time we always grab the "obj_lock" first.
@@ -60,13 +60,20 @@ struct msm_gem_object { */ uint8_t vmap_count;
/* And object is either:
* inactive - on priv->inactive_list
/**
* Node in list of all objects (mainly for debugfs, protected by
* struct_mutex
Not "struct_mutex" in comment, right? Maybe "obj_lock" I think?
-Doug
On Wed, Mar 31, 2021 at 4:13 PM Doug Anderson dianders@chromium.org wrote:
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
@@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = { static int msm_gem_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu; int ret;
ret = mutex_lock_interruptible(&priv->mm_lock);
ret = mutex_lock_interruptible(&priv->obj_lock); if (ret) return ret;
if (gpu) {
seq_printf(m, "Active Objects (%s):\n", gpu->name);
msm_gem_describe_objects(&gpu->active_list, m);
}
seq_printf(m, "Inactive Objects:\n");
msm_gem_describe_objects(&priv->inactive_dontneed, m);
msm_gem_describe_objects(&priv->inactive_willneed, m);
msm_gem_describe_objects(&priv->objects, m);
I guess we no longer sort the by Active and Inactive but that doesn't really matter?
It turned out to be less useful to sort by active/inactive, as much as just having the summary at the bottom that the last patch adds. We can already tell from the per-object entries whether it is active/purgable/purged.
I did initially try to come up with an approach that let me keep this, but it would basically amount to re-writing the gem_submit path (because you cannot do any memory allocation under mm_lock)
@@ -174,7 +174,13 @@ struct msm_drm_private { struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */ struct msm_perf_state *perf;
/*
/**
* List of all GEM objects (mainly for debugfs, protected by obj_lock
It wouldn't hurt to talk about lock ordering here? Like: "If we need the "obj_lock" and a "gem_lock" at the same time we always grab the "obj_lock" first."
good point
@@ -60,13 +60,20 @@ struct msm_gem_object { */ uint8_t vmap_count;
/* And object is either:
* inactive - on priv->inactive_list
/**
* Node in list of all objects (mainly for debugfs, protected by
* struct_mutex
Not "struct_mutex" in comment, right? Maybe "obj_lock" I think?
oh, right, forgot to fix that from an earlier iteration
BR, -R
From: Rob Clark robdclark@chromium.org
The last patch lost the breakdown of active vs inactive GEM objects in $debugfs/gem. But we can add some better stats to summarize not just active vs inactive, but also purgable/purged to make up for that.
Signed-off-by: Rob Clark robdclark@chromium.org --- drivers/gpu/drm/msm/msm_fb.c | 3 ++- drivers/gpu/drm/msm/msm_gem.c | 31 ++++++++++++++++++++++++------- drivers/gpu/drm/msm/msm_gem.h | 11 ++++++++++- 3 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index d42f0665359a..887172a10c9a 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -33,6 +33,7 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { #ifdef CONFIG_DEBUG_FS void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) { + struct msm_gem_stats stats = {{0}}; int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n", @@ -42,7 +43,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) for (i = 0; i < n; i++) { seq_printf(m, " %d: offset=%d pitch=%d, obj: ", i, fb->offsets[i], fb->pitches[i]); - msm_gem_describe(fb->obj[i], m); + msm_gem_describe(fb->obj[i], m, &stats); } } #endif diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index c184ea68a6d0..a933ca5dc6df 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -873,7 +873,8 @@ static void describe_fence(struct dma_fence *fence, const char *type, fence->seqno); }
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, + struct msm_gem_stats *stats) { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct dma_resv *robj = obj->resv; @@ -885,11 +886,23 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
msm_gem_lock(obj);
+ stats->all.count++; + stats->all.size += obj->size; + + if (is_active(msm_obj)) { + stats->active.count++; + stats->active.size += obj->size; + } + switch (msm_obj->madv) { case __MSM_MADV_PURGED: + stats->purged.count++; + stats->purged.size += obj->size; madv = " purged"; break; case MSM_MADV_DONTNEED: + stats->purgable.count++; + stats->purgable.size += obj->size; madv = " purgeable"; break; case MSM_MADV_WILLNEED: @@ -956,20 +969,24 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) { + struct msm_gem_stats stats = {{0}}; struct msm_gem_object *msm_obj; - int count = 0; - size_t size = 0;
seq_puts(m, " flags id ref offset kaddr size madv name\n"); list_for_each_entry(msm_obj, list, node) { struct drm_gem_object *obj = &msm_obj->base; seq_puts(m, " "); - msm_gem_describe(obj, m); - count++; - size += obj->size; + msm_gem_describe(obj, m, &stats); }
- seq_printf(m, "Total %d objects, %zu bytes\n", count, size); + seq_printf(m, "Total: %4d objects, %9zu bytes\n", + stats.all.count, stats.all.size); + seq_printf(m, "Active: %4d objects, %9zu bytes\n", + stats.active.count, stats.active.size); + seq_printf(m, "Purgable: %4d objects, %9zu bytes\n", + stats.purgable.count, stats.purgable.size); + seq_printf(m, "Purged: %4d objects, %9zu bytes\n", + stats.purged.count, stats.purged.size); } #endif
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 49956196025e..43510ac070dd 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -158,7 +158,16 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, __printf(2, 3) void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); #ifdef CONFIG_DEBUG_FS -void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); + +struct msm_gem_stats { + struct { + unsigned count; + size_t size; + } all, active, purgable, purged; +}; + +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, + struct msm_gem_stats *stats); void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); #endif
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
From: Rob Clark robdclark@chromium.org
The last patch lost the breakdown of active vs inactive GEM objects in $debugfs/gem. But we can add some better stats to summarize not just active vs inactive, but also purgable/purged to make up for that.
Signed-off-by: Rob Clark robdclark@chromium.org
drivers/gpu/drm/msm/msm_fb.c | 3 ++- drivers/gpu/drm/msm/msm_gem.c | 31 ++++++++++++++++++++++++------- drivers/gpu/drm/msm/msm_gem.h | 11 ++++++++++- 3 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index d42f0665359a..887172a10c9a 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -33,6 +33,7 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { #ifdef CONFIG_DEBUG_FS void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) {
struct msm_gem_stats stats = {{0}};
nit: instead of "{{0}}", can't you just do:
struct msm_gem_stats stats = {};
...both here and for the other usage.
Other than that this seems good to me.
Reviewed-by: Douglas Anderson dianders@chromium.org
Hi,
On Wed, Mar 31, 2021 at 3:14 PM Rob Clark robdclark@gmail.com wrote:
From: Rob Clark robdclark@chromium.org
I've been spending some time looking into how things behave under high memory pressure. The first patch is a random cleanup I noticed along the way. The second improves the situation significantly when we are getting shrinker called from many threads in parallel. And the last two are $debugfs/gem fixes I needed so I could monitor the state of GEM objects (ie. how many are active/purgable/purged) while triggering high memory pressure.
We could probably go a bit further with dropping the mm_lock in the shrinker->scan() loop, but this is already a pretty big improvement. The next step is probably actually to add support to unpin/evict inactive objects. (We are part way there since we have already decoupled the iova lifetime from the pages lifetime, but there are a few sharp corners to work through.)
Rob Clark (4): drm/msm: Remove unused freed llist node drm/msm: Avoid mutex in shrinker_count() drm/msm: Fix debugfs deadlock drm/msm: Improved debugfs gem stats
drivers/gpu/drm/msm/msm_debugfs.c | 14 ++---- drivers/gpu/drm/msm/msm_drv.c | 4 ++ drivers/gpu/drm/msm/msm_drv.h | 10 ++++- drivers/gpu/drm/msm/msm_fb.c | 3 +- drivers/gpu/drm/msm/msm_gem.c | 61 +++++++++++++++++++++----- drivers/gpu/drm/msm/msm_gem.h | 58 +++++++++++++++++++++--- drivers/gpu/drm/msm/msm_gem_shrinker.c | 17 +------ 7 files changed, 122 insertions(+), 45 deletions(-)
This makes a pretty big reduction in jankiness when under memory pressure and seems to work well for me.
Tested-by: Douglas Anderson dianders@chromium.org
From: Rob Clark robdclark@chromium.org
I've been spending some time looking into how things behave under high memory pressure. The first patch is a random cleanup I noticed along the way. The second improves the situation significantly when we are getting shrinker called from many threads in parallel. And the last two are $debugfs/gem fixes I needed so I could monitor the state of GEM objects (ie. how many are active/purgable/purged) while triggering high memory pressure.
We could probably go a bit further with dropping the mm_lock in the shrinker->scan() loop, but this is already a pretty big improvement. The next step is probably actually to add support to unpin/evict inactive objects. (We are part way there since we have already decoupled the iova lifetime from the pages lifetime, but there are a few sharp corners to work through.)
Rob Clark (4): drm/msm: Remove unused freed llist node drm/msm: Avoid mutex in shrinker_count() drm/msm: Fix debugfs deadlock drm/msm: Improved debugfs gem stats
drivers/gpu/drm/msm/msm_debugfs.c | 14 ++--- drivers/gpu/drm/msm/msm_drv.c | 4 ++ drivers/gpu/drm/msm/msm_drv.h | 15 ++++-- drivers/gpu/drm/msm/msm_fb.c | 3 +- drivers/gpu/drm/msm/msm_gem.c | 65 ++++++++++++++++++----- drivers/gpu/drm/msm/msm_gem.h | 72 +++++++++++++++++++++++--- drivers/gpu/drm/msm/msm_gem_shrinker.c | 28 ++++------ 7 files changed, 150 insertions(+), 51 deletions(-)
From: Rob Clark robdclark@chromium.org
Unused since commit c951a9b284b9 ("drm/msm: Remove msm_gem_free_work")
Signed-off-by: Rob Clark robdclark@chromium.org Tested-by: Douglas Anderson dianders@chromium.org --- drivers/gpu/drm/msm/msm_gem.h | 2 -- 1 file changed, 2 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b3a0a880cbab..7a9107cf1818 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -78,8 +78,6 @@ struct msm_gem_object {
struct list_head vmas; /* list of msm_gem_vma */
- struct llist_node freed; - /* For physically contiguous buffers. Used when we don't have * an IOMMU. Also used for stolen/splashscreen buffer. */
Hi,
On Wed, Mar 31, 2021 at 6:23 PM Rob Clark robdclark@gmail.com wrote:
From: Rob Clark robdclark@chromium.org
Unused since commit c951a9b284b9 ("drm/msm: Remove msm_gem_free_work")
Signed-off-by: Rob Clark robdclark@chromium.org Tested-by: Douglas Anderson dianders@chromium.org
drivers/gpu/drm/msm/msm_gem.h | 2 -- 1 file changed, 2 deletions(-)
Reviewed-by: Douglas Anderson dianders@chromium.org
From: Rob Clark robdclark@chromium.org
When the system is under heavy memory pressure, we can end up with lots of concurrent calls into the shrinker. Keeping a running tab on what we can shrink avoids grabbing a lock in shrinker->count(), and avoids shrinker->scan() getting called when not profitable.
Also, we can keep purged objects in their own list to avoid re-traversing them to help cut down time in the critical section further.
Signed-off-by: Rob Clark robdclark@chromium.org Tested-by: Douglas Anderson dianders@chromium.org --- drivers/gpu/drm/msm/msm_drv.c | 1 + drivers/gpu/drm/msm/msm_drv.h | 6 ++- drivers/gpu/drm/msm/msm_gem.c | 20 ++++++++-- drivers/gpu/drm/msm/msm_gem.h | 53 ++++++++++++++++++++++++-- drivers/gpu/drm/msm/msm_gem_shrinker.c | 28 ++++++-------- 5 files changed, 81 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 4f9fa0189a07..3462b0ea14c6 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -476,6 +476,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
INIT_LIST_HEAD(&priv->inactive_willneed); INIT_LIST_HEAD(&priv->inactive_dontneed); + INIT_LIST_HEAD(&priv->inactive_purged); mutex_init(&priv->mm_lock);
/* Teach lockdep about lock ordering wrt. shrinker: */ diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index a1264cfcac5e..503168817e24 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -179,8 +179,8 @@ struct msm_drm_private { * inactive lists (depending on whether or not it is shrinkable) or * gpu->active_list (for the gpu it is active on[1]) * - * These lists are protected by mm_lock. If struct_mutex is involved, it - * should be aquired prior to mm_lock. One should *not* hold mm_lock in + * These lists are protected by mm_lock (which should be acquired + * before per GEM object lock). One should *not* hold mm_lock in * get_pages()/vmap()/etc paths, as they can trigger the shrinker. * * [1] if someone ever added support for the old 2d cores, there could be @@ -188,6 +188,8 @@ struct msm_drm_private { */ struct list_head inactive_willneed; /* inactive + !shrinkable */ struct list_head inactive_dontneed; /* inactive + shrinkable */ + struct list_head inactive_purged; /* inactive + purged */ + long shrinkable_count; /* write access under mm_lock */ struct mutex mm_lock;
struct workqueue_struct *wq; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 9d10739c4eb2..bec01bb48fce 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -719,6 +719,7 @@ void msm_gem_purge(struct drm_gem_object *obj) put_iova_vmas(obj);
msm_obj->madv = __MSM_MADV_PURGED; + mark_unpurgable(msm_obj);
drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); drm_gem_free_mmap_offset(obj); @@ -790,10 +791,11 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu) might_sleep(); WARN_ON(!msm_gem_is_locked(obj)); WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED); + WARN_ON(msm_obj->dontneed);
if (msm_obj->active_count++ == 0) { mutex_lock(&priv->mm_lock); - list_del_init(&msm_obj->mm_list); + list_del(&msm_obj->mm_list); list_add_tail(&msm_obj->mm_list, &gpu->active_list); mutex_unlock(&priv->mm_lock); } @@ -818,11 +820,19 @@ static void update_inactive(struct msm_gem_object *msm_obj) mutex_lock(&priv->mm_lock); WARN_ON(msm_obj->active_count != 0);
- list_del_init(&msm_obj->mm_list); - if (msm_obj->madv == MSM_MADV_WILLNEED) + if (msm_obj->dontneed) + mark_unpurgable(msm_obj); + + list_del(&msm_obj->mm_list); + if (msm_obj->madv == MSM_MADV_WILLNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); - else + } else if (msm_obj->madv == MSM_MADV_DONTNEED) { list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed); + mark_purgable(msm_obj); + } else { + WARN_ON(msm_obj->madv != __MSM_MADV_PURGED); + list_add_tail(&msm_obj->mm_list, &priv->inactive_purged); + }
mutex_unlock(&priv->mm_lock); } @@ -971,6 +981,8 @@ void msm_gem_free_object(struct drm_gem_object *obj) struct msm_drm_private *priv = dev->dev_private;
mutex_lock(&priv->mm_lock); + if (msm_obj->dontneed) + mark_unpurgable(msm_obj); list_del(&msm_obj->mm_list); mutex_unlock(&priv->mm_lock);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 7a9107cf1818..13aabfe92dac 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -50,18 +50,24 @@ struct msm_gem_object { */ uint8_t madv;
+ /** + * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)? + */ + bool dontneed : 1; + /** * count of active vmap'ing */ uint8_t vmap_count;
- /* And object is either: - * inactive - on priv->inactive_list + /** + * An object is either: + * inactive - on priv->inactive_dontneed/willneed/purged depending + * on status * active - on one one of the gpu's active_list.. well, at * least for now we don't have (I don't think) hw sync between * 2d and 3d one devices which have both, meaning we need to * block on submit if a bo is already on other ring - * */ struct list_head mm_list;
@@ -186,10 +192,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj) return msm_obj->active_count; }
+/* imported/exported objects are not purgable: */ +static inline bool is_unpurgable(struct msm_gem_object *msm_obj) +{ + return msm_obj->base.dma_buf && msm_obj->base.import_attach; +} + static inline bool is_purgeable(struct msm_gem_object *msm_obj) { return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt && - !msm_obj->base.dma_buf && !msm_obj->base.import_attach; + !is_unpurgable(msm_obj); }
static inline bool is_vunmapable(struct msm_gem_object *msm_obj) @@ -198,6 +210,39 @@ static inline bool is_vunmapable(struct msm_gem_object *msm_obj) return (msm_obj->vmap_count == 0) && msm_obj->vaddr; }
+static inline void mark_purgable(struct msm_gem_object *msm_obj) +{ + struct msm_drm_private *priv = msm_obj->base.dev->dev_private; + + WARN_ON(!mutex_is_locked(&priv->mm_lock)); + + if (is_unpurgable(msm_obj)) + return; + + if (WARN_ON(msm_obj->dontneed)) + return; + + priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT; + msm_obj->dontneed = true; +} + +static inline void mark_unpurgable(struct msm_gem_object *msm_obj) +{ + struct msm_drm_private *priv = msm_obj->base.dev->dev_private; + + WARN_ON(!mutex_is_locked(&priv->mm_lock)); + + if (is_unpurgable(msm_obj)) + return; + + if (WARN_ON(!msm_obj->dontneed)) + return; + + priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT; + WARN_ON(priv->shrinkable_count < 0); + msm_obj->dontneed = false; +} + void msm_gem_purge(struct drm_gem_object *obj); void msm_gem_vunmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 9d5248be746f..f3e948af01c5 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -14,22 +14,7 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) { struct msm_drm_private *priv = container_of(shrinker, struct msm_drm_private, shrinker); - struct msm_gem_object *msm_obj; - unsigned long count = 0; - - mutex_lock(&priv->mm_lock); - - list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) { - if (!msm_gem_trylock(&msm_obj->base)) - continue; - if (is_purgeable(msm_obj)) - count += msm_obj->base.size >> PAGE_SHIFT; - msm_gem_unlock(&msm_obj->base); - } - - mutex_unlock(&priv->mm_lock); - - return count; + return priv->shrinkable_count; }
static unsigned long @@ -45,6 +30,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) { if (freed >= sc->nr_to_scan) break; + /* Use trylock, because we cannot block on a obj that + * might be trying to acquire mm_lock + */ if (!msm_gem_trylock(&msm_obj->base)) continue; if (is_purgeable(msm_obj)) { @@ -56,8 +44,11 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
mutex_unlock(&priv->mm_lock);
- if (freed > 0) + if (freed > 0) { trace_msm_gem_purge(freed << PAGE_SHIFT); + } else { + return SHRINK_STOP; + }
return freed; } @@ -75,6 +66,9 @@ vmap_shrink(struct list_head *mm_list) unsigned unmapped = 0;
list_for_each_entry(msm_obj, mm_list, mm_list) { + /* Use trylock, because we cannot block on a obj that + * might be trying to acquire mm_lock + */ if (!msm_gem_trylock(&msm_obj->base)) continue; if (is_vunmapable(msm_obj)) {
Hi,
On Wed, Mar 31, 2021 at 6:24 PM Rob Clark robdclark@gmail.com wrote:
@@ -45,6 +30,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) { if (freed >= sc->nr_to_scan) break;
/* Use trylock, because we cannot block on a obj that
* might be trying to acquire mm_lock
*/
nit: I thought the above multi-line commenting style was only for "net" subsystem?
if (!msm_gem_trylock(&msm_obj->base)) continue; if (is_purgeable(msm_obj)) {
@@ -56,8 +44,11 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
mutex_unlock(&priv->mm_lock);
if (freed > 0)
if (freed > 0) { trace_msm_gem_purge(freed << PAGE_SHIFT);
} else {
return SHRINK_STOP;
}
It probably doesn't matter, but I wonder if we should still be returning SHRINK_STOP if we got any trylock failures. It could possibly be worth returning 0 in that case?
@@ -75,6 +66,9 @@ vmap_shrink(struct list_head *mm_list) unsigned unmapped = 0;
list_for_each_entry(msm_obj, mm_list, mm_list) {
/* Use trylock, because we cannot block on a obj that
* might be trying to acquire mm_lock
*/
If you end up changing the commenting style above, should also be here.
At this point this seems fine to land to me. Though I'm not an expert on every interaction in this code, I've spent enough time staring at it that I'm comfortable with:
Reviewed-by: Douglas Anderson dianders@chromium.org
On Thu, Apr 1, 2021 at 8:34 AM Doug Anderson dianders@chromium.org wrote:
Hi,
On Wed, Mar 31, 2021 at 6:24 PM Rob Clark robdclark@gmail.com wrote:
@@ -45,6 +30,9 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) { if (freed >= sc->nr_to_scan) break;
/* Use trylock, because we cannot block on a obj that
* might be trying to acquire mm_lock
*/
nit: I thought the above multi-line commenting style was only for "net" subsystem?
we do use the "net" style a fair bit already.. (OTOH I tend to not really care what checkpatch says)
if (!msm_gem_trylock(&msm_obj->base)) continue; if (is_purgeable(msm_obj)) {
@@ -56,8 +44,11 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
mutex_unlock(&priv->mm_lock);
if (freed > 0)
if (freed > 0) { trace_msm_gem_purge(freed << PAGE_SHIFT);
} else {
return SHRINK_STOP;
}
It probably doesn't matter, but I wonder if we should still be returning SHRINK_STOP if we got any trylock failures. It could possibly be worth returning 0 in that case?
On the surface, you'd think that, but there be mm dragons.. we can hit shrinker from the submit path when the obj is locked already and we are trying to allocate backing pages. We don't want to tell vmscan to keep trying, because we'll keep failing to grab that object's lock
@@ -75,6 +66,9 @@ vmap_shrink(struct list_head *mm_list) unsigned unmapped = 0;
list_for_each_entry(msm_obj, mm_list, mm_list) {
/* Use trylock, because we cannot block on a obj that
* might be trying to acquire mm_lock
*/
If you end up changing the commenting style above, should also be here.
At this point this seems fine to land to me. Though I'm not an expert on every interaction in this code, I've spent enough time staring at it that I'm comfortable with:
Reviewed-by: Douglas Anderson dianders@chromium.org
thanks
BR, -R
From: Rob Clark robdclark@chromium.org
In normal cases the gem obj lock is acquired first before mm_lock. The exception is iterating the various object lists. In the shrinker path, deadlock is avoided by using msm_gem_trylock() and skipping over objects that cannot be locked. But for debugfs the straightforward thing is to split things out into a separate list of all objects protected by its own lock.
Fixes: d984457b31c4 ("drm/msm: Add priv->mm_lock to protect active/inactive lists") Signed-off-by: Rob Clark robdclark@chromium.org Tested-by: Douglas Anderson dianders@chromium.org --- drivers/gpu/drm/msm/msm_debugfs.c | 14 +++----------- drivers/gpu/drm/msm/msm_drv.c | 3 +++ drivers/gpu/drm/msm/msm_drv.h | 9 ++++++++- drivers/gpu/drm/msm/msm_gem.c | 14 +++++++++++++- drivers/gpu/drm/msm/msm_gem.h | 10 ++++++++-- 5 files changed, 35 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 85ad0babc326..d611cc8e54a4 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = { static int msm_gem_show(struct drm_device *dev, struct seq_file *m) { struct msm_drm_private *priv = dev->dev_private; - struct msm_gpu *gpu = priv->gpu; int ret;
- ret = mutex_lock_interruptible(&priv->mm_lock); + ret = mutex_lock_interruptible(&priv->obj_lock); if (ret) return ret;
- if (gpu) { - seq_printf(m, "Active Objects (%s):\n", gpu->name); - msm_gem_describe_objects(&gpu->active_list, m); - } - - seq_printf(m, "Inactive Objects:\n"); - msm_gem_describe_objects(&priv->inactive_dontneed, m); - msm_gem_describe_objects(&priv->inactive_willneed, m); + msm_gem_describe_objects(&priv->objects, m);
- mutex_unlock(&priv->mm_lock); + mutex_unlock(&priv->obj_lock);
return 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 3462b0ea14c6..1ef1cd0cc714 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -474,6 +474,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
+ INIT_LIST_HEAD(&priv->objects); + mutex_init(&priv->obj_lock); + INIT_LIST_HEAD(&priv->inactive_willneed); INIT_LIST_HEAD(&priv->inactive_dontneed); INIT_LIST_HEAD(&priv->inactive_purged); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 503168817e24..c84e6f84cb6d 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -174,7 +174,14 @@ struct msm_drm_private { struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */ struct msm_perf_state *perf;
- /* + /** + * List of all GEM objects (mainly for debugfs, protected by obj_lock + * (acquire before per GEM object lock) + */ + struct list_head objects; + struct mutex obj_lock; + + /** * Lists of inactive GEM objects. Every bo is either in one of the * inactive lists (depending on whether or not it is shrinkable) or * gpu->active_list (for the gpu it is active on[1]) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index bec01bb48fce..7ca30e362222 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -961,7 +961,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) size_t size = 0;
seq_puts(m, " flags id ref offset kaddr size madv name\n"); - list_for_each_entry(msm_obj, list, mm_list) { + list_for_each_entry(msm_obj, list, node) { struct drm_gem_object *obj = &msm_obj->base; seq_puts(m, " "); msm_gem_describe(obj, m); @@ -980,6 +980,10 @@ void msm_gem_free_object(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; struct msm_drm_private *priv = dev->dev_private;
+ mutex_lock(&priv->obj_lock); + list_del(&msm_obj->node); + mutex_unlock(&priv->obj_lock); + mutex_lock(&priv->mm_lock); if (msm_obj->dontneed) mark_unpurgable(msm_obj); @@ -1170,6 +1174,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev, list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); mutex_unlock(&priv->mm_lock);
+ mutex_lock(&priv->obj_lock); + list_add_tail(&msm_obj->node, &priv->objects); + mutex_unlock(&priv->obj_lock); + return obj;
fail: @@ -1240,6 +1248,10 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed); mutex_unlock(&priv->mm_lock);
+ mutex_lock(&priv->obj_lock); + list_add_tail(&msm_obj->node, &priv->objects); + mutex_unlock(&priv->obj_lock); + return obj;
fail: diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 13aabfe92dac..e6b28edb1db9 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -60,10 +60,16 @@ struct msm_gem_object { */ uint8_t vmap_count;
+ /** + * Node in list of all objects (mainly for debugfs, protected by + * priv->obj_lock + */ + struct list_head node; + /** * An object is either: - * inactive - on priv->inactive_dontneed/willneed/purged depending - * on status + * inactive - on priv->inactive_dontneed or priv->inactive_willneed + * (depending on purgability status) * active - on one one of the gpu's active_list.. well, at * least for now we don't have (I don't think) hw sync between * 2d and 3d one devices which have both, meaning we need to
Hi,
On Wed, Mar 31, 2021 at 6:24 PM Rob Clark robdclark@gmail.com wrote:
From: Rob Clark robdclark@chromium.org
In normal cases the gem obj lock is acquired first before mm_lock. The exception is iterating the various object lists. In the shrinker path, deadlock is avoided by using msm_gem_trylock() and skipping over objects that cannot be locked. But for debugfs the straightforward thing is to split things out into a separate list of all objects protected by its own lock.
Fixes: d984457b31c4 ("drm/msm: Add priv->mm_lock to protect active/inactive lists") Signed-off-by: Rob Clark robdclark@chromium.org Tested-by: Douglas Anderson dianders@chromium.org
Reviewed-by: Douglas Anderson dianders@chromium.org
From: Rob Clark robdclark@chromium.org
The last patch lost the breakdown of active vs inactive GEM objects in $debugfs/gem. But we can add some better stats to summarize not just active vs inactive, but also purgable/purged to make up for that.
Signed-off-by: Rob Clark robdclark@chromium.org Tested-by: Douglas Anderson dianders@chromium.org Reviewed-by: Douglas Anderson dianders@chromium.org --- drivers/gpu/drm/msm/msm_fb.c | 3 ++- drivers/gpu/drm/msm/msm_gem.c | 31 ++++++++++++++++++++++++------- drivers/gpu/drm/msm/msm_gem.h | 11 ++++++++++- 3 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index d42f0665359a..91c0e493aed5 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -33,6 +33,7 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { #ifdef CONFIG_DEBUG_FS void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) { + struct msm_gem_stats stats = {}; int i, n = fb->format->num_planes;
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n", @@ -42,7 +43,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) for (i = 0; i < n; i++) { seq_printf(m, " %d: offset=%d pitch=%d, obj: ", i, fb->offsets[i], fb->pitches[i]); - msm_gem_describe(fb->obj[i], m); + msm_gem_describe(fb->obj[i], m, &stats); } } #endif diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7ca30e362222..2ecf7f1cef25 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -873,7 +873,8 @@ static void describe_fence(struct dma_fence *fence, const char *type, fence->seqno); }
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, + struct msm_gem_stats *stats) { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct dma_resv *robj = obj->resv; @@ -885,11 +886,23 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
msm_gem_lock(obj);
+ stats->all.count++; + stats->all.size += obj->size; + + if (is_active(msm_obj)) { + stats->active.count++; + stats->active.size += obj->size; + } + switch (msm_obj->madv) { case __MSM_MADV_PURGED: + stats->purged.count++; + stats->purged.size += obj->size; madv = " purged"; break; case MSM_MADV_DONTNEED: + stats->purgable.count++; + stats->purgable.size += obj->size; madv = " purgeable"; break; case MSM_MADV_WILLNEED: @@ -956,20 +969,24 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) { + struct msm_gem_stats stats = {}; struct msm_gem_object *msm_obj; - int count = 0; - size_t size = 0;
seq_puts(m, " flags id ref offset kaddr size madv name\n"); list_for_each_entry(msm_obj, list, node) { struct drm_gem_object *obj = &msm_obj->base; seq_puts(m, " "); - msm_gem_describe(obj, m); - count++; - size += obj->size; + msm_gem_describe(obj, m, &stats); }
- seq_printf(m, "Total %d objects, %zu bytes\n", count, size); + seq_printf(m, "Total: %4d objects, %9zu bytes\n", + stats.all.count, stats.all.size); + seq_printf(m, "Active: %4d objects, %9zu bytes\n", + stats.active.count, stats.active.size); + seq_printf(m, "Purgable: %4d objects, %9zu bytes\n", + stats.purgable.count, stats.purgable.size); + seq_printf(m, "Purged: %4d objects, %9zu bytes\n", + stats.purged.count, stats.purged.size); } #endif
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index e6b28edb1db9..7c7d54bad189 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -158,7 +158,16 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, __printf(2, 3) void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...); #ifdef CONFIG_DEBUG_FS -void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); + +struct msm_gem_stats { + struct { + unsigned count; + size_t size; + } all, active, purgable, purged; +}; + +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, + struct msm_gem_stats *stats); void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); #endif
dri-devel@lists.freedesktop.org