5.10-stable review patch. If anyone has any objections, please let me know.
------------------
From: Rob Clark <robdclark@chromium.org>
[ Upstream commit d984457b31c4c53d2af374d5e78b3eb64debd483 ]
Rather than relying on the big dev->struct_mutex hammer, introduce a more specific lock for protecting the bo lists.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Jordan Crouse <jcrouse@codeaurora.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Stable-dep-of: a30f9f65b5ac ("drm/msm/a5xx: workaround early ring-buffer emptiness check")
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/msm/msm_debugfs.c      |  7 +++++++
 drivers/gpu/drm/msm/msm_drv.c          |  7 +++++++
 drivers/gpu/drm/msm/msm_drv.h          | 13 +++++++++++-
 drivers/gpu/drm/msm/msm_gem.c          | 28 +++++++++++++++-----------
 drivers/gpu/drm/msm/msm_gem_shrinker.c | 12 +++++++++++
 drivers/gpu/drm/msm/msm_gpu.h          |  5 ++++-
 6 files changed, 58 insertions(+), 14 deletions(-)
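A quick illustration of the locking rule this introduces (a simplified sketch,
not part of the patch, with move_to_inactive() and bo_alloc_pages() as made-up
names): mm_lock protects nothing but which list a bo sits on; where
struct_mutex is still involved it is taken before mm_lock; and mm_lock must
never be held across paths that can allocate memory (get_pages()/vmap()/etc),
since the shrinker takes mm_lock from the reclaim path:

	/*
	 * Sketch only; types as in msm_drv.h / msm_gem.h, helper names are
	 * illustrative and do not exist in the driver.
	 */
	static void move_to_inactive(struct drm_device *dev,
				     struct msm_gem_object *msm_obj)
	{
		struct msm_drm_private *priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);	/* only if involved; taken first */
		mutex_lock(&priv->mm_lock);
		list_move_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&priv->mm_lock);
		mutex_unlock(&dev->struct_mutex);

		/*
		 * Not allowed: keeping mm_lock held over bo_alloc_pages()
		 * (a stand-in for get_pages()/vmap()/etc), since those can
		 * enter reclaim and the shrinker also wants mm_lock.
		 */
	}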
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 7a7ccad65c922..97cbb850ad6d4 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -113,6 +113,11 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gpu *gpu = priv->gpu;
+	int ret;
+
+	ret = mutex_lock_interruptible(&priv->mm_lock);
+	if (ret)
+		return ret;
 
 	if (gpu) {
 		seq_printf(m, "Active Objects (%s):\n", gpu->name);
@@ -122,6 +127,8 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 	seq_printf(m, "Inactive Objects:\n");
 	msm_gem_describe_objects(&priv->inactive_list, m);
 
+	mutex_unlock(&priv->mm_lock);
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 087efcb1f34cf..130c721fcd4e6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -7,6 +7,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/kthread.h>
+#include <linux/sched/mm.h>
 #include <linux/uaccess.h>
 #include <uapi/linux/sched/types.h>
 
@@ -442,6 +443,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 	init_llist_head(&priv->free_list);
 
 	INIT_LIST_HEAD(&priv->inactive_list);
+	mutex_init(&priv->mm_lock);
+
+	/* Teach lockdep about lock ordering wrt. shrinker: */
+	fs_reclaim_acquire(GFP_KERNEL);
+	might_lock(&priv->mm_lock);
+	fs_reclaim_release(GFP_KERNEL);
 
 	drm_mode_config_init(ddev);
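A note on the fs_reclaim_acquire()/might_lock()/fs_reclaim_release() block
above (an explanatory sketch, not part of the patch): it fakes a reclaim
context at init time so lockdep records the reclaim -> mm_lock dependency
immediately, instead of only when the shrinker first runs under memory
pressure. The same idiom can prime lockdep for any lock a shrinker takes;
here with a hypothetical foo_lock, assuming <linux/sched/mm.h>,
<linux/lockdep.h> and <linux/mutex.h> are available:

	static struct mutex foo_lock;	/* hypothetical lock taken by a shrinker */

	static void foo_init(void)
	{
		mutex_init(&foo_lock);

		/*
		 * Pretend to enter reclaim and to take foo_lock there, so
		 * lockdep learns the reclaim -> foo_lock ordering up front.
		 */
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&foo_lock);
		fs_reclaim_release(GFP_KERNEL);
	}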
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 1fe809add8f62..3a49e8eb338c0 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -175,8 +175,19 @@ struct msm_drm_private {
 	struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
 	struct msm_perf_state *perf;
 
-	/* list of GEM objects: */
+	/*
+	 * List of inactive GEM objects.  Every bo is either in the inactive_list
+	 * or gpu->active_list (for the gpu it is active on[1])
+	 *
+	 * These lists are protected by mm_lock.  If struct_mutex is involved, it
+	 * should be acquired prior to mm_lock.  One should *not* hold mm_lock in
+	 * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
+	 *
+	 * [1] if someone ever added support for the old 2d cores, there could be
+	 *     more than one gpu object
+	 */
 	struct list_head inactive_list;
+	struct mutex mm_lock;
 
 	/* worker for delayed free of objects: */
 	struct work_struct free_work;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 9c05bf6c45510..d0201909ee7ae 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -745,13 +745,17 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	struct msm_drm_private *priv = obj->dev->dev_private;
+
+	might_sleep();
 	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 
 	if (!atomic_fetch_inc(&msm_obj->active_count)) {
+		mutex_lock(&priv->mm_lock);
 		msm_obj->gpu = gpu;
 		list_del_init(&msm_obj->mm_list);
 		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+		mutex_unlock(&priv->mm_lock);
 	}
 }
@@ -760,12 +764,14 @@ void msm_gem_active_put(struct drm_gem_object *obj)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_drm_private *priv = obj->dev->dev_private;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	might_sleep();
 
 	if (!atomic_dec_return(&msm_obj->active_count)) {
+		mutex_lock(&priv->mm_lock);
 		msm_obj->gpu = NULL;
 		list_del_init(&msm_obj->mm_list);
 		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+		mutex_unlock(&priv->mm_lock);
 	}
 }
@@ -921,13 +927,16 @@ static void free_object(struct msm_gem_object *msm_obj)
 {
 	struct drm_gem_object *obj = &msm_obj->base;
 	struct drm_device *dev = obj->dev;
+	struct msm_drm_private *priv = dev->dev_private;
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
 	/* object should not be on active list: */
 	WARN_ON(is_active(msm_obj));
 
+	mutex_lock(&priv->mm_lock);
 	list_del(&msm_obj->mm_list);
+	mutex_unlock(&priv->mm_lock);
 
 	mutex_lock(&msm_obj->lock);
@@ -1103,14 +1112,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
 	}
 
-	if (struct_mutex_locked) {
-		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	} else {
-		mutex_lock(&dev->struct_mutex);
-		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-		mutex_unlock(&dev->struct_mutex);
-	}
+	mutex_lock(&priv->mm_lock);
+	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+	mutex_unlock(&priv->mm_lock);
 
 	return obj;
@@ -1174,9 +1178,9 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	mutex_unlock(&msm_obj->lock);
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&priv->mm_lock);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&priv->mm_lock);
 
 	return obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 482576d7a39a5..c41b84a3a4842 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -51,11 +51,15 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!msm_gem_shrinker_lock(dev, &unlock))
 		return 0;
 
+	mutex_lock(&priv->mm_lock);
+
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_purgeable(msm_obj))
 			count += msm_obj->base.size >> PAGE_SHIFT;
 	}
 
+	mutex_unlock(&priv->mm_lock);
+
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
@@ -75,6 +79,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	if (!msm_gem_shrinker_lock(dev, &unlock))
 		return SHRINK_STOP;
 
+	mutex_lock(&priv->mm_lock);
+
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (freed >= sc->nr_to_scan)
 			break;
@@ -84,6 +90,8 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		}
 	}
 
+	mutex_unlock(&priv->mm_lock);
+
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
@@ -106,6 +114,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 	if (!msm_gem_shrinker_lock(dev, &unlock))
 		return NOTIFY_DONE;
 
+	mutex_lock(&priv->mm_lock);
+
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_vunmapable(msm_obj)) {
 			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
@@ -118,6 +128,8 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		}
 	}
 
+	mutex_unlock(&priv->mm_lock);
+
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6c9e1fdc1a762..1806e87600c0e 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -94,7 +94,10 @@ struct msm_gpu {
 	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
 	int nr_rings;
 
-	/* list of GEM active objects: */
+	/*
+	 * List of GEM active objects on this gpu.  Protected by
+	 * msm_drm_private::mm_lock
+	 */
 	struct list_head active_list;
 
 	/* does gpu need hw_init? */
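One last illustrative sketch (not part of the patch) of the active_count
handling in msm_gem.c above: the bo only changes lists on the 0->1 transition
in msm_gem_active_get() and the 1->0 transition in msm_gem_active_put(), so
mm_lock is held just for the list surgery itself. Roughly:

	/* Sketch, simplified from msm_gem_active_get()/_put() above */
	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		/* old value was 0: bo just became active on this gpu */
		mutex_lock(&priv->mm_lock);
		list_move_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}

	/* later, on retire: */
	if (!atomic_dec_return(&msm_obj->active_count)) {
		/* new value is 0: last active reference dropped */
		mutex_lock(&priv->mm_lock);
		list_move_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&priv->mm_lock);
	}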