drm/msm: Separate locking of buffer resources from struct_mutex

Buffer object specific resources like pages, domains, and the sg
list need not be protected with struct_mutex. They can be protected
with a buffer object level lock. This simplifies locking and makes
it easier to avoid potential recursive locking scenarios for SVM
involving mmap_sem and struct_mutex. It also removes unnecessary
serialization when creating buffer objects, as well as between
buffer object creation and GPU command submission.
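
To illustrate the direction (a minimal sketch only, not the actual
driver code; the struct, field, and function names below are
invented for illustration), the idea is that per-object state such
as the backing pages is guarded by a mutex embedded in the object
rather than by dev->struct_mutex:

	#include <linux/err.h>
	#include <linux/mutex.h>
	#include <drm/drm_gem.h>

	struct example_gem_object {
		struct drm_gem_object base;
		struct mutex lock;	/* protects pages, sgt, vaddr */
		struct page **pages;
		struct sg_table *sgt;
	};

	/* Populate backing pages without taking dev->struct_mutex. */
	static int example_get_pages(struct example_gem_object *obj)
	{
		int ret = 0;

		mutex_lock(&obj->lock);
		if (!obj->pages) {
			struct page **p = drm_gem_get_pages(&obj->base);

			if (IS_ERR(p))
				ret = PTR_ERR(p);
			else
				obj->pages = p;
		}
		mutex_unlock(&obj->lock);

		return ret;
	}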

Signed-off-by: Sushmita Susheelendra <ssusheel@codeaurora.org>
[robclark: squash in handling new locking for shrinker]
Signed-off-by: Rob Clark <robdclark@gmail.com>
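
The hunks below pass an OBJ_LOCK_SHRINKER argument to msm_gem_purge()
and msm_gem_vunmap(). The sketch below is not the patch's actual
implementation (all names other than OBJ_LOCK_SHRINKER are invented),
but shows the general lockdep-subclass pattern such an argument
suggests: the shrinker acquires the per-object lock under its own
subclass, so lockdep does not report false recursion when reclaim is
entered while another object's lock is already held:

	#include <linux/mutex.h>

	enum example_obj_lock_class {
		OBJ_LOCK_NORMAL,
		OBJ_LOCK_SHRINKER,
	};

	struct example_obj {
		struct mutex lock;
	};

	static void example_purge(struct example_obj *obj,
				  enum example_obj_lock_class subclass)
	{
		/* Annotate the acquisition with the caller's subclass so
		 * that shrinker-context locking gets its own lockdep
		 * class and nesting is not flagged as a deadlock.
		 */
		mutex_lock_nested(&obj->lock, subclass);

		/* ... release pages, tear down mappings, etc. ... */

		mutex_unlock(&obj->lock);
	}
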
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index ab1dd02..b72d8e6 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -20,6 +20,18 @@
 
 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 {
+	/* NOTE: we are *closer* to being able to get rid of
+	 * mutex_trylock_recursive().. the msm_gem code itself does
+	 * not need struct_mutex, although code paths that can
+	 * trigger the shrinker are still called while holding
+	 * struct_mutex.
+	 *
+	 * Also, msm_obj->madv is protected by struct_mutex.
+	 *
+	 * The next step is probably to split out a separate lock
+	 * for protecting inactive_list, so that the shrinker does
+	 * not need struct_mutex.
+	 */
 	switch (mutex_trylock_recursive(&dev->struct_mutex)) {
 	case MUTEX_TRYLOCK_FAILED:
 		return false;
@@ -77,7 +89,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 		if (freed >= sc->nr_to_scan)
 			break;
 		if (is_purgeable(msm_obj)) {
-			msm_gem_purge(&msm_obj->base);
+			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			freed += msm_obj->base.size >> PAGE_SHIFT;
 		}
 	}
@@ -106,7 +118,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (is_vunmapable(msm_obj)) {
-			msm_gem_vunmap(&msm_obj->base);
+			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
 			/* since we don't know any better, lets bail after a few
 			 * and if necessary the shrinker will be invoked again.
 			 * Seems better than unmapping *everything*