Merge "net: rmnet_data: Allow changes in UL aggregation parameters"
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 4cf6250..87e1431 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -2050,6 +2050,8 @@
 {
 	u64 val;
 	struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
+	struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
+
 	/*
 	 * We cannot enable or disable the clocks in interrupt context, this
 	 * function is called from interrupt context if there is an axi error
@@ -2057,9 +2059,11 @@
 	if (in_interrupt())
 		return 0;
 
+	if (ctx->regbase == NULL)
+		return 0;
+
 	kgsl_iommu_enable_clk(mmu);
-	val = KGSL_IOMMU_GET_CTX_REG_Q(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER],
-					TTBR0);
+	val = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
 	kgsl_iommu_disable_clk(mmu);
 	return val;
 }
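
For readers merging the two kgsl_iommu.c hunks above in their head, the patched function body ends up as below. This is a reconstruction, not new code: the enclosing function name is not visible in the hunk headers (in msm kernels this TTBR0 read is most likely kgsl_iommu_get_current_ttbr0()), and "..." stands for comment text that the diff context does not show.

	/* Reconstructed from the two hunks above; "..." marks unshown context. */
	{
		u64 val;
		struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
		struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];

		/*
		 * We cannot enable or disable the clocks in interrupt context, this
		 * function is called from interrupt context if there is an axi error
		 * ...
		 */
		if (in_interrupt())
			return 0;

		/* New guard: the USER context bank may not be mapped yet. */
		if (ctx->regbase == NULL)
			return 0;

		kgsl_iommu_enable_clk(mmu);
		val = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
		kgsl_iommu_disable_clk(mmu);
		return val;
	}
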
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index dd1d142..38a66fd 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -41,6 +41,10 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+	/* count of waiters that have preempted others in the wait list */
+	long m_count;
+#endif
 };
 
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
@@ -75,12 +79,19 @@
 #define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+#define __RWSEM_PRIO_AWARE_INIT(lockname)	.m_count = 0
+#else
+#define __RWSEM_PRIO_AWARE_INIT(lockname)
+#endif
+
 #define __RWSEM_INITIALIZER(name)				\
 	{ __RWSEM_INIT_COUNT(name),				\
 	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
 	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
 	  __RWSEM_OPT_INIT(name)				\
-	  __RWSEM_DEP_MAP_INIT(name) }
+	  __RWSEM_DEP_MAP_INIT(name),				\
+	  __RWSEM_PRIO_AWARE_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
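
A note on the initializer plumbing above: with CONFIG_RWSEM_PRIO_AWARE=y, a static DECLARE_RWSEM() now spells out the new counter explicitly. A rough expansion sketch, with lockdep and optimistic-spinning fields omitted; my_sem is just an example name, and the .count line assumes the usual __RWSEM_INIT_COUNT() definition for the xadd implementation:

	DECLARE_RWSEM(my_sem);

	/* ... expands, roughly, to: */
	struct rw_semaphore my_sem = {
		.count     = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE),
		.wait_list = LIST_HEAD_INIT(my_sem.wait_list),
		.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(my_sem.wait_lock),
		.m_count   = 0,		/* from __RWSEM_PRIO_AWARE_INIT() */
	};
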
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index ebdb004..2a31a58 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -248,3 +248,7 @@
 config QUEUED_RWLOCKS
 	def_bool y if ARCH_USE_QUEUED_RWLOCKS
 	depends on SMP
+
+config RWSEM_PRIO_AWARE
+	def_bool y
+	depends on RWSEM_XCHGADD_ALGORITHM
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a4112df..15be517 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -87,21 +87,13 @@
 	sem->owner = NULL;
 	osq_lock_init(&sem->osq);
 #endif
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+	sem->m_count = 0;
+#endif
 }
 
 EXPORT_SYMBOL(__init_rwsem);
 
-enum rwsem_waiter_type {
-	RWSEM_WAITING_FOR_WRITE,
-	RWSEM_WAITING_FOR_READ
-};
-
-struct rwsem_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	enum rwsem_waiter_type type;
-};
-
 enum rwsem_wake_type {
 	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
 	RWSEM_WAKE_READERS,	/* Wake readers only */
@@ -226,6 +218,7 @@
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk = current;
 	WAKE_Q(wake_q);
+	bool is_first_waiter = false;
 
 	waiter.task = tsk;
 	waiter.type = RWSEM_WAITING_FOR_READ;
@@ -233,7 +226,9 @@
 	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list))
 		adjustment += RWSEM_WAITING_BIAS;
-	list_add_tail(&waiter.list, &sem->wait_list);
+
+	/* is_first_waiter == true means we are first in the queue */
+	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	count = atomic_long_add_return(adjustment, &sem->count);
@@ -246,7 +241,8 @@
 	 */
 	if (count == RWSEM_WAITING_BIAS ||
 	    (count > RWSEM_WAITING_BIAS &&
-	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
+	     (adjustment != -RWSEM_ACTIVE_READ_BIAS ||
+	     is_first_waiter)))
 		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
 	raw_spin_unlock_irq(&sem->wait_lock);
@@ -462,6 +458,7 @@
 	struct rwsem_waiter waiter;
 	struct rw_semaphore *ret = sem;
 	WAKE_Q(wake_q);
+	bool is_first_waiter = false;
 
 	/* undo write bias from down_write operation, stop active locking */
 	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
@@ -483,7 +480,11 @@
 	if (list_empty(&sem->wait_list))
 		waiting = false;
 
-	list_add_tail(&waiter.list, &sem->wait_list);
+	/*
+	 * is_first_waiter == true means we are first in the queue,
+	 * so there are no read locks queued ahead of us.
+	 */
+	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	if (waiting) {
@@ -494,7 +495,7 @@
 		 * no active writers, the lock must be read owned; so we try to
 		 * wake any read locks that were queued ahead of us.
 		 */
-		if (count > RWSEM_WAITING_BIAS) {
+		if (!is_first_waiter && count > RWSEM_WAITING_BIAS) {
 			WAKE_Q(wake_q);
 
 			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
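
A note on the two rwsem-xadd.c slowpath changes above: a reader that the priority-aware insert placed at the head of the queue may now sit in front of already-queued writers, so it must also attempt a wake-up; conversely, a writer that queued first knows no readers are ahead of it and skips waking them. The reader-side condition can be read as the following predicate; the helper is purely illustrative and does not exist in the source, where the expression stays inline in rwsem_down_read_failed():

	/* Illustration only: restates the wake condition from the hunk above. */
	static inline bool reader_should_try_wake(long count, long adjustment,
						  bool is_first_waiter)
	{
		/* No active locks left: wake whatever is at the front of the queue. */
		if (count == RWSEM_WAITING_BIAS)
			return true;
		/*
		 * Active lockers exist.  Try to wake if we sit at the head of the
		 * queue: either the list was empty when we queued (we added the
		 * waiting bias, so adjustment != -RWSEM_ACTIVE_READ_BIAS), or the
		 * priority-aware insert moved us to the front (is_first_waiter).
		 */
		return count > RWSEM_WAITING_BIAS &&
		       (adjustment != -RWSEM_ACTIVE_READ_BIAS || is_first_waiter);
	}
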
diff --git a/kernel/locking/rwsem.h b/kernel/locking/rwsem.h
index a699f40..b60c842 100644
--- a/kernel/locking/rwsem.h
+++ b/kernel/locking/rwsem.h
@@ -15,6 +15,17 @@
  */
 #define RWSEM_READER_OWNED	((struct task_struct *)1UL)
 
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};
+
+struct rwsem_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	enum rwsem_waiter_type type;
+};
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * All writes to owner are protected by WRITE_ONCE() to make sure that
@@ -66,3 +77,60 @@
 {
 }
 #endif
+
+#ifdef CONFIG_RWSEM_PRIO_AWARE
+
+#define RWSEM_MAX_PREEMPT_ALLOWED 3000
+
+/*
+ * Return true if the current waiter is added at the front of the wait list.
+ */
+static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
+				    struct rw_semaphore *sem)
+{
+	struct list_head *pos;
+	struct list_head *head;
+	struct rwsem_waiter *waiter = NULL;
+
+	pos = head = &sem->wait_list;
+	/*
+	 * Rules for task prio aware rwsem wait list queueing:
+	 * 1:	Only waiters whose task priority is higher than
+	 *	DEFAULT_PRIO try to preempt.
+	 * 2:	To avoid starvation, keep a count of how many
+	 *	high-priority waiters have preempted their way into
+	 *	the wait list.
+	 *	If the preempt count exceeds RWSEM_MAX_PREEMPT_ALLOWED,
+	 *	fall back to simple FIFO until the wait list is empty.
+	 */
+	if (list_empty(head)) {
+		list_add_tail(&waiter_in->list, head);
+		sem->m_count = 0;
+		return true;
+	}
+
+	if (waiter_in->task->prio < DEFAULT_PRIO &&
+	    sem->m_count < RWSEM_MAX_PREEMPT_ALLOWED) {
+
+		list_for_each(pos, head) {
+			waiter = list_entry(pos, struct rwsem_waiter, list);
+			if (waiter->task->prio > waiter_in->task->prio) {
+				list_add(&waiter_in->list, pos->prev);
+				sem->m_count++;
+				return &waiter_in->list == head->next;
+			}
+		}
+	}
+
+	list_add_tail(&waiter_in->list, head);
+
+	return false;
+}
+#else
+static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
+				    struct rw_semaphore *sem)
+{
+	list_add_tail(&waiter_in->list, &sem->wait_list);
+	return false;
+}
+#endif
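
To make the queueing rules above concrete, here is a small userspace sketch, not kernel code, that mimics the insertion logic of rwsem_list_add_per_prio() with an index-linked list instead of list_head. All names are made up for the demo; DEFAULT_PRIO keeps the kernel value of 120, but the preempt budget is cut from 3000 to 3 so the FIFO fallback shows up in a seven-waiter run:

	/*
	 * Userspace sketch only -- not kernel code.
	 */
	#include <stdio.h>

	#define DEMO_DEFAULT_PRIO	120
	#define DEMO_MAX_PREEMPT	3	/* kernel uses 3000 */
	#define NWAITERS		7

	struct demo_waiter {
		int prio;
		int next;	/* index of next waiter, -1 = end of list */
	};

	static int head = -1;
	static long m_count;

	/* Returns 1 if the waiter ends up at the head of the queue. */
	static int add_per_prio(struct demo_waiter *w, int idx)
	{
		int *link = &head;

		if (head == -1) {
			m_count = 0;			/* list drained: reset budget */
		} else if (w[idx].prio < DEMO_DEFAULT_PRIO &&
			   m_count < DEMO_MAX_PREEMPT) {
			/* Stop at the first waiter with a worse (larger) prio value. */
			while (*link != -1 && w[*link].prio <= w[idx].prio)
				link = &w[*link].next;
			if (*link != -1)
				m_count++;		/* we actually preempted someone */
		} else {
			while (*link != -1)		/* plain FIFO append */
				link = &w[*link].next;
		}
		w[idx].next = *link;
		*link = idx;
		return link == &head;
	}

	int main(void)
	{
		struct demo_waiter w[NWAITERS] = {
			{ 130 }, { 120 }, { 100 }, { 110 }, { 139 }, { 101 }, { 105 },
		};
		int i;

		for (i = 0; i < NWAITERS; i++)
			printf("enqueue prio %3d -> first_waiter=%d\n",
			       w[i].prio, add_per_prio(w, i));

		printf("final order:");
		for (i = head; i != -1; i = w[i].next)
			printf(" %d", w[i].prio);
		printf("   (m_count=%ld)\n", m_count);
		return 0;
	}

With these inputs the last waiter (prio 105) is appended at the tail even though it outranks several queued tasks, because the preempt budget is already spent; as in the kernel, the counter only resets once the wait list drains.
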