uid_sys_stats: change to use rt_mutex
We have seen this happen multiple times under heavy workload in systrace,
with AMS stuck on uid_lock.
Running process: Process 953
Running thread: android.ui
State: Uninterruptible Sleep
Start:
1,025.628 ms
Duration:
27,955.949 ms
On CPU:
Running instead: system_server
Args:
{kernel callsite when blocked:: "uid_procstat_write+0xb8/0x144"}
Changing to rt_mutex can mitigate this priority inversion.
Bug: 34991231
Bug: 34193533
Test: on marlin
Change-Id: I28eb3971331cea60b1075740c792ab87d103262c
Signed-off-by: Wei Wang <wvw@google.com>
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 4988e32..33fc2b9 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -21,15 +21,17 @@
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
+#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+
#define UID_HASH_BITS 10
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
-static DEFINE_MUTEX(uid_lock);
+static DEFINE_RT_MUTEX(uid_lock);
static struct proc_dir_entry *cpu_parent;
static struct proc_dir_entry *io_parent;
static struct proc_dir_entry *proc_parent;
@@ -98,7 +100,7 @@
cputime_t stime;
unsigned long bkt;
- mutex_lock(&uid_lock);
+ rt_mutex_lock(&uid_lock);
hash_for_each(hash_table, bkt, uid_entry, hash) {
uid_entry->active_stime = 0;
@@ -111,7 +113,7 @@
current_user_ns(), task_uid(task)));
if (!uid_entry) {
read_unlock(&tasklist_lock);
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
pr_err("%s: failed to find the uid_entry for uid %d\n",
__func__, from_kuid_munged(current_user_ns(),
task_uid(task)));
@@ -135,7 +137,7 @@
cputime_to_jiffies(total_stime)) * USEC_PER_MSEC);
}
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
return 0;
}
@@ -182,7 +184,7 @@
kstrtol(end_uid, 10, &uid_end) != 0) {
return -EINVAL;
}
- mutex_lock(&uid_lock);
+ rt_mutex_lock(&uid_lock);
for (; uid_start <= uid_end; uid_start++) {
hash_for_each_possible_safe(hash_table, uid_entry, tmp,
@@ -194,7 +196,7 @@
}
}
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
return count;
}
@@ -243,7 +245,7 @@
struct io_stats *io_bucket, *io_curr, *io_last;
unsigned long bkt;
- BUG_ON(!mutex_is_locked(&uid_lock));
+ BUG_ON(!rt_mutex_is_locked(&uid_lock));
hash_for_each(hash_table, bkt, uid_entry, hash)
memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
@@ -285,7 +287,7 @@
struct uid_entry *uid_entry;
unsigned long bkt;
- mutex_lock(&uid_lock);
+ rt_mutex_lock(&uid_lock);
update_io_stats_locked();
@@ -304,7 +306,7 @@
uid_entry->io[UID_STATE_BACKGROUND].fsync);
}
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
return 0;
}
@@ -349,16 +351,16 @@
if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
return -EINVAL;
- mutex_lock(&uid_lock);
+ rt_mutex_lock(&uid_lock);
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
return -EINVAL;
}
if (uid_entry->state == state) {
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
return count;
}
@@ -366,7 +368,7 @@
uid_entry->state = state;
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
return count;
}
@@ -388,7 +390,7 @@
if (!task)
return NOTIFY_OK;
- mutex_lock(&uid_lock);
+ rt_mutex_lock(&uid_lock);
uid = from_kuid_munged(current_user_ns(), task_uid(task));
uid_entry = find_or_register_uid(uid);
if (!uid_entry) {
@@ -404,7 +406,7 @@
clean_uid_io_last_stats(uid_entry, task);
exit:
- mutex_unlock(&uid_lock);
+ rt_mutex_unlock(&uid_lock);
return NOTIFY_OK;
}