/*
 * Context tracking: Probe on high level context boundaries such as kernel
 * and userspace. This includes syscalls and exceptions entry/exit.
 *
 * This is used by RCU to remove its dependency on the timer tick while a CPU
 * runs in userspace.
 *
 * Started by Frederic Weisbecker:
 *
 * Copyright (C) 2012 Red Hat, Inc., Frederic Weisbecker <fweisbec@redhat.com>
 *
 * Many thanks to Gilad Ben-Yossef, Paul McKenney, Ingo Molnar, Andrew Morton,
 * Steven Rostedt, Peter Zijlstra for suggestions and improvements.
 *
 */

#include <linux/context_tracking.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/hardirq.h>
#include <linux/export.h>
#include <linux/kprobes.h>

#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(context_tracking_enabled);

DEFINE_PER_CPU(struct context_tracking, context_tracking);
EXPORT_SYMBOL_GPL(context_tracking);

void context_tracking_cpu_set(int cpu)
{
	if (!per_cpu(context_tracking.active, cpu)) {
		per_cpu(context_tracking.active, cpu) = true;
		static_key_slow_inc(&context_tracking_enabled);
	}
}

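/*
 * Example (illustrative sketch, not part of this file): the nohz_full
 * setup code is expected to mark every full-dynticks CPU at boot,
 * roughly like this, assuming a cpumask of such CPUs is available
 * (tick_nohz_full_mask in the nohz code):
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, tick_nohz_full_mask)
 *		context_tracking_cpu_set(cpu);
 *
 * The static key stays enabled as long as at least one CPU is tracked;
 * there is deliberately no "cpu_clear" counterpart.
 */
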
/**
 * context_tracking_user_enter - Inform the context tracking that the CPU is
 *                               going to enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it is guaranteed that the remaining kernel instructions
 * to execute won't use any RCU read-side critical section, because this
 * function puts RCU in an extended quiescent state.
 */
void context_tracking_user_enter(void)
{
	unsigned long flags;

	/*
	 * Repeat the user_enter() check here because some archs may be calling
	 * this from asm and, if no CPU needs context tracking, they shouldn't
	 * go further. Keep repeating the check here until those archs support
	 * the inline static key check.
	 */
	if (!context_tracking_is_enabled())
		return;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) != CONTEXT_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			trace_user_enter(0);
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			vtime_user_enter(current);
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's
		 * outside the full dynticks mask for example, we still have to keep
		 * track of the context transitions and states to prevent
		 * inconsistencies on other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the
		 * exception handler and then migrates to another CPU, that new CPU
		 * must know where the exception returns by the time we call
		 * exception_exit().
		 * This information can only be provided by the previous CPU when it
		 * called exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when
		 * context_tracking.active is false because we know that CPU is not
		 * tickless.
		 */
		__this_cpu_write(context_tracking.state, CONTEXT_USER);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_user_enter);

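/*
 * Example (illustrative sketch): an architecture's syscall return path
 * would call user_enter() (the static-key-guarded wrapper around
 * context_tracking_user_enter()) as the last high level step before
 * dropping to userspace. Function names below are hypothetical:
 *
 *	void arch_syscall_return(struct pt_regs *regs)
 *	{
 *		do_pending_work(regs);		// signals, preemption, etc...
 *		user_enter();			// RCU enters extended quiescent state
 *		arch_return_to_user(regs);	// low level register restore only
 *	}
 *
 * Nothing after user_enter() may use an RCU read-side critical section.
 */
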
/**
 * context_tracking_user_exit - Inform the context tracking that the CPU is
 *                              exiting userspace mode and entering the kernel.
 *
 * This function must be called after we enter the kernel from userspace and
 * before any use of an RCU read-side critical section. This potentially
 * includes any high level kernel code such as syscalls, exceptions, signal
 * handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know whether we came from userspace or not.
 */
void context_tracking_user_exit(void)
{
	unsigned long flags;

	if (!context_tracking_is_enabled())
		return;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == CONTEXT_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			vtime_user_exit(current);
			trace_user_exit(0);
		}
		__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
	}
	local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_user_exit);

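/*
 * Example (illustrative sketch): the re-entrancy described above is what
 * lets exception_enter()/exception_exit() wrap any handler without
 * knowing the origin context. A handler following that pattern, with
 * hypothetical arch function names:
 *
 *	void arch_do_page_fault(struct pt_regs *regs)
 *	{
 *		enum ctx_state prev_state;
 *
 *		prev_state = exception_enter();	// calls user_exit() if needed
 *		handle_fault(regs);		// may use RCU, may sleep/migrate
 *		exception_exit(prev_state);	// restores the saved context
 *	}
 */
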
/**
 * __context_tracking_task_switch - context switch the syscall callbacks
 * @prev: the task that is being switched out
 * @next: the task that is being switched in
 *
 * The context tracking uses the syscall slow path to implement its user-kernel
 * boundary probes on syscalls. This way it doesn't impact the syscall fast
 * path on CPUs that don't do context tracking.
 *
 * But we need to clear the flag on the previous task because it may later
 * migrate to some CPU that doesn't do context tracking. As such the TIF
 * flag may not be desired there.
 */
void __context_tracking_task_switch(struct task_struct *prev,
				    struct task_struct *next)
{
	clear_tsk_thread_flag(prev, TIF_NOHZ);
	set_tsk_thread_flag(next, TIF_NOHZ);
}

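/*
 * Example (illustrative sketch): the scheduler is expected to call the
 * static-key-guarded wrapper context_tracking_task_switch() from its
 * context switch path, so that TIF_NOHZ follows the running task:
 *
 *	// inside the scheduler's switch code, simplified
 *	context_tracking_task_switch(prev, next);
 *	switch_to(prev, next, prev);
 */
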
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
void __init context_tracking_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		context_tracking_cpu_set(cpu);
}
#endif